diff --git a/.backportrc.json b/.backportrc.json index 59843f4d5f134..77b06cd419275 100644 --- a/.backportrc.json +++ b/.backportrc.json @@ -1,9 +1,9 @@ { "upstream" : "elastic/elasticsearch", - "targetBranchChoices" : [ "main", "8.14", "8.13", "8.12", "8.11", "8.10", "8.9", "8.8", "8.7", "8.6", "8.5", "8.4", "8.3", "8.2", "8.1", "8.0", "7.17", "6.8" ], + "targetBranchChoices" : [ "main", "8.15", "8.14", "8.13", "8.12", "8.11", "8.10", "8.9", "8.8", "8.7", "8.6", "8.5", "8.4", "8.3", "8.2", "8.1", "8.0", "7.17", "6.8" ], "targetPRLabels" : [ "backport" ], "branchLabelMapping" : { - "^v8.15.0$" : "main", + "^v8.16.0$" : "main", "^v(\\d+).(\\d+).\\d+(?:-(?:alpha|beta|rc)\\d+)?$" : "$1.$2" } } \ No newline at end of file diff --git a/.buildkite/pipelines/dra-workflow.yml b/.buildkite/pipelines/dra-workflow.yml index 32a2b7d22134a..bcc6c9c57d756 100644 --- a/.buildkite/pipelines/dra-workflow.yml +++ b/.buildkite/pipelines/dra-workflow.yml @@ -7,7 +7,7 @@ steps: image: family/elasticsearch-ubuntu-2204 machineType: custom-32-98304 buildDirectory: /dev/shm/bk - diskSizeGb: 250 + diskSizeGb: 350 - wait # The hadoop build depends on the ES artifact # So let's trigger the hadoop build any time we build a new staging artifact diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 4124d4e550d11..776b1ab944f69 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -62,7 +62,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.23", "8.14.2", "8.15.0"] + BWC_VERSION: ["7.17.23", "8.14.4", "8.15.0", "8.16.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index 4217fc91bf0fd..e9c743885d78d 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -577,8 +577,8 @@ steps: env: BWC_VERSION: 8.13.4 - - label: "{{matrix.image}} / 8.14.2 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.14.2 + - label: "{{matrix.image}} / 8.14.4 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.14.4 timeout_in_minutes: 300 matrix: setup: @@ -592,7 +592,7 @@ steps: buildDirectory: /dev/shm/bk diskSizeGb: 250 env: - BWC_VERSION: 8.14.2 + BWC_VERSION: 8.14.4 - label: "{{matrix.image}} / 8.15.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.0 @@ -611,6 +611,23 @@ steps: env: BWC_VERSION: 8.15.0 + - label: "{{matrix.image}} / 8.16.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.16.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + diskSizeGb: 250 + env: + BWC_VERSION: 8.16.0 + - group: packaging-tests-windows steps: - label: "{{matrix.image}} / packaging-tests-windows" diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 06e7ffbc8fb1c..f908b946bb523 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -642,8 +642,8 @@ steps: - signal_reason: agent_stop limit: 3 - - label: 8.14.2 / bwc - command: .ci/scripts/run-gradle.sh 
-Dbwc.checkout.align=true v8.14.2#bwcTest + - label: 8.14.4 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.14.4#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -653,7 +653,7 @@ steps: preemptible: true diskSizeGb: 250 env: - BWC_VERSION: 8.14.2 + BWC_VERSION: 8.14.4 retry: automatic: - exit_status: "-1" @@ -682,6 +682,26 @@ steps: - signal_reason: agent_stop limit: 3 + - label: 8.16.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.16.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk + preemptible: true + diskSizeGb: 250 + env: + BWC_VERSION: 8.16.0 + retry: + automatic: + - exit_status: "-1" + limit: 3 + signal_reason: none + - signal_reason: agent_stop + limit: 3 + - label: concurrent-search-tests command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dtests.jvm.argline=-Des.concurrent_search=true -Des.concurrent_search=true functionalTests timeout_in_minutes: 420 @@ -751,7 +771,7 @@ steps: setup: ES_RUNTIME_JAVA: - openjdk17 - BWC_VERSION: ["7.17.23", "8.14.2", "8.15.0"] + BWC_VERSION: ["7.17.23", "8.14.4", "8.15.0", "8.16.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -801,7 +821,7 @@ steps: - openjdk21 - openjdk22 - openjdk23 - BWC_VERSION: ["7.17.23", "8.14.2", "8.15.0"] + BWC_VERSION: ["7.17.23", "8.14.4", "8.15.0", "8.16.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/pull-request/bwc-snapshots-windows.yml b/.buildkite/pipelines/pull-request/bwc-snapshots-windows.yml deleted file mode 100644 index d37bdf380f926..0000000000000 --- a/.buildkite/pipelines/pull-request/bwc-snapshots-windows.yml +++ /dev/null @@ -1,20 +0,0 @@ -config: - allow-labels: test-windows -steps: - - group: bwc-snapshots-windows - steps: - - label: "{{matrix.BWC_VERSION}} / bwc-snapshots-windows" - key: "bwc-snapshots-windows" - command: .\.buildkite\scripts\run-script.ps1 bash .buildkite/scripts/windows-run-gradle.sh - env: - GRADLE_TASK: "v{{matrix.BWC_VERSION}}#bwcTest" - timeout_in_minutes: 300 - matrix: - setup: - BWC_VERSION: $SNAPSHOT_BWC_VERSIONS - agents: - provider: gcp - image: family/elasticsearch-windows-2022 - machineType: custom-32-98304 - diskType: pd-ssd - diskSizeGb: 350 diff --git a/.buildkite/scripts/gradle-cache-validation.sh b/.buildkite/scripts/gradle-cache-validation.sh new file mode 100755 index 0000000000000..fbb957bc3b26b --- /dev/null +++ b/.buildkite/scripts/gradle-cache-validation.sh @@ -0,0 +1,79 @@ +#!/bin/bash + +set -euo pipefail + +VALIDATION_SCRIPTS_VERSION=2.5.1 +GRADLE_ENTERPRISE_ACCESS_KEY=$(vault kv get -field=value secret/ci/elastic-elasticsearch/gradle-enterprise-api-key) +export GRADLE_ENTERPRISE_ACCESS_KEY + +curl -s -L -O https://github.com/gradle/gradle-enterprise-build-validation-scripts/releases/download/v$VALIDATION_SCRIPTS_VERSION/gradle-enterprise-gradle-build-validation-$VALIDATION_SCRIPTS_VERSION.zip && unzip -q -o gradle-enterprise-gradle-build-validation-$VALIDATION_SCRIPTS_VERSION.zip + +# Create a temporary file +tmpOutputFile=$(mktemp) +trap "rm $tmpOutputFile" EXIT + +gradle-enterprise-gradle-build-validation/03-validate-local-build-caching-different-locations.sh -r https://github.com/elastic/elasticsearch.git -b $BUILDKITE_BRANCH --gradle-enterprise-server https://gradle-enterprise.elastic.co -t precommit --fail-if-not-fully-cacheable | tee $tmpOutputFile + +# Capture the return value +retval=$? 
+ +# Now read the content from the temporary file into a variable +perfOutput=$(cat $tmpOutputFile | sed -n '/Performance Characteristics/,/See https:\/\/gradle.com\/bvs\/main\/Gradle.md#performance-characteristics for details./p' | sed '$d' | sed 's/\x1b\[[0-9;]*m//g') +investigationOutput=$(cat $tmpOutputFile | sed -n '/Investigation Quick Links/,$p' | sed 's/\x1b\[[0-9;]*m//g') + +# Initialize HTML output variable +summaryHtml="
<h4>Performance Characteristics</h4>" +summaryHtml+="<pre>$perfOutput</pre>" + +# generate html for links +summaryHtml+="<h4>Investigation Links</h4>" +summaryHtml+="<pre>$investigationOutput</pre>
" +summaryHtml+="" + +cat << EOF | buildkite-agent annotate --context "ctx-validation-summary" --style "info" +$summaryHtml +EOF + +# Check if the command was successful +if [ $retval -eq 0 ]; then + echo "Experiment completed successfully" +elif [ $retval -eq 1 ]; then + echo "An invalid input was provided while attempting to run the experiment" +elif [ $retval -eq 2 ]; then + echo "One of the builds that is part of the experiment failed" +elif [ $retval -eq 3 ]; then + echo "The build was not fully cacheable for the given task graph" +elif [ $retval -eq 3 ]; then + echo "An unclassified, fatal error happened while running the experiment" +fi + +exit $retval + diff --git a/.ci/bwcVersions b/.ci/bwcVersions index bce556e9fc352..776be80e0d291 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -31,5 +31,6 @@ BWC_VERSION: - "8.11.4" - "8.12.2" - "8.13.4" - - "8.14.2" + - "8.14.4" - "8.15.0" + - "8.16.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 5fc4b6c072899..f5f7f7a7d4ecb 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,4 +1,5 @@ BWC_VERSION: - "7.17.23" - - "8.14.2" + - "8.14.4" - "8.15.0" + - "8.16.0" diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index 8753d4a4762b7..49e81a67e85f9 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -47,8 +47,8 @@ dependencies { api "org.openjdk.jmh:jmh-core:$versions.jmh" annotationProcessor "org.openjdk.jmh:jmh-generator-annprocess:$versions.jmh" // Dependencies of JMH - runtimeOnly 'net.sf.jopt-simple:jopt-simple:4.6' - runtimeOnly 'org.apache.commons:commons-math3:3.2' + runtimeOnly 'net.sf.jopt-simple:jopt-simple:5.0.4' + runtimeOnly 'org.apache.commons:commons-math3:3.6.1' } // enable the JMH's BenchmarkProcessor to generate the final benchmark classes diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java index 4bb33937579c2..2185c6d1df611 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java @@ -41,6 +41,7 @@ import org.elasticsearch.compute.lucene.ValuesSourceReaderOperator; import org.elasticsearch.compute.operator.topn.TopNOperator; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.BlockLoader; import org.elasticsearch.index.mapper.FieldNamesFieldMapper; @@ -189,6 +190,11 @@ public String indexName() { return "benchmark"; } + @Override + public IndexSettings indexSettings() { + throw new UnsupportedOperationException(); + } + @Override public MappedFieldType.FieldExtractPreference fieldExtractPreference() { return MappedFieldType.FieldExtractPreference.NONE; diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/TermsReduceBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/TermsReduceBenchmark.java index 230e0c7e546c2..691874c775302 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/TermsReduceBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/aggregations/TermsReduceBenchmark.java @@ -71,7 +71,7 @@ public class TermsReduceBenchmark { private final SearchPhaseController 
controller = new SearchPhaseController((task, req) -> new AggregationReduceContext.Builder() { @Override public AggregationReduceContext forPartialReduction() { - return new AggregationReduceContext.ForPartial(null, null, task, builder); + return new AggregationReduceContext.ForPartial(null, null, task, builder, b -> {}); } @Override diff --git a/branches.json b/branches.json index 2794b545facc6..b852cd1fa5dbd 100644 --- a/branches.json +++ b/branches.json @@ -4,6 +4,9 @@ { "branch": "main" }, + { + "branch": "8.15" + }, { "branch": "8.14" }, diff --git a/build-tools-internal/build.gradle b/build-tools-internal/build.gradle index 84e56bbaf03ad..a8d1110ff4736 100644 --- a/build-tools-internal/build.gradle +++ b/build-tools-internal/build.gradle @@ -274,10 +274,7 @@ dependencies { // ensuring brought asm version brought in by spock is up-to-date testImplementation buildLibs.asm integTestImplementation buildLibs.asm - integTestImplementation('org.ow2.asm:asm:9.6') - api("org.yaml:snakeyaml") { - version { strictly(versions.snakeyaml) } - } + api(buildLibs.snakeyaml) } // Forcefully downgrade the jackson platform as used in production api enforcedPlatform(buildLibs.jackson.platform) @@ -314,7 +311,7 @@ dependencies { compileOnly buildLibs.checkstyle compileOnly buildLibs.reflections - implementation 'com.github.javaparser:javaparser-core:3.18.0' + implementation buildLibs.javaparser runtimeOnly "org.elasticsearch.gradle:reaper:$version" testImplementation buildLibs.checkstyle diff --git a/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle b/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle index c6930c2263ec3..7cba4730e88da 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle @@ -26,13 +26,10 @@ develocity { if (jenkinsUrl?.host?.endsWith('elastic.co') || jenkinsUrl?.host?.endsWith('elastic.dev') || System.getenv('BUILDKITE') == 'true') { publishing.onlyIf { true } server = 'https://gradle-enterprise.elastic.co' - } else { - publishing.onlyIf { - server.isPresent(); - } + } else if( server.isPresent() == false) { + publishing.onlyIf { false } } - background { tag OS.current().name() tag Architecture.current().name() diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java index 4f9498c8f33a6..b513fd7b93631 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java @@ -8,7 +8,7 @@ package org.elasticsearch.gradle.internal; -import com.gradle.scan.plugin.BuildScanExtension; +import com.gradle.develocity.agent.gradle.DevelocityConfiguration; import org.apache.commons.compress.archivers.tar.TarArchiveEntry; import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; @@ -64,7 +64,7 @@ public void apply(Project target) { File targetFile = target.file("build/" + buildNumber + ".tar.bz2"); File projectDir = target.getProjectDir(); File gradleWorkersDir = new File(target.getGradle().getGradleUserHomeDir(), "workers/"); - BuildScanExtension extension = target.getExtensions().getByType(BuildScanExtension.class); + DevelocityConfiguration extension = target.getExtensions().getByType(DevelocityConfiguration.class); 
File daemonsLogDir = new File(target.getGradle().getGradleUserHomeDir(), "daemon/" + target.getGradle().getGradleVersion()); getFlowScope().always(BuildFinishedFlowAction.class, spec -> { @@ -125,7 +125,7 @@ interface Parameters extends FlowParameters { ListProperty getFilteredFiles(); @Input - Property getBuildScan(); + Property getBuildScan(); } @@ -198,7 +198,7 @@ public void execute(BuildFinishedFlowAction.Parameters parameters) throws FileNo + System.getenv("BUILDKITE_JOB_ID") + "/artifacts/" + artifactUuid; - parameters.getBuildScan().get().link("Artifact Upload", targetLink); + parameters.getBuildScan().get().getBuildScan().link("Artifact Upload", targetLink); } } catch (Exception e) { System.out.println("Failed to upload buildkite artifact " + e.getMessage()); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java index d344b4694a5b5..689c8ddecb057 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java @@ -25,6 +25,7 @@ import org.gradle.api.artifacts.Configuration; import org.gradle.api.file.FileCollection; import org.gradle.api.plugins.JavaPlugin; +import org.gradle.api.provider.ProviderFactory; import org.gradle.api.tasks.SourceSet; import org.gradle.api.tasks.SourceSetContainer; import org.gradle.api.tasks.testing.Test; @@ -33,16 +34,21 @@ import java.util.List; import java.util.Map; +import javax.inject.Inject; + import static org.elasticsearch.gradle.util.FileUtils.mkdirs; import static org.elasticsearch.gradle.util.GradleUtils.maybeConfigure; /** * Applies commonly used settings to all Test tasks in the project */ -public class ElasticsearchTestBasePlugin implements Plugin { +public abstract class ElasticsearchTestBasePlugin implements Plugin { public static final String DUMP_OUTPUT_ON_FAILURE_PROP_NAME = "dumpOutputOnFailure"; + @Inject + protected abstract ProviderFactory getProviderFactory(); + @Override public void apply(Project project) { project.getPluginManager().apply(GradleTestPolicySetupPlugin.class); @@ -150,13 +156,11 @@ public void execute(Task t) { // we use 'temp' relative to CWD since this is per JVM and tests are forbidden from writing to CWD nonInputProperties.systemProperty("java.io.tmpdir", test.getWorkingDir().toPath().resolve("temp")); + test.systemProperties(getProviderFactory().systemPropertiesPrefixedBy("tests.").get()); + test.systemProperties(getProviderFactory().systemPropertiesPrefixedBy("es.").get()); + // TODO: remove setting logging level via system property test.systemProperty("tests.logger.level", "WARN"); - System.getProperties().entrySet().forEach(entry -> { - if ((entry.getKey().toString().startsWith("tests.") || entry.getKey().toString().startsWith("es."))) { - test.systemProperty(entry.getKey().toString(), entry.getValue()); - } - }); // TODO: remove this once ctx isn't added to update script params in 7.0 test.systemProperty("es.scripting.update.ctx_in_params", "false"); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionModuleCheckTaskProvider.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionModuleCheckTaskProvider.java index 13f265388fe3f..a4412cd3db247 100644 --- 
a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionModuleCheckTaskProvider.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionModuleCheckTaskProvider.java @@ -59,7 +59,6 @@ public class InternalDistributionModuleCheckTaskProvider { "org.elasticsearch.plugin", "org.elasticsearch.plugin.analysis", "org.elasticsearch.pluginclassloader", - "org.elasticsearch.preallocate", "org.elasticsearch.securesm", "org.elasticsearch.server", "org.elasticsearch.simdvec", diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java index 42834928bafed..b8ebb454ddb16 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java @@ -51,6 +51,7 @@ import java.util.Arrays; import java.util.List; import java.util.Locale; +import java.util.Optional; import java.util.Random; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; @@ -97,24 +98,25 @@ public void apply(Project project) { JavaVersion minimumCompilerVersion = JavaVersion.toVersion(getResourceContents("/minimumCompilerVersion")); JavaVersion minimumRuntimeVersion = JavaVersion.toVersion(getResourceContents("/minimumRuntimeVersion")); - File runtimeJavaHome = findRuntimeJavaHome(); - boolean isRuntimeJavaHomeSet = Jvm.current().getJavaHome().equals(runtimeJavaHome) == false; + Optional selectedRuntimeJavaHome = findRuntimeJavaHome(); + File actualRuntimeJavaHome = selectedRuntimeJavaHome.orElse(Jvm.current().getJavaHome()); + boolean isRuntimeJavaHomeSet = selectedRuntimeJavaHome.isPresent(); GitInfo gitInfo = GitInfo.gitInfo(project.getRootDir()); BuildParams.init(params -> { params.reset(); - params.setRuntimeJavaHome(runtimeJavaHome); + params.setRuntimeJavaHome(actualRuntimeJavaHome); params.setJavaToolChainSpec(resolveToolchainSpecFromEnv()); params.setRuntimeJavaVersion( determineJavaVersion( "runtime java.home", - runtimeJavaHome, + actualRuntimeJavaHome, isRuntimeJavaHomeSet ? minimumRuntimeVersion : Jvm.current().getJavaVersion() ) ); params.setIsRuntimeJavaHomeSet(isRuntimeJavaHomeSet); - JvmInstallationMetadata runtimeJdkMetaData = metadataDetector.getMetadata(getJavaInstallation(runtimeJavaHome)); + JvmInstallationMetadata runtimeJdkMetaData = metadataDetector.getMetadata(getJavaInstallation(actualRuntimeJavaHome)); params.setRuntimeJavaDetails(formatJavaVendorDetails(runtimeJdkMetaData)); params.setJavaVersions(getAvailableJavaVersions()); params.setMinimumCompilerVersion(minimumCompilerVersion); @@ -298,19 +300,19 @@ private static void assertMinimumCompilerVersion(JavaVersion minimumCompilerVers } } - private File findRuntimeJavaHome() { + private Optional findRuntimeJavaHome() { String runtimeJavaProperty = System.getProperty("runtime.java"); if (runtimeJavaProperty != null) { - return resolveJavaHomeFromToolChainService(runtimeJavaProperty); + return Optional.of(resolveJavaHomeFromToolChainService(runtimeJavaProperty)); } String env = System.getenv("RUNTIME_JAVA_HOME"); if (env != null) { - return new File(env); + return Optional.of(new File(env)); } // fall back to tool chain if set. env = System.getenv("JAVA_TOOLCHAIN_HOME"); - return env == null ? 
Jvm.current().getJavaHome() : new File(env); + return env == null ? Optional.empty() : Optional.of(new File(env)); } @NotNull diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java index 0099a4616f829..07817fdaed1fe 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java @@ -23,11 +23,14 @@ import org.gradle.api.provider.Property; import org.gradle.api.provider.Provider; import org.gradle.api.specs.Spec; +import org.gradle.api.tasks.CacheableTask; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.InputDirectory; import org.gradle.api.tasks.InputFiles; import org.gradle.api.tasks.Optional; import org.gradle.api.tasks.OutputDirectory; +import org.gradle.api.tasks.PathSensitive; +import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.TaskAction; import java.io.File; @@ -89,6 +92,7 @@ * for the dependency. This artifact will be redistributed by us with the release to * comply with the license terms. */ +@CacheableTask public abstract class DependencyLicensesTask extends DefaultTask { private final Pattern regex = Pattern.compile("-v?\\d+.*"); @@ -149,6 +153,7 @@ public DependencyLicensesTask(ObjectFactory objects, ProjectLayout projectLayout } @InputFiles + @PathSensitive(PathSensitivity.NAME_ONLY) public FileCollection getDependencies() { return dependencies; } @@ -159,6 +164,7 @@ public void setDependencies(FileCollection dependencies) { @Optional @InputDirectory + @PathSensitive(PathSensitivity.RELATIVE) public File getLicensesDir() { File asFile = licensesDir.get().getAsFile(); if (asFile.exists()) { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/SplitPackagesAuditTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/SplitPackagesAuditTask.java index ec279589a6bed..f75adbe640297 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/SplitPackagesAuditTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/SplitPackagesAuditTask.java @@ -20,6 +20,7 @@ import org.gradle.api.provider.MapProperty; import org.gradle.api.provider.Property; import org.gradle.api.provider.SetProperty; +import org.gradle.api.tasks.CacheableTask; import org.gradle.api.tasks.CompileClasspath; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.InputFiles; @@ -56,6 +57,7 @@ /** * Checks for split packages with dependencies. These are not allowed in a future modularized world. 
*/ +@CacheableTask public class SplitPackagesAuditTask extends DefaultTask { private static final Logger LOGGER = Logging.getLogger(SplitPackagesAuditTask.class); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/AbstractVersionsTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/AbstractVersionsTask.java index 0ab3a9b917d65..ad39faad1bc85 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/AbstractVersionsTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/AbstractVersionsTask.java @@ -8,19 +8,119 @@ package org.elasticsearch.gradle.internal.release; +import com.github.javaparser.GeneratedJavaParserConstants; +import com.github.javaparser.ast.CompilationUnit; +import com.github.javaparser.ast.body.ClassOrInterfaceDeclaration; +import com.github.javaparser.ast.body.FieldDeclaration; +import com.github.javaparser.ast.expr.IntegerLiteralExpr; +import com.github.javaparser.ast.observer.ObservableProperty; +import com.github.javaparser.printer.ConcreteSyntaxModel; +import com.github.javaparser.printer.concretesyntaxmodel.CsmElement; +import com.github.javaparser.printer.lexicalpreservation.LexicalPreservingPrinter; + import org.gradle.api.DefaultTask; +import org.gradle.api.logging.Logger; +import org.gradle.api.logging.Logging; import org.gradle.initialization.layout.BuildLayout; +import java.io.IOException; +import java.lang.reflect.Field; +import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.List; +import java.util.Map; +import java.util.OptionalInt; +import java.util.stream.Collectors; + +import static com.github.javaparser.ast.observer.ObservableProperty.TYPE_PARAMETERS; +import static com.github.javaparser.printer.concretesyntaxmodel.CsmConditional.Condition.FLAG; +import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.block; +import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.child; +import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.comma; +import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.comment; +import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.conditional; +import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.list; +import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.newline; +import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.none; +import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.sequence; +import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.space; +import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.string; +import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.token; public abstract class AbstractVersionsTask extends DefaultTask { + static { + replaceDefaultJavaParserClassCsm(); + } + + /* + * The default JavaParser CSM which it uses to format any new declarations added to a class + * inserts two newlines after each declaration. Our version classes only have one newline. 
+ * In order to get javaparser lexical printer to use our format, we have to completely replace + * the statically declared CSM pattern using hacky reflection + * to access the static map where these are stored, and insert a replacement that is identical + * apart from only one newline at the end of each member declaration, rather than two. + */ + private static void replaceDefaultJavaParserClassCsm() { + try { + Field classCsms = ConcreteSyntaxModel.class.getDeclaredField("concreteSyntaxModelByClass"); + classCsms.setAccessible(true); + @SuppressWarnings({ "unchecked", "rawtypes" }) + Map csms = (Map) classCsms.get(null); + + // copied from the static initializer in ConcreteSyntaxModel + csms.put( + ClassOrInterfaceDeclaration.class, + sequence( + comment(), + list(ObservableProperty.ANNOTATIONS, newline(), none(), newline()), + list(ObservableProperty.MODIFIERS, space(), none(), space()), + conditional( + ObservableProperty.INTERFACE, + FLAG, + token(GeneratedJavaParserConstants.INTERFACE), + token(GeneratedJavaParserConstants.CLASS) + ), + space(), + child(ObservableProperty.NAME), + list( + TYPE_PARAMETERS, + sequence(comma(), space()), + string(GeneratedJavaParserConstants.LT), + string(GeneratedJavaParserConstants.GT) + ), + list( + ObservableProperty.EXTENDED_TYPES, + sequence(string(GeneratedJavaParserConstants.COMMA), space()), + sequence(space(), token(GeneratedJavaParserConstants.EXTENDS), space()), + none() + ), + list( + ObservableProperty.IMPLEMENTED_TYPES, + sequence(string(GeneratedJavaParserConstants.COMMA), space()), + sequence(space(), token(GeneratedJavaParserConstants.IMPLEMENTS), space()), + none() + ), + space(), + block(sequence(newline(), list(ObservableProperty.MEMBERS, sequence(newline()/*, newline()*/), newline(), newline()))) + ) + ); + } catch (ReflectiveOperationException e) { + throw new AssertionError(e); + } + } + + private static final Logger LOGGER = Logging.getLogger(AbstractVersionsTask.class); + static final String TRANSPORT_VERSION_TYPE = "TransportVersion"; static final String INDEX_VERSION_TYPE = "IndexVersion"; static final String SERVER_MODULE_PATH = "server/src/main/java/"; - static final String TRANSPORT_VERSION_FILE_PATH = SERVER_MODULE_PATH + "org/elasticsearch/TransportVersions.java"; - static final String INDEX_VERSION_FILE_PATH = SERVER_MODULE_PATH + "org/elasticsearch/index/IndexVersions.java"; + + static final String VERSION_FILE_PATH = SERVER_MODULE_PATH + "org/elasticsearch/Version.java"; + static final String TRANSPORT_VERSIONS_FILE_PATH = SERVER_MODULE_PATH + "org/elasticsearch/TransportVersions.java"; + static final String INDEX_VERSIONS_FILE_PATH = SERVER_MODULE_PATH + "org/elasticsearch/index/IndexVersions.java"; static final String SERVER_RESOURCES_PATH = "server/src/main/resources/"; static final String TRANSPORT_VERSIONS_RECORD = SERVER_RESOURCES_PATH + "org/elasticsearch/TransportVersions.csv"; @@ -32,4 +132,34 @@ protected AbstractVersionsTask(BuildLayout layout) { rootDir = layout.getRootDirectory().toPath(); } + static Map splitVersionIds(List version) { + return version.stream().map(l -> { + var split = l.split(":"); + if (split.length != 2) throw new IllegalArgumentException("Invalid tag format [" + l + "]"); + return split; + }).collect(Collectors.toMap(l -> l[0], l -> Integer.parseInt(l[1]))); + } + + static OptionalInt findSingleIntegerExpr(FieldDeclaration field) { + var ints = field.findAll(IntegerLiteralExpr.class); + switch (ints.size()) { + case 0 -> { + return OptionalInt.empty(); + } + case 1 -> { + return 
OptionalInt.of(ints.get(0).asNumber().intValue()); + } + default -> { + LOGGER.warn("Multiple integers found in version field declaration [{}]", field); // and ignore it + return OptionalInt.empty(); + } + } + } + + static void writeOutNewContents(Path file, CompilationUnit unit) throws IOException { + if (unit.containsData(LexicalPreservingPrinter.NODE_TEXT_DATA) == false) { + throw new IllegalArgumentException("CompilationUnit has no lexical information for output"); + } + Files.writeString(file, LexicalPreservingPrinter.print(unit), StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); + } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ExtractCurrentVersionsTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ExtractCurrentVersionsTask.java index 3530d7ef9e807..53dd55041f6bd 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ExtractCurrentVersionsTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ExtractCurrentVersionsTask.java @@ -11,7 +11,6 @@ import com.github.javaparser.StaticJavaParser; import com.github.javaparser.ast.CompilationUnit; import com.github.javaparser.ast.body.FieldDeclaration; -import com.github.javaparser.ast.expr.IntegerLiteralExpr; import org.gradle.api.logging.Logger; import org.gradle.api.logging.Logging; @@ -53,11 +52,11 @@ public void executeTask() throws IOException { LOGGER.lifecycle("Extracting latest version information"); List output = new ArrayList<>(); - int transportVersion = readLatestVersion(rootDir.resolve(TRANSPORT_VERSION_FILE_PATH)); + int transportVersion = readLatestVersion(rootDir.resolve(TRANSPORT_VERSIONS_FILE_PATH)); LOGGER.lifecycle("Transport version: {}", transportVersion); output.add(TRANSPORT_VERSION_TYPE + ":" + transportVersion); - int indexVersion = readLatestVersion(rootDir.resolve(INDEX_VERSION_FILE_PATH)); + int indexVersion = readLatestVersion(rootDir.resolve(INDEX_VERSIONS_FILE_PATH)); LOGGER.lifecycle("Index version: {}", indexVersion); output.add(INDEX_VERSION_TYPE + ":" + indexVersion); @@ -74,21 +73,13 @@ Integer highestVersionId() { @Override public void accept(FieldDeclaration fieldDeclaration) { - var ints = fieldDeclaration.findAll(IntegerLiteralExpr.class); - switch (ints.size()) { - case 0 -> { - // No ints in the field declaration, ignore + findSingleIntegerExpr(fieldDeclaration).ifPresent(id -> { + if (highestVersionId != null && highestVersionId > id) { + LOGGER.warn("Version ids [{}, {}] out of order", highestVersionId, id); + } else { + highestVersionId = id; } - case 1 -> { - int id = ints.get(0).asNumber().intValue(); - if (highestVersionId != null && highestVersionId > id) { - LOGGER.warn("Version ids [{}, {}] out of order", highestVersionId, id); - } else { - highestVersionId = id; - } - } - default -> LOGGER.warn("Multiple integers found in version field declaration [{}]", fieldDeclaration); // and ignore it - } + }); } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java index 8001b82797557..08abb02ea831e 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java @@ -52,6 +52,7 @@ public void apply(Project project) { 
project.getTasks().register("extractCurrentVersions", ExtractCurrentVersionsTask.class); project.getTasks().register("tagVersions", TagVersionsTask.class); + project.getTasks().register("setCompatibleVersions", SetCompatibleVersionsTask.class); final FileTree yamlFiles = projectDirectory.dir("docs/changelog") .getAsFileTree() diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/SetCompatibleVersionsTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/SetCompatibleVersionsTask.java new file mode 100644 index 0000000000000..15e0a0cc345d5 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/SetCompatibleVersionsTask.java @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.release; + +import com.github.javaparser.StaticJavaParser; +import com.github.javaparser.ast.CompilationUnit; +import com.github.javaparser.ast.body.ClassOrInterfaceDeclaration; +import com.github.javaparser.ast.expr.NameExpr; +import com.github.javaparser.printer.lexicalpreservation.LexicalPreservingPrinter; + +import org.gradle.api.tasks.TaskAction; +import org.gradle.api.tasks.options.Option; +import org.gradle.initialization.layout.BuildLayout; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +import javax.inject.Inject; + +public class SetCompatibleVersionsTask extends AbstractVersionsTask { + + private Map versionIds = Map.of(); + + @Inject + public SetCompatibleVersionsTask(BuildLayout layout) { + super(layout); + } + + @Option(option = "version-id", description = "Version id used for the release. 
Of the form :.") + public void versionIds(List version) { + this.versionIds = splitVersionIds(version); + } + + @TaskAction + public void executeTask() throws IOException { + if (versionIds.isEmpty()) { + throw new IllegalArgumentException("No version ids specified"); + } + Integer transportVersion = versionIds.get(TRANSPORT_VERSION_TYPE); + if (transportVersion == null) { + throw new IllegalArgumentException("TransportVersion id not specified"); + } + + Path versionJava = rootDir.resolve(TRANSPORT_VERSIONS_FILE_PATH); + CompilationUnit file = LexicalPreservingPrinter.setup(StaticJavaParser.parse(versionJava)); + + Optional modifiedFile; + + modifiedFile = setMinimumCcsTransportVersion(file, transportVersion); + + if (modifiedFile.isPresent()) { + writeOutNewContents(versionJava, modifiedFile.get()); + } + } + + static Optional setMinimumCcsTransportVersion(CompilationUnit unit, int transportVersion) { + ClassOrInterfaceDeclaration transportVersions = unit.getClassByName("TransportVersions").get(); + + String tvConstantName = transportVersions.getFields().stream().filter(f -> { + var i = findSingleIntegerExpr(f); + return i.isPresent() && i.getAsInt() == transportVersion; + }) + .map(f -> f.getVariable(0).getNameAsString()) + .findFirst() + .orElseThrow(() -> new IllegalStateException("Could not find constant for id " + transportVersion)); + + transportVersions.getFieldByName("MINIMUM_CCS_VERSION") + .orElseThrow(() -> new IllegalStateException("Could not find MINIMUM_CCS_VERSION constant")) + .getVariable(0) + .setInitializer(new NameExpr(tvConstantName)); + + return Optional.of(unit); + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/TagVersionsTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/TagVersionsTask.java index fa11746543e82..a7f67f87b602e 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/TagVersionsTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/TagVersionsTask.java @@ -47,11 +47,7 @@ public void release(String version) { @Option(option = "tag-version", description = "Version id to tag. 
Of the form :.") public void tagVersions(List version) { - this.tagVersions = version.stream().map(l -> { - var split = l.split(":"); - if (split.length != 2) throw new IllegalArgumentException("Invalid tag format [" + l + "]"); - return split; - }).collect(Collectors.toMap(l -> l[0], l -> Integer.parseInt(l[1]))); + this.tagVersions = splitVersionIds(version); } @TaskAction diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/UpdateVersionsTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/UpdateVersionsTask.java index 9996ffe613545..b19e5c0beacf8 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/UpdateVersionsTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/UpdateVersionsTask.java @@ -8,7 +8,6 @@ package org.elasticsearch.gradle.internal.release; -import com.github.javaparser.GeneratedJavaParserConstants; import com.github.javaparser.StaticJavaParser; import com.github.javaparser.ast.CompilationUnit; import com.github.javaparser.ast.NodeList; @@ -16,14 +15,10 @@ import com.github.javaparser.ast.body.FieldDeclaration; import com.github.javaparser.ast.body.VariableDeclarator; import com.github.javaparser.ast.expr.NameExpr; -import com.github.javaparser.ast.observer.ObservableProperty; -import com.github.javaparser.printer.ConcreteSyntaxModel; -import com.github.javaparser.printer.concretesyntaxmodel.CsmElement; import com.github.javaparser.printer.lexicalpreservation.LexicalPreservingPrinter; import com.google.common.annotations.VisibleForTesting; import org.elasticsearch.gradle.Version; -import org.gradle.api.DefaultTask; import org.gradle.api.logging.Logger; import org.gradle.api.logging.Logging; import org.gradle.api.tasks.TaskAction; @@ -31,10 +26,7 @@ import org.gradle.initialization.layout.BuildLayout; import java.io.IOException; -import java.lang.reflect.Field; -import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.StandardOpenOption; import java.util.Map; import java.util.NavigableMap; import java.util.Objects; @@ -47,93 +39,12 @@ import javax.annotation.Nullable; import javax.inject.Inject; -import static com.github.javaparser.ast.observer.ObservableProperty.TYPE_PARAMETERS; -import static com.github.javaparser.printer.concretesyntaxmodel.CsmConditional.Condition.FLAG; -import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.block; -import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.child; -import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.comma; -import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.comment; -import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.conditional; -import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.list; -import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.newline; -import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.none; -import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.sequence; -import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.space; -import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.string; -import static com.github.javaparser.printer.concretesyntaxmodel.CsmElement.token; - -public class UpdateVersionsTask extends DefaultTask { - - static { - replaceDefaultJavaParserClassCsm(); - } - - /* - * The default JavaParser 
CSM which it uses to format any new declarations added to a class - * inserts two newlines after each declaration. Our version classes only have one newline. - * In order to get javaparser lexical printer to use our format, we have to completely replace - * the statically declared CSM pattern using hacky reflection - * to access the static map where these are stored, and insert a replacement that is identical - * apart from only one newline at the end of each member declaration, rather than two. - */ - private static void replaceDefaultJavaParserClassCsm() { - try { - Field classCsms = ConcreteSyntaxModel.class.getDeclaredField("concreteSyntaxModelByClass"); - classCsms.setAccessible(true); - @SuppressWarnings({ "unchecked", "rawtypes" }) - Map csms = (Map) classCsms.get(null); - - // copied from the static initializer in ConcreteSyntaxModel - csms.put( - ClassOrInterfaceDeclaration.class, - sequence( - comment(), - list(ObservableProperty.ANNOTATIONS, newline(), none(), newline()), - list(ObservableProperty.MODIFIERS, space(), none(), space()), - conditional( - ObservableProperty.INTERFACE, - FLAG, - token(GeneratedJavaParserConstants.INTERFACE), - token(GeneratedJavaParserConstants.CLASS) - ), - space(), - child(ObservableProperty.NAME), - list( - TYPE_PARAMETERS, - sequence(comma(), space()), - string(GeneratedJavaParserConstants.LT), - string(GeneratedJavaParserConstants.GT) - ), - list( - ObservableProperty.EXTENDED_TYPES, - sequence(string(GeneratedJavaParserConstants.COMMA), space()), - sequence(space(), token(GeneratedJavaParserConstants.EXTENDS), space()), - none() - ), - list( - ObservableProperty.IMPLEMENTED_TYPES, - sequence(string(GeneratedJavaParserConstants.COMMA), space()), - sequence(space(), token(GeneratedJavaParserConstants.IMPLEMENTS), space()), - none() - ), - space(), - block(sequence(newline(), list(ObservableProperty.MEMBERS, sequence(newline()/*, newline()*/), newline(), newline()))) - ) - ); - } catch (ReflectiveOperationException e) { - throw new AssertionError(e); - } - } +public class UpdateVersionsTask extends AbstractVersionsTask { private static final Logger LOGGER = Logging.getLogger(UpdateVersionsTask.class); - static final String SERVER_MODULE_PATH = "server/src/main/java/"; - static final String VERSION_FILE_PATH = SERVER_MODULE_PATH + "org/elasticsearch/Version.java"; - static final Pattern VERSION_FIELD = Pattern.compile("V_(\\d+)_(\\d+)_(\\d+)(?:_(\\w+))?"); - final Path rootDir; - @Nullable private Version addVersion; private boolean setCurrent; @@ -142,7 +53,7 @@ private static void replaceDefaultJavaParserClassCsm() { @Inject public UpdateVersionsTask(BuildLayout layout) { - rootDir = layout.getRootDirectory().toPath(); + super(layout); } @Option(option = "add-version", description = "Specifies the version to add") @@ -287,11 +198,4 @@ static Optional removeVersionConstant(CompilationUnit versionJa return Optional.of(versionJava); } - - static void writeOutNewContents(Path file, CompilationUnit unit) throws IOException { - if (unit.containsData(LexicalPreservingPrinter.NODE_TEXT_DATA) == false) { - throw new IllegalArgumentException("CompilationUnit has no lexical information for output"); - } - Files.writeString(file, LexicalPreservingPrinter.print(unit), StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); - } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java 
b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java index d0c7e9316d996..ec86798e653f1 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java @@ -88,7 +88,7 @@ public String url(String os, String arch, String extension) { List builds = List.of( getBundledJdkBuild(), // 23 early access - new EarlyAccessJdkBuild(JavaLanguageVersion.of(23), "23", "23") + new EarlyAccessJdkBuild(JavaLanguageVersion.of(23), "23", "24") ); private JdkBuild getBundledJdkBuild() { diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/SetCompatibleVersionsTaskTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/SetCompatibleVersionsTaskTests.java new file mode 100644 index 0000000000000..eecb953a44eb6 --- /dev/null +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/SetCompatibleVersionsTaskTests.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.release; + +import com.github.javaparser.StaticJavaParser; +import com.github.javaparser.ast.CompilationUnit; + +import org.junit.Test; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.hasToString; + +public class SetCompatibleVersionsTaskTests { + + @Test + public void updateMinCcsVersion() { + final String transportVersionsJava = """ + public class TransportVersions { + public static final TransportVersion V1 = def(100); + public static final TransportVersion V2 = def(200); + public static final TransportVersion V3 = def(300); + + public static final TransportVersion MINIMUM_CCS_VERSION = V2; + }"""; + final String updatedJava = """ + public class TransportVersions { + + public static final TransportVersion V1 = def(100); + + public static final TransportVersion V2 = def(200); + + public static final TransportVersion V3 = def(300); + + public static final TransportVersion MINIMUM_CCS_VERSION = V3; + } + """; + + CompilationUnit unit = StaticJavaParser.parse(transportVersionsJava); + + SetCompatibleVersionsTask.setMinimumCcsTransportVersion(unit, 300); + + assertThat(unit, hasToString(updatedJava)); + } +} diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index 12417239cc7dc..1dd9fb95bd17b 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -1,5 +1,5 @@ -elasticsearch = 8.15.0 -lucene = 9.11.0 +elasticsearch = 8.16.0 +lucene = 9.11.1 bundled_jdk_vendor = openjdk bundled_jdk = 22.0.1+8@c7ec1332f7bb44aeba2eb341ae18aca4 @@ -49,7 +49,7 @@ commonsCompress = 1.24.0 reflections = 0.10.2 # benchmark dependencies -jmh = 1.26 +jmh = 1.37 # test dependencies # when updating this version, also update :qa:evil-tests diff --git a/distribution/build.gradle b/distribution/build.gradle index 77f1a2d032c73..47367ab0261a2 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -280,8 +280,6 @@ 
configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { dependencies { libs project(':server') - // this is a special circumstance of a jar that is not a dependency of server, but needs to be in the module path - libs project(':libs:elasticsearch-preallocate') libsVersionChecker project(':distribution:tools:java-version-checker') libsCliLauncher project(':distribution:tools:cli-launcher') diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java index 298b4671582b5..2a89f18209d11 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java @@ -69,11 +69,6 @@ static List systemJvmOptions(Settings nodeSettings, final Map + { + "description": "Grants user access to some indicies.", + "indices": [ + {"names": ["index1", "index2" ], "privileges": ["all"], "field_security" : {"grant" : [ "title", "body" ]}} + ], + "metadata" : {"version": 1} + } + ''' setups['jacknich_user'] = ''' - do: diff --git a/docs/changelog/106252.yaml b/docs/changelog/106252.yaml new file mode 100644 index 0000000000000..5e3f084632b9d --- /dev/null +++ b/docs/changelog/106252.yaml @@ -0,0 +1,6 @@ +pr: 106252 +summary: Add min/max range of the `event.ingested` field to cluster state for searchable + snapshots +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/106253.yaml b/docs/changelog/106253.yaml deleted file mode 100644 index b80cda37f63c7..0000000000000 --- a/docs/changelog/106253.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 106253 -summary: Fix for from parameter when using `sub_searches` and rank -area: Ranking -type: bug -issues: - - 99011 diff --git a/docs/changelog/106520.yaml b/docs/changelog/106520.yaml new file mode 100644 index 0000000000000..c3fe69a4c3dbd --- /dev/null +++ b/docs/changelog/106520.yaml @@ -0,0 +1,6 @@ +pr: 106520 +summary: Updated the transport CA name in Security Auto-Configuration. +area: Security +type: bug +issues: + - 106455 diff --git a/docs/changelog/107047.yaml b/docs/changelog/107047.yaml new file mode 100644 index 0000000000000..89caed6f55074 --- /dev/null +++ b/docs/changelog/107047.yaml @@ -0,0 +1,6 @@ +pr: 107047 +summary: "Search/Mapping: KnnVectorQueryBuilder support for allowUnmappedFields" +area: Search +type: bug +issues: + - 106846 diff --git a/docs/changelog/107191.yaml b/docs/changelog/107191.yaml new file mode 100644 index 0000000000000..5ef6297c0f3f1 --- /dev/null +++ b/docs/changelog/107191.yaml @@ -0,0 +1,17 @@ +pr: 107191 +summary: Stricter failure handling in multi-repo get-snapshots request handling +area: Snapshot/Restore +type: bug +issues: [] +highlight: + title: Stricter failure handling in multi-repo get-snapshots request handling + body: | + If a multi-repo get-snapshots request encounters a failure in one of the + targeted repositories then earlier versions of Elasticsearch would proceed + as if the faulty repository did not exist, except for a per-repository + failure report in a separate section of the response body. This makes it + impossible to paginate the results properly in the presence of failures. In + versions 8.15.0 and later this API's failure handling behaviour has been + made stricter, reporting an overall failure if any targeted repository's + contents cannot be listed. 
+ notable: true diff --git a/docs/changelog/107415.yaml b/docs/changelog/107415.yaml new file mode 100644 index 0000000000000..8877d0426c60d --- /dev/null +++ b/docs/changelog/107415.yaml @@ -0,0 +1,6 @@ +pr: 107415 +summary: Fix `DecayFunctions'` `toString` +area: Search +type: bug +issues: + - 100870 diff --git a/docs/changelog/108395.yaml b/docs/changelog/108395.yaml new file mode 100644 index 0000000000000..c33cf169a99fa --- /dev/null +++ b/docs/changelog/108395.yaml @@ -0,0 +1,5 @@ +pr: 108395 +summary: "ESQL: change from quoting from backtick to quote" +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/108606.yaml b/docs/changelog/108606.yaml new file mode 100644 index 0000000000000..04780bff58800 --- /dev/null +++ b/docs/changelog/108606.yaml @@ -0,0 +1,14 @@ +pr: 108606 +summary: "Extend ISO8601 datetime parser to specify forbidden fields, allowing it to be used\ + \ on more formats" +area: Infra/Core +type: enhancement +issues: [] +highlight: + title: New custom parser for more ISO-8601 date formats + body: |- + Following on from #106486, this extends the custom ISO-8601 datetime parser to cover the `strict_year`, + `strict_year_month`, `strict_date_time`, `strict_date_time_no_millis`, `strict_date_hour_minute_second`, + `strict_date_hour_minute_second_millis`, and `strict_date_hour_minute_second_fraction` date formats. + As before, the parser will use the existing java.time parser if there are parsing issues, and the + `es.datetime.java_time_parsers=true` JVM property will force the use of the old parsers regardless. diff --git a/docs/changelog/108733.yaml b/docs/changelog/108733.yaml new file mode 100644 index 0000000000000..76a969219ea4c --- /dev/null +++ b/docs/changelog/108733.yaml @@ -0,0 +1,5 @@ +pr: 108733 +summary: Query Roles API +area: Security +type: feature +issues: [] diff --git a/docs/changelog/108764.yaml b/docs/changelog/108764.yaml new file mode 100644 index 0000000000000..94de27eb52c9b --- /dev/null +++ b/docs/changelog/108764.yaml @@ -0,0 +1,6 @@ +pr: 108764 +summary: ST_DISTANCE Function +area: ES|QL +type: enhancement +issues: + - 108212 diff --git a/docs/changelog/109084.yaml b/docs/changelog/109084.yaml new file mode 100644 index 0000000000000..67ff5610c5a66 --- /dev/null +++ b/docs/changelog/109084.yaml @@ -0,0 +1,5 @@ +pr: 109084 +summary: Add AVX-512 optimised vector distance functions for int7 on x64 +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/109341.yaml b/docs/changelog/109341.yaml deleted file mode 100644 index 0c1eaa98a8aa2..0000000000000 --- a/docs/changelog/109341.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109341 -summary: Re-define `index.mapper.dynamic` setting in 8.x for a better 7.x to 8.x upgrade if this setting is used. 
-area: Mapping -type: bug -issues: [] diff --git a/docs/changelog/109395.yaml b/docs/changelog/109395.yaml new file mode 100644 index 0000000000000..e5336695afa48 --- /dev/null +++ b/docs/changelog/109395.yaml @@ -0,0 +1,5 @@ +pr: 109395 +summary: Correct positioning for unique token filter +area: Analysis +type: bug +issues: [] diff --git a/docs/changelog/109492.yaml b/docs/changelog/109492.yaml deleted file mode 100644 index d4d1e83eb7786..0000000000000 --- a/docs/changelog/109492.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109492 -summary: Add hexstring support byte painless scorers -area: Search -type: bug -issues: [] diff --git a/docs/changelog/109500.yaml b/docs/changelog/109500.yaml deleted file mode 100644 index cfd6bc770d5d6..0000000000000 --- a/docs/changelog/109500.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109500 -summary: Guard file settings readiness on file settings support -area: Infra/Settings -type: bug -issues: [] diff --git a/docs/changelog/109501.yaml b/docs/changelog/109501.yaml new file mode 100644 index 0000000000000..6e81f98816cbf --- /dev/null +++ b/docs/changelog/109501.yaml @@ -0,0 +1,14 @@ +pr: 109501 +summary: Reflect latest changes in synthetic source documentation +area: Mapping +type: enhancement +issues: [] +highlight: + title: Synthetic `_source` improvements + body: |- + There are multiple improvements to synthetic `_source` functionality: + + * Synthetic `_source` is now supported for all field types including `nested` and `object`. `object` fields are supported with `enabled` set to `false`. + + * Synthetic `_source` can be enabled together with `ignore_malformed` and `ignore_above` parameters for all field types that support them. + notable: false diff --git a/docs/changelog/109533.yaml b/docs/changelog/109533.yaml deleted file mode 100644 index 5720410e5f370..0000000000000 --- a/docs/changelog/109533.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109533 -summary: Fix IndexOutOfBoundsException during inference -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/109629.yaml b/docs/changelog/109629.yaml deleted file mode 100644 index c468388117b72..0000000000000 --- a/docs/changelog/109629.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109629 -summary: "[Data streams] Fix the description of the lazy rollover task" -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/109632.yaml b/docs/changelog/109632.yaml deleted file mode 100644 index 6b04160bbdbec..0000000000000 --- a/docs/changelog/109632.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109632 -summary: Force execute inactive sink reaper -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/109636.yaml b/docs/changelog/109636.yaml deleted file mode 100644 index f8f73a75dfd3d..0000000000000 --- a/docs/changelog/109636.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109636 -summary: "Ensure a lazy rollover request will rollover the target data stream once." 
-area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/109667.yaml b/docs/changelog/109667.yaml new file mode 100644 index 0000000000000..782a1b1cf6c9b --- /dev/null +++ b/docs/changelog/109667.yaml @@ -0,0 +1,5 @@ +pr: 109667 +summary: Inference autoscaling +area: Machine Learning +type: feature +issues: [] diff --git a/docs/changelog/109684.yaml b/docs/changelog/109684.yaml new file mode 100644 index 0000000000000..156f568290cf5 --- /dev/null +++ b/docs/changelog/109684.yaml @@ -0,0 +1,5 @@ +pr: 109684 +summary: Avoid `ModelAssignment` deadlock +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/109695.yaml b/docs/changelog/109695.yaml deleted file mode 100644 index f922b76412676..0000000000000 --- a/docs/changelog/109695.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109695 -summary: Fix ESQL cancellation for exchange requests -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/109807.yaml b/docs/changelog/109807.yaml new file mode 100644 index 0000000000000..5cf8a2c896c4e --- /dev/null +++ b/docs/changelog/109807.yaml @@ -0,0 +1,6 @@ +pr: 109807 +summary: "ESQL: Fix LOOKUP attribute shadowing" +area: ES|QL +type: bug +issues: + - 109392 diff --git a/docs/changelog/109813.yaml b/docs/changelog/109813.yaml new file mode 100644 index 0000000000000..edcef17e87606 --- /dev/null +++ b/docs/changelog/109813.yaml @@ -0,0 +1,5 @@ +pr: 109813 +summary: Add text similarity reranker retriever +area: Ranking +type: feature +issues: [] diff --git a/docs/changelog/109824.yaml b/docs/changelog/109824.yaml deleted file mode 100644 index 987e8c0a8b1a2..0000000000000 --- a/docs/changelog/109824.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109824 -summary: Check array size before returning array item in script doc values -area: Infra/Scripting -type: bug -issues: - - 104998 diff --git a/docs/changelog/109850.yaml b/docs/changelog/109850.yaml deleted file mode 100644 index 0f11318765aea..0000000000000 --- a/docs/changelog/109850.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109850 -summary: Ensure tasks preserve versions in `MasterService` -area: Cluster Coordination -type: bug -issues: [] diff --git a/docs/changelog/109873.yaml b/docs/changelog/109873.yaml new file mode 100644 index 0000000000000..c77197cc22d0a --- /dev/null +++ b/docs/changelog/109873.yaml @@ -0,0 +1,5 @@ +pr: 109873 +summary: "ESQL: add Arrow dataframes output format" +area: ES|QL +type: feature +issues: [] diff --git a/docs/changelog/109876.yaml b/docs/changelog/109876.yaml new file mode 100644 index 0000000000000..4a65b4e17c4a3 --- /dev/null +++ b/docs/changelog/109876.yaml @@ -0,0 +1,6 @@ +pr: 109876 +summary: Always pick the user `maxPageSize` value +area: Transform +type: bug +issues: + - 109844 diff --git a/docs/changelog/109880.yaml b/docs/changelog/109880.yaml new file mode 100644 index 0000000000000..71c7209824a8a --- /dev/null +++ b/docs/changelog/109880.yaml @@ -0,0 +1,10 @@ +pr: 109880 +summary: Deprecate `text_expansion` and `weighted_tokens` queries +area: Machine Learning +type: deprecation +issues: [ ] +deprecation: + title: Deprecate `text_expansion` and `weighted_tokens` queries + area: REST API + details: The `text_expansion` and `weighted_tokens` queries have been replaced by `sparse_vector`. 
+ impact: Please update your existing `text_expansion` and `weighted_tokens` queries to use `sparse_vector.` diff --git a/docs/changelog/109893.yaml b/docs/changelog/109893.yaml new file mode 100644 index 0000000000000..df6d6e51236c8 --- /dev/null +++ b/docs/changelog/109893.yaml @@ -0,0 +1,5 @@ +pr: 109893 +summary: Add Anthropic messages integration to Inference API +area: Machine Learning +type: enhancement +issues: [ ] diff --git a/docs/changelog/109908.yaml b/docs/changelog/109908.yaml new file mode 100644 index 0000000000000..cdf2acf17096c --- /dev/null +++ b/docs/changelog/109908.yaml @@ -0,0 +1,5 @@ +pr: 109908 +summary: "Update checkpoints after post-replication actions, even on failure" +area: CRUD +type: bug +issues: [] diff --git a/docs/changelog/109957.yaml b/docs/changelog/109957.yaml new file mode 100644 index 0000000000000..6bbcd8175501c --- /dev/null +++ b/docs/changelog/109957.yaml @@ -0,0 +1,6 @@ +pr: 109957 +summary: Add request metric to `RestController` to track success/failure (by status + code) +area: Infra/Metrics +type: enhancement +issues: [] diff --git a/docs/changelog/109963.yaml b/docs/changelog/109963.yaml new file mode 100644 index 0000000000000..1745d549582d4 --- /dev/null +++ b/docs/changelog/109963.yaml @@ -0,0 +1,6 @@ +pr: 109963 +summary: Propagate mapper builder context flags across nested mapper builder context + creation +area: Mapping +type: bug +issues: [] diff --git a/docs/changelog/109967.yaml b/docs/changelog/109967.yaml new file mode 100644 index 0000000000000..cfc6b6462954b --- /dev/null +++ b/docs/changelog/109967.yaml @@ -0,0 +1,5 @@ +pr: 109967 +summary: Default the HF service to cosine similarity +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/109981.yaml b/docs/changelog/109981.yaml new file mode 100644 index 0000000000000..cf9388f79e29c --- /dev/null +++ b/docs/changelog/109981.yaml @@ -0,0 +1,5 @@ +pr: 109981 +summary: Limit number of synonym rules that can be created +area: Mapping +type: bug +issues: [108785] diff --git a/docs/changelog/109989.yaml b/docs/changelog/109989.yaml new file mode 100644 index 0000000000000..f1f5972b60eb3 --- /dev/null +++ b/docs/changelog/109989.yaml @@ -0,0 +1,5 @@ +pr: 109989 +summary: "ESQL: Fix Join references" +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/109993.yaml b/docs/changelog/109993.yaml new file mode 100644 index 0000000000000..40d161b6b5c24 --- /dev/null +++ b/docs/changelog/109993.yaml @@ -0,0 +1,5 @@ +pr: 109993 +summary: "[ES|QL] `weighted_avg`" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/110004.yaml b/docs/changelog/110004.yaml new file mode 100644 index 0000000000000..f680016527a9c --- /dev/null +++ b/docs/changelog/110004.yaml @@ -0,0 +1,11 @@ +pr: 110004 +summary: Mark Query Rules as GA +area: Relevance +type: feature +issues: [] +highlight: + title: Mark Query Rules as GA + body: |- + This PR marks query rules as Generally Available. All APIs are no longer + in tech preview. 
+ notable: true diff --git a/docs/changelog/110016.yaml b/docs/changelog/110016.yaml new file mode 100644 index 0000000000000..28ad55aa796c8 --- /dev/null +++ b/docs/changelog/110016.yaml @@ -0,0 +1,5 @@ +pr: 110016 +summary: Opt in keyword field into fallback synthetic source if needed +area: Mapping +type: enhancement +issues: [] diff --git a/docs/changelog/110019.yaml b/docs/changelog/110019.yaml new file mode 100644 index 0000000000000..632e79008d351 --- /dev/null +++ b/docs/changelog/110019.yaml @@ -0,0 +1,6 @@ +pr: 110019 +summary: Improve mechanism for extracting the result of a `PlainActionFuture` +area: Distributed +type: enhancement +issues: + - 108125 diff --git a/docs/changelog/110021.yaml b/docs/changelog/110021.yaml new file mode 100644 index 0000000000000..51878b960dfd0 --- /dev/null +++ b/docs/changelog/110021.yaml @@ -0,0 +1,6 @@ +pr: 110021 +summary: "[ES|QL] validate `mv_sort` order" +area: ES|QL +type: bug +issues: + - 109910 diff --git a/docs/changelog/110046.yaml b/docs/changelog/110046.yaml new file mode 100644 index 0000000000000..6ebe440e7aced --- /dev/null +++ b/docs/changelog/110046.yaml @@ -0,0 +1,6 @@ +pr: 110046 +summary: "ESQL: make named params objects truly per request" +area: ES|QL +type: bug +issues: + - 110028 diff --git a/docs/changelog/110059.yaml b/docs/changelog/110059.yaml new file mode 100644 index 0000000000000..ba160c091cdc2 --- /dev/null +++ b/docs/changelog/110059.yaml @@ -0,0 +1,32 @@ +pr: 110059 +summary: Adds new `bit` `element_type` for `dense_vectors` +area: Vector Search +type: feature +issues: [] +highlight: + title: Adds new `bit` `element_type` for `dense_vectors` + body: |- + This adds `bit` vector support by adding `element_type: bit` for + vectors. This new element type works for indexed and non-indexed + vectors. Additionally, it works with `hnsw` and `flat` index types. No + quantization-based codec works with this element type; this is + consistent with `byte` vectors. + + `bit` vectors accept up to `32768` dimensions in size and expect vectors + that are being indexed to be encoded either as a hexadecimal string or a + `byte[]` array where each element of the `byte` array represents `8` + bits of the vector. + + `bit` vectors support script usage and regular query usage. When + indexed, all comparisons done are `xor` and `popcount` summations (aka, + hamming distance), and the scores are transformed and normalized given + the vector dimensions. + + For scripts, `l1norm` is the same as `hamming` distance and `l2norm` is + `sqrt(l1norm)`. `dotProduct` and `cosineSimilarity` are not supported. + + Note that the dimensions expected by this `element_type` must always be + divisible by `8`, and the `byte[]` vectors provided for indexing must + have size `dim/8`, where each byte element represents `8` bits of + the vector.
+ notable: true diff --git a/docs/changelog/110061.yaml b/docs/changelog/110061.yaml new file mode 100644 index 0000000000000..1880a2a197722 --- /dev/null +++ b/docs/changelog/110061.yaml @@ -0,0 +1,6 @@ +pr: 110061 +summary: Avoiding running watch jobs in TickerScheduleTriggerEngine if it is paused +area: Watcher +type: bug +issues: + - 105933 diff --git a/docs/changelog/110066.yaml b/docs/changelog/110066.yaml new file mode 100644 index 0000000000000..920c6304b63ae --- /dev/null +++ b/docs/changelog/110066.yaml @@ -0,0 +1,6 @@ +pr: 110066 +summary: Support flattened fields and multi-fields as dimensions in downsampling +area: Downsampling +type: bug +issues: + - 99297 diff --git a/docs/changelog/110096.yaml b/docs/changelog/110096.yaml new file mode 100644 index 0000000000000..3d6616c289266 --- /dev/null +++ b/docs/changelog/110096.yaml @@ -0,0 +1,6 @@ +pr: 110096 +summary: Fix `ClassCastException` with MV_EXPAND on missing field +area: ES|QL +type: bug +issues: + - 109974 diff --git a/docs/changelog/110102.yaml b/docs/changelog/110102.yaml new file mode 100644 index 0000000000000..d1b9b53e2dfc5 --- /dev/null +++ b/docs/changelog/110102.yaml @@ -0,0 +1,6 @@ +pr: 110102 +summary: Optimize ST_DISTANCE filtering with Lucene circle intersection query +area: ES|QL +type: enhancement +issues: + - 109972 diff --git a/docs/changelog/110112.yaml b/docs/changelog/110112.yaml new file mode 100644 index 0000000000000..eca5fd9af15ce --- /dev/null +++ b/docs/changelog/110112.yaml @@ -0,0 +1,5 @@ +pr: 110112 +summary: Increase response size limit for batched requests +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/110146.yaml b/docs/changelog/110146.yaml new file mode 100644 index 0000000000000..61ba35cec319b --- /dev/null +++ b/docs/changelog/110146.yaml @@ -0,0 +1,5 @@ +pr: 110146 +summary: Fix trailing slash in `ml.get_categories` specification +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/110160.yaml b/docs/changelog/110160.yaml new file mode 100644 index 0000000000000..0c38c23c69067 --- /dev/null +++ b/docs/changelog/110160.yaml @@ -0,0 +1,5 @@ +pr: 110160 +summary: Opt in number fields into fallback synthetic source when doc values a… +area: Mapping +type: enhancement +issues: [] diff --git a/docs/changelog/110176.yaml b/docs/changelog/110176.yaml new file mode 100644 index 0000000000000..ae1d7d10d6dc4 --- /dev/null +++ b/docs/changelog/110176.yaml @@ -0,0 +1,5 @@ +pr: 110176 +summary: Fix trailing slash in two rollup specifications +area: Rollup +type: bug +issues: [] diff --git a/docs/changelog/110177.yaml b/docs/changelog/110177.yaml new file mode 100644 index 0000000000000..0ac5328d88df4 --- /dev/null +++ b/docs/changelog/110177.yaml @@ -0,0 +1,5 @@ +pr: 110177 +summary: Fix trailing slash in `security.put_privileges` specification +area: Authorization +type: bug +issues: [] diff --git a/docs/changelog/110179.yaml b/docs/changelog/110179.yaml new file mode 100644 index 0000000000000..b99a390c8586f --- /dev/null +++ b/docs/changelog/110179.yaml @@ -0,0 +1,6 @@ +pr: 110179 +summary: Make repository analysis API available to non-operators +area: Snapshot/Restore +type: enhancement +issues: + - 100318 diff --git a/docs/changelog/110186.yaml b/docs/changelog/110186.yaml new file mode 100644 index 0000000000000..23eaab118e2ab --- /dev/null +++ b/docs/changelog/110186.yaml @@ -0,0 +1,6 @@ +pr: 110186 +summary: Don't sample calls to `ReduceContext#consumeBucketsAndMaybeBreak` ins `InternalDateHistogram` + and `InternalHistogram` during reduction 
+area: Aggregations +type: bug +issues: [] diff --git a/docs/changelog/110201.yaml b/docs/changelog/110201.yaml new file mode 100644 index 0000000000000..a880638881948 --- /dev/null +++ b/docs/changelog/110201.yaml @@ -0,0 +1,6 @@ +pr: 110201 +summary: "ES|QL: Fix DISSECT that overwrites input" +area: ES|QL +type: bug +issues: + - 110184 diff --git a/docs/changelog/110214.yaml b/docs/changelog/110214.yaml new file mode 100644 index 0000000000000..20f61cac64454 --- /dev/null +++ b/docs/changelog/110214.yaml @@ -0,0 +1,5 @@ +pr: 110214 +summary: Handle `ignore_above` in synthetic source for flattened fields +area: Mapping +type: enhancement +issues: [] diff --git a/docs/changelog/110233.yaml b/docs/changelog/110233.yaml new file mode 100644 index 0000000000000..d9ce4057090a4 --- /dev/null +++ b/docs/changelog/110233.yaml @@ -0,0 +1,6 @@ +pr: 110233 +summary: Support k parameter for knn query +area: Vector Search +type: enhancement +issues: + - 108473 diff --git a/docs/changelog/110234.yaml b/docs/changelog/110234.yaml new file mode 100644 index 0000000000000..0656ba5fb6636 --- /dev/null +++ b/docs/changelog/110234.yaml @@ -0,0 +1,5 @@ +pr: 110234 +summary: Upgrade to Lucene-9.11.1 +area: Search +type: upgrade +issues: [] diff --git a/docs/changelog/110236.yaml b/docs/changelog/110236.yaml new file mode 100644 index 0000000000000..e2dbff7fbf768 --- /dev/null +++ b/docs/changelog/110236.yaml @@ -0,0 +1,21 @@ +pr: 110236 +summary: '`ParseHeapRatioOrDeprecatedByteSizeValue` for `indices.breaker.total.limit`' +area: Infra/Settings +type: deprecation +issues: [] +deprecation: + title: 'Deprecate absolute size values for `indices.breaker.total.limit` setting' + area: Cluster and node setting + details: Previously, the value of `indices.breaker.total.limit` could be specified as + an absolute size in bytes. This setting controls the overall amount of + memory the server is allowed to use before taking remedial actions. Setting + this to a specific number of bytes led to strange behaviour when the node + maximum heap size changed because the circuit breaker limit would remain + unchanged. This would either leave the value too low, causing part of the + heap to remain unused; or it would leave the value too high, causing the + circuit breaker to be ineffective at preventing OOM errors. The only + reasonable behaviour for this setting is that it scales with the size of + the heap, and so absolute byte limits are now deprecated. + impact: Users must change their configuration to specify a percentage instead of + an absolute number of bytes for `indices.breaker.total.limit`, or else + accept the default, which is already specified as a percentage. diff --git a/docs/changelog/110251.yaml b/docs/changelog/110251.yaml new file mode 100644 index 0000000000000..a3b0c3128be35 --- /dev/null +++ b/docs/changelog/110251.yaml @@ -0,0 +1,13 @@ +pr: 110251 +summary: Support index sorting with nested fields +area: Logs +type: enhancement +issues: + - 107349 +highlight: + title: Index sorting on indexes with nested fields + body: |- + Index sorting is now supported for indexes with mappings containing nested objects. + The index sort spec (as specified by `index.sort.field`) still can't contain any nested + fields.
+ notable: false diff --git a/docs/changelog/110334.yaml b/docs/changelog/110334.yaml new file mode 100644 index 0000000000000..f83ac04ded773 --- /dev/null +++ b/docs/changelog/110334.yaml @@ -0,0 +1,5 @@ +pr: 110334 +summary: Sentence Chunker +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/110337.yaml b/docs/changelog/110337.yaml new file mode 100644 index 0000000000000..bf21a95c9157f --- /dev/null +++ b/docs/changelog/110337.yaml @@ -0,0 +1,5 @@ +pr: 110337 +summary: Support `ignore_above` on keyword dimensions +area: TSDB +type: enhancement +issues: [] diff --git a/docs/changelog/110338.yaml b/docs/changelog/110338.yaml new file mode 100644 index 0000000000000..2334a1cbc9283 --- /dev/null +++ b/docs/changelog/110338.yaml @@ -0,0 +1,5 @@ +pr: 110338 +summary: Add `semantic_text` field type and `semantic` query +area: Mapping +type: feature +issues: [] diff --git a/docs/changelog/110347.yaml b/docs/changelog/110347.yaml new file mode 100644 index 0000000000000..8727128230935 --- /dev/null +++ b/docs/changelog/110347.yaml @@ -0,0 +1,5 @@ +pr: 110347 +summary: "ESQL: Renamed `TopList` to Top" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/110352.yaml b/docs/changelog/110352.yaml new file mode 100644 index 0000000000000..7dad1ce5f6dd4 --- /dev/null +++ b/docs/changelog/110352.yaml @@ -0,0 +1,5 @@ +pr: 110352 +summary: Search coordinator uses `event.ingested` in cluster state to do rewrites +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/110361.yaml b/docs/changelog/110361.yaml new file mode 100644 index 0000000000000..8558c88e06049 --- /dev/null +++ b/docs/changelog/110361.yaml @@ -0,0 +1,7 @@ +pr: 110361 +summary: Don't detect `PlainActionFuture` deadlock on concurrent complete +area: Distributed +type: bug +issues: + - 110181 + - 110360 diff --git a/docs/changelog/110369.yaml b/docs/changelog/110369.yaml new file mode 100644 index 0000000000000..770294605b444 --- /dev/null +++ b/docs/changelog/110369.yaml @@ -0,0 +1,6 @@ +pr: 110369 +summary: Run terms concurrently when cardinality is only lower than shard size +area: Aggregations +type: bug +issues: + - 105505 diff --git a/docs/changelog/110383.yaml b/docs/changelog/110383.yaml new file mode 100644 index 0000000000000..5e9bddd4bfcd2 --- /dev/null +++ b/docs/changelog/110383.yaml @@ -0,0 +1,5 @@ +pr: 110383 +summary: Add bulk delete roles API +area: Security +type: enhancement +issues: [] diff --git a/docs/changelog/110391.yaml b/docs/changelog/110391.yaml new file mode 100644 index 0000000000000..1e00eda970398 --- /dev/null +++ b/docs/changelog/110391.yaml @@ -0,0 +1,6 @@ +pr: 110391 +summary: Fix ST_DISTANCE Lucene push-down for complex predicates +area: ES|QL +type: bug +issues: + - 110349 diff --git a/docs/changelog/110395.yaml b/docs/changelog/110395.yaml new file mode 100644 index 0000000000000..690be55abb5b2 --- /dev/null +++ b/docs/changelog/110395.yaml @@ -0,0 +1,9 @@ +pr: 110395 +summary: Mark the Redact processor as Generally Available +area: Ingest Node +type: feature +issues: [] +highlight: + title: The Redact processor is Generally Available + body: The Redact processor uses the Grok rules engine to obscure text in the input document matching the given Grok patterns. The Redact processor was initially released as Technical Preview in `8.7.0`, and is now released as Generally Available. 
+ notable: true diff --git a/docs/changelog/110399.yaml b/docs/changelog/110399.yaml new file mode 100644 index 0000000000000..9e04e2656809e --- /dev/null +++ b/docs/changelog/110399.yaml @@ -0,0 +1,6 @@ +pr: 110399 +summary: "[Inference API] Prevent inference endpoints from being deleted if they are\ + \ referenced by semantic text" +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/110427.yaml b/docs/changelog/110427.yaml new file mode 100644 index 0000000000000..ba8a1246e90e4 --- /dev/null +++ b/docs/changelog/110427.yaml @@ -0,0 +1,6 @@ +pr: 110427 +summary: "[Inference API] Remove unused Cohere rerank service settings fields in a\ + \ BWC way" +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/110431.yaml b/docs/changelog/110431.yaml new file mode 100644 index 0000000000000..0dd93ef718ef9 --- /dev/null +++ b/docs/changelog/110431.yaml @@ -0,0 +1,5 @@ +pr: 110431 +summary: "[Inference API] Fix serialization for inference delete endpoint response" +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/110476.yaml b/docs/changelog/110476.yaml new file mode 100644 index 0000000000000..bc12b3711a366 --- /dev/null +++ b/docs/changelog/110476.yaml @@ -0,0 +1,7 @@ +pr: 110476 +summary: Fix bug in union-types with type-casting in grouping key of STATS +area: ES|QL +type: bug +issues: + - 109922 + - 110477 diff --git a/docs/changelog/110520.yaml b/docs/changelog/110520.yaml new file mode 100644 index 0000000000000..fba4b84e2279e --- /dev/null +++ b/docs/changelog/110520.yaml @@ -0,0 +1,5 @@ +pr: 110520 +summary: Add protection for OOM during aggregations partial reduction +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/110527.yaml b/docs/changelog/110527.yaml new file mode 100644 index 0000000000000..3ab19ecaaaa76 --- /dev/null +++ b/docs/changelog/110527.yaml @@ -0,0 +1,5 @@ +pr: 110527 +summary: "ESQL: Add boolean support to Max and Min aggs" +area: ES|QL +type: feature +issues: [] diff --git a/docs/changelog/110540.yaml b/docs/changelog/110540.yaml new file mode 100644 index 0000000000000..5e4994da80704 --- /dev/null +++ b/docs/changelog/110540.yaml @@ -0,0 +1,16 @@ +pr: 110540 +summary: Deprecate using slm privileges to access ilm +area: ILM+SLM +type: deprecation +issues: [] +deprecation: + title: Deprecate using slm privileges to access ilm + area: REST API + details: The `read_slm` privilege can get the ILM status, and + the `manage_slm` privilege can start and stop ILM. Access to these + APIs should be granted using the `read_ilm` and `manage_ilm` privileges + instead. Access to ILM APIs will be removed from SLM privileges in + a future major release, and is now deprecated. + impact: Users that need access to the ILM status API should now + use the `read_ilm` privilege. Users that need to start and stop ILM, + should use the `manage_ilm` privilege. 
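The `110540` deprecation above tells users to grant ILM access explicitly instead of relying on the SLM privileges. A minimal sketch of that migration using the create-or-update role API (the role name `slm_and_ilm_admin` is hypothetical; `manage_slm` and `manage_ilm` are existing cluster privileges, and the set should be adapted to the deployment):

[source,console]
----
PUT _security/role/slm_and_ilm_admin
{
  "cluster": [ "manage_slm", "manage_ilm" ]
}
----

A role defined this way keeps its ILM access once the SLM privileges stop implying it; read-only variants would use `read_slm` and `read_ilm` instead.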
diff --git a/docs/changelog/110554.yaml b/docs/changelog/110554.yaml new file mode 100644 index 0000000000000..8c0b896a4c979 --- /dev/null +++ b/docs/changelog/110554.yaml @@ -0,0 +1,5 @@ +pr: 110554 +summary: Fix `MapperBuilderContext#isDataStream` when used in dynamic mappers +area: "Mapping" +type: bug +issues: [] diff --git a/docs/changelog/110574.yaml b/docs/changelog/110574.yaml new file mode 100644 index 0000000000000..1840838500151 --- /dev/null +++ b/docs/changelog/110574.yaml @@ -0,0 +1,6 @@ +pr: 110574 +summary: "ES|QL: better validation for GROK patterns" +area: ES|QL +type: bug +issues: + - 110533 diff --git a/docs/changelog/110578.yaml b/docs/changelog/110578.yaml new file mode 100644 index 0000000000000..5d48171e4f328 --- /dev/null +++ b/docs/changelog/110578.yaml @@ -0,0 +1,5 @@ +pr: 110578 +summary: Add `size_in_bytes` to enrich cache stats +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/changelog/110586.yaml b/docs/changelog/110586.yaml new file mode 100644 index 0000000000000..cc2bcb85a2dac --- /dev/null +++ b/docs/changelog/110586.yaml @@ -0,0 +1,5 @@ +pr: 110586 +summary: "ESQL: Fix Max doubles bug with negatives and add tests for Max and Min" +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/110603.yaml b/docs/changelog/110603.yaml new file mode 100644 index 0000000000000..4ba19985853df --- /dev/null +++ b/docs/changelog/110603.yaml @@ -0,0 +1,6 @@ +pr: 110603 +summary: Stop iterating over all fields to extract @timestamp value +area: TSDB +type: enhancement +issues: + - 92297 diff --git a/docs/changelog/110651.yaml b/docs/changelog/110651.yaml new file mode 100644 index 0000000000000..c25c63ee0284a --- /dev/null +++ b/docs/changelog/110651.yaml @@ -0,0 +1,5 @@ +pr: 110651 +summary: "Remove `default_field: message` from metrics index templates" +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/110665.yaml b/docs/changelog/110665.yaml new file mode 100644 index 0000000000000..fa6db3190fe60 --- /dev/null +++ b/docs/changelog/110665.yaml @@ -0,0 +1,6 @@ +pr: 110665 +summary: "[ESQL] Fix parsing of large magnitude negative numbers" +area: ES|QL +type: bug +issues: + - 104323 diff --git a/docs/changelog/110666.yaml b/docs/changelog/110666.yaml new file mode 100644 index 0000000000000..d96f8e2024c81 --- /dev/null +++ b/docs/changelog/110666.yaml @@ -0,0 +1,5 @@ +pr: 110666 +summary: Removing the use of Stream::peek from `GeoIpDownloader::cleanDatabases` +area: Ingest Node +type: bug +issues: [] diff --git a/docs/changelog/110676.yaml b/docs/changelog/110676.yaml new file mode 100644 index 0000000000000..efe7e0e55f18f --- /dev/null +++ b/docs/changelog/110676.yaml @@ -0,0 +1,5 @@ +pr: 110676 +summary: Allow querying `index_mode` +area: Mapping +type: enhancement +issues: [] diff --git a/docs/changelog/110707.yaml b/docs/changelog/110707.yaml new file mode 100644 index 0000000000000..e13688c73c743 --- /dev/null +++ b/docs/changelog/110707.yaml @@ -0,0 +1,5 @@ +pr: 110707 +summary: Fix issue with returning incomplete fragment for plain highlighter +area: Highlighting +type: bug +issues: [] diff --git a/docs/reference/analysis/tokenfilters/unique-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/unique-tokenfilter.asciidoc index 5afed11923a2a..f88cad3296282 100644 --- a/docs/reference/analysis/tokenfilters/unique-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/unique-tokenfilter.asciidoc @@ -78,7 +78,7 @@ following output: "start_offset" : 24, "end_offset" : 28, "type" : "word", - 
"position" : 4 + "position" : 5 } ] } diff --git a/docs/reference/connector/apis/list-connector-sync-jobs-api.asciidoc b/docs/reference/connector/apis/list-connector-sync-jobs-api.asciidoc index 217b29451937d..730dad852adee 100644 --- a/docs/reference/connector/apis/list-connector-sync-jobs-api.asciidoc +++ b/docs/reference/connector/apis/list-connector-sync-jobs-api.asciidoc @@ -31,7 +31,7 @@ To get started with Connector APIs, check out the {enterprise-search-ref}/connec (Optional, integer) The offset from the first result to fetch. Defaults to `0`. `status`:: -(Optional, job status) A comma-separated list of job statuses to filter the results. Available statuses include: `canceling`, `canceled`, `completed`, `error`, `in_progress`, `pending`, `suspended`. +(Optional, job status) A job status to filter the results for. Available statuses include: `canceling`, `canceled`, `completed`, `error`, `in_progress`, `pending`, `suspended`. `connector_id`:: (Optional, string) The connector id the fetched sync jobs need to have. diff --git a/docs/reference/connector/apis/set-connector-sync-job-stats-api.asciidoc b/docs/reference/connector/apis/set-connector-sync-job-stats-api.asciidoc index 4dd9cc6e67ab2..1427269d22b86 100644 --- a/docs/reference/connector/apis/set-connector-sync-job-stats-api.asciidoc +++ b/docs/reference/connector/apis/set-connector-sync-job-stats-api.asciidoc @@ -53,6 +53,9 @@ This API is mainly used by the connector service for updating sync job informati `last_seen`:: (Optional, instant) The timestamp to set the connector sync job's `last_seen` property. +`metadata`:: +(Optional, object) The connector-specific metadata. + [[set-connector-sync-job-stats-api-response-codes]] ==== {api-response-codes-title} diff --git a/docs/reference/connector/apis/update-connector-error-api.asciidoc b/docs/reference/connector/apis/update-connector-error-api.asciidoc index 67ea6b6d17cf0..c6ac0c9a1ac22 100644 --- a/docs/reference/connector/apis/update-connector-error-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-error-api.asciidoc @@ -21,6 +21,11 @@ To get started with Connector APIs, check out the {enterprise-search-ref}/connec * To sync data using self-managed connectors, you need to deploy the {enterprise-search-ref}/build-connector.html[Elastic connector service] on your own infrastructure. This service runs automatically on Elastic Cloud for native connectors. * The `connector_id` parameter should reference an existing connector. +[[update-connector-error-api-desc]] +==== {api-description-title} + +Sets the `error` field for the specified connector. If the `error` provided in the request body is non-null, the connector's status is updated to `error`. Otherwise, if the `error` is reset to null, the connector status is updated to `connected`. + [[update-connector-error-api-path-params]] ==== {api-path-parms-title} diff --git a/docs/reference/data-streams/downsampling.asciidoc b/docs/reference/data-streams/downsampling.asciidoc index b005e83e8c95d..0b08b0972f9a1 100644 --- a/docs/reference/data-streams/downsampling.asciidoc +++ b/docs/reference/data-streams/downsampling.asciidoc @@ -18,9 +18,9 @@ Metrics solutions collect large amounts of time series data that grow over time. As that data ages, it becomes less relevant to the current state of the system. The downsampling process rolls up documents within a fixed time interval into a single summary document. 
Each summary document includes statistical -representations of the original data: the `min`, `max`, `sum`, `value_count`, -and `average` for each metric. Data stream <> are stored unchanged. +representations of the original data: the `min`, `max`, `sum` and `value_count` +for each metric. Data stream <> +are stored unchanged. Downsampling, in effect, lets you to trade data resolution and precision for storage size. You can include it in an <>, and as a result is -subject to a number of <>. +subject to some <> and <> applied to the `_source` field. NOTE: A time series index can contain fields other than dimensions or metrics. @@ -109,19 +110,6 @@ parameter: For a flattened field, use the `time_series_dimensions` parameter to configure an array of fields as dimensions. For details refer to <>. -[[dimension-limits]] -.Dimension limits -**** -In a TSDS, {es} uses dimensions to -generate the document `_id` and <> values. The resulting `_id` is -always a short encoded hash. To prevent the `_tsid` value from being overly -large, {es} limits the number of dimensions for an index using the -<> -index setting. While you can increase this limit, the resulting document `_tsid` -value can't exceed 32KB. Additionally the field name of a dimension cannot be -longer than 512 bytes and the each dimension value can't exceed 1kb. -**** - [discrete] [[time-series-metric]] ==== Metrics @@ -290,11 +278,6 @@ created the initial backing index has: Only data that falls inside that range can be indexed. -In our <>, -`index.look_ahead_time` is set to three hours, so only documents with a -`@timestamp` value that is within three hours previous or subsequent to the -present time are accepted for indexing. - You can use the <> to check the accepted time range for writing to any TSDS. diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index 9d359fd7d7f02..ccc8e67f39bc0 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -211,7 +211,7 @@ creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed. For more information about field mapping, see <> and the <> API. -Automatic index creation is controlled by the `action.auto_create_index` +Automatic index creation is controlled by the <> setting. This setting defaults to `true`, which allows any index to be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns, or set it to diff --git a/docs/reference/esql/esql-query-api.asciidoc b/docs/reference/esql/esql-query-api.asciidoc index 2cdd97ceab176..e8cfa03e3ee88 100644 --- a/docs/reference/esql/esql-query-api.asciidoc +++ b/docs/reference/esql/esql-query-api.asciidoc @@ -75,6 +75,11 @@ For syntax, refer to <>. (Optional, array) Values for parameters in the `query`. For syntax, refer to <>. +`profile`:: +(Optional, boolean) If provided and `true` the response will include an extra `profile` object +with information about how the query was executed. It provides insight into the performance +of each part of the query. This is for human debugging as the object's format might change at any time. + `query`:: (Required, string) {esql} query to run. For syntax, refer to <>. @@ -100,3 +105,8 @@ returned if `drop_null_columns` is sent with the request. `rows`:: (array of arrays) Values for the search results. + +`profile`:: +(object) +Profile describing the execution of the query. 
Only returned if `profile` was sent in the body. +The object itself is for human debugging and can change at any time. diff --git a/docs/reference/esql/esql-rest.asciidoc b/docs/reference/esql/esql-rest.asciidoc index de2b6dedd8776..5b90e96d7a734 100644 --- a/docs/reference/esql/esql-rest.asciidoc +++ b/docs/reference/esql/esql-rest.asciidoc @@ -111,6 +111,9 @@ s|Description |{wikipedia}/Smile_(data_interchange_format)[Smile] binary data format similar to CBOR +|arrow +|application/vnd.apache.arrow.stream +|**Experimental.** https://arrow.apache.org/[Apache Arrow] dataframes, https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format[IPC streaming format] |=== The `csv` format accepts a formatting URL query attribute, `delimiter`, which diff --git a/docs/reference/esql/functions/aggregation-functions.asciidoc b/docs/reference/esql/functions/aggregation-functions.asciidoc index 074fcce9ad43d..82931b84fd44a 100644 --- a/docs/reference/esql/functions/aggregation-functions.asciidoc +++ b/docs/reference/esql/functions/aggregation-functions.asciidoc @@ -8,27 +8,31 @@ The <> command supports these aggregate functions: // tag::agg_list[] -* <> +* <> * <> * <> -* <> +* <> * <> * <> -* <> +* <> * <> * experimental:[] <> * <> +* <> * <> +* experimental:[] <> // end::agg_list[] -include::avg.asciidoc[] include::count.asciidoc[] include::count-distinct.asciidoc[] -include::max.asciidoc[] include::median.asciidoc[] include::median-absolute-deviation.asciidoc[] -include::min.asciidoc[] include::percentile.asciidoc[] include::st_centroid_agg.asciidoc[] include::sum.asciidoc[] +include::layout/avg.asciidoc[] +include::layout/max.asciidoc[] +include::layout/min.asciidoc[] +include::layout/top.asciidoc[] include::values.asciidoc[] +include::weighted-avg.asciidoc[] diff --git a/docs/reference/esql/functions/avg.asciidoc b/docs/reference/esql/functions/avg.asciidoc deleted file mode 100644 index 7eadff29f1bfc..0000000000000 --- a/docs/reference/esql/functions/avg.asciidoc +++ /dev/null @@ -1,47 +0,0 @@ -[discrete] -[[esql-agg-avg]] -=== `AVG` - -*Syntax* - -[source,esql] ----- -AVG(expression) ----- - -`expression`:: -Numeric expression. -//If `null`, the function returns `null`. -// TODO: Remove comment when https://github.com/elastic/elasticsearch/issues/104900 is fixed. - -*Description* - -The average of a numeric expression. - -*Supported types* - -The result is always a `double` no matter the input type. - -*Examples* - -[source.merge.styled,esql] ----- -include::{esql-specs}/stats.csv-spec[tag=avg] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/stats.csv-spec[tag=avg-result] -|=== - -The expression can use inline functions. For example, to calculate the average -over a multivalued column, first use `MV_AVG` to average the multiple values per -row, and use the result with the `AVG` function: - -[source.merge.styled,esql] ----- -include::{esql-specs}/stats.csv-spec[tag=docsStatsAvgNestedExpression] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/stats.csv-spec[tag=docsStatsAvgNestedExpression-result] -|=== diff --git a/docs/reference/esql/functions/binary.asciidoc b/docs/reference/esql/functions/binary.asciidoc index 959bbe11c040e..72d466ae83d11 100644 --- a/docs/reference/esql/functions/binary.asciidoc +++ b/docs/reference/esql/functions/binary.asciidoc @@ -7,6 +7,12 @@ [.text-center] image::esql/functions/signature/equals.svg[Embedded,opts=inline] +Check if two fields are equal. 
If either field is <> then +the result is `null`. + +NOTE: This is pushed to the underlying search index if one side of the comparison is constant + and the other side is a field in the index that has both an <> and <>. + Supported types: include::types/equals.asciidoc[] @@ -15,6 +21,12 @@ include::types/equals.asciidoc[] [.text-center] image::esql/functions/signature/not_equals.svg[Embedded,opts=inline] +Check if two fields are unequal. If either field is <> then +the result is `null`. + +NOTE: This is pushed to the underlying search index if one side of the comparison is constant + and the other side is a field in the index that has both an <> and <>. + Supported types: include::types/not_equals.asciidoc[] @@ -23,55 +35,112 @@ include::types/not_equals.asciidoc[] [.text-center] image::esql/functions/signature/less_than.svg[Embedded,opts=inline] +Check if one field is less than another. If either field is <> +then the result is `null`. + +NOTE: This is pushed to the underlying search index if one side of the comparison is constant + and the other side is a field in the index that has both an <> and <>. + +Supported types: + include::types/less_than.asciidoc[] ==== Less than or equal to `<=` [.text-center] image::esql/functions/signature/less_than_or_equal.svg[Embedded,opts=inline] +Check if one field is less than or equal to another. If either field is <> +then the result is `null`. + +NOTE: This is pushed to the underlying search index if one side of the comparison is constant + and the other side is a field in the index that has both an <> and <>. + +Supported types: + include::types/less_than_or_equal.asciidoc[] ==== Greater than `>` [.text-center] image::esql/functions/signature/greater_than.svg[Embedded,opts=inline] +Check if one field is greater than another. If either field is <> +then the result is `null`. + +NOTE: This is pushed to the underlying search index if one side of the comparison is constant + and the other side is a field in the index that has both an <> and <>. + +Supported types: + include::types/greater_than.asciidoc[] ==== Greater than or equal to `>=` [.text-center] image::esql/functions/signature/greater_than_or_equal.svg[Embedded,opts=inline] +Check if one field is greater than or equal to another. If either field is <> +then the result is `null`. + +NOTE: This is pushed to the underlying search index if one side of the comparison is constant + and the other side is a field in the index that has both an <> and <>. + +Supported types: + include::types/greater_than_or_equal.asciidoc[] ==== Add `+` [.text-center] image::esql/functions/signature/add.svg[Embedded,opts=inline] +Add two numbers together. If either field is <> +then the result is `null`. + +Supported types: + include::types/add.asciidoc[] ==== Subtract `-` [.text-center] image::esql/functions/signature/sub.svg[Embedded,opts=inline] +Subtract one number from another. If either field is <> +then the result is `null`. + +Supported types: + include::types/sub.asciidoc[] ==== Multiply `*` [.text-center] image::esql/functions/signature/mul.svg[Embedded,opts=inline] +Multiply two numbers together. If either field is <> +then the result is `null`. + +Supported types: + include::types/mul.asciidoc[] ==== Divide `/` [.text-center] image::esql/functions/signature/div.svg[Embedded,opts=inline] +Divide one number by another. If either field is <> +then the result is `null`. + NOTE: Division of two integer types will yield an integer result, rounding towards 0. 
If you need floating point division, <> one of the arguments to a `DOUBLE`. +Supported types: + include::types/div.asciidoc[] ==== Modulus `%` [.text-center] image::esql/functions/signature/mod.svg[Embedded,opts=inline] +Divide one number by another and return the remainder. If either field is <> +then the result is `null`. + +Supported types: + include::types/mod.asciidoc[] diff --git a/docs/reference/esql/functions/description/avg.asciidoc b/docs/reference/esql/functions/description/avg.asciidoc new file mode 100644 index 0000000000000..545d7e8394e8b --- /dev/null +++ b/docs/reference/esql/functions/description/avg.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +The average of a numeric field. diff --git a/docs/reference/esql/functions/description/locate.asciidoc b/docs/reference/esql/functions/description/locate.asciidoc index 60a6d435e37b6..e5a6fba512432 100644 --- a/docs/reference/esql/functions/description/locate.asciidoc +++ b/docs/reference/esql/functions/description/locate.asciidoc @@ -2,4 +2,4 @@ *Description* -Returns an integer that indicates the position of a keyword substring within another string +Returns an integer that indicates the position of a keyword substring within another string. diff --git a/docs/reference/esql/functions/description/max.asciidoc b/docs/reference/esql/functions/description/max.asciidoc new file mode 100644 index 0000000000000..27a76ed69c3c0 --- /dev/null +++ b/docs/reference/esql/functions/description/max.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +The maximum value of a field. diff --git a/docs/reference/esql/functions/description/min.asciidoc b/docs/reference/esql/functions/description/min.asciidoc new file mode 100644 index 0000000000000..406125b5761d1 --- /dev/null +++ b/docs/reference/esql/functions/description/min.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +The minimum value of a field. diff --git a/docs/reference/esql/functions/description/st_distance.asciidoc b/docs/reference/esql/functions/description/st_distance.asciidoc new file mode 100644 index 0000000000000..b27fcef0eb4f7 --- /dev/null +++ b/docs/reference/esql/functions/description/st_distance.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Computes the distance between two points. For cartesian geometries, this is the pythagorean distance in the same units as the original coordinates. For geographic geometries, this is the circular distance along the great circle in meters. diff --git a/docs/reference/esql/functions/description/substring.asciidoc b/docs/reference/esql/functions/description/substring.asciidoc index edb97b219bbe0..3d8091f26c04d 100644 --- a/docs/reference/esql/functions/description/substring.asciidoc +++ b/docs/reference/esql/functions/description/substring.asciidoc @@ -2,4 +2,4 @@ *Description* -Returns a substring of a string, specified by a start position and an optional length +Returns a substring of a string, specified by a start position and an optional length. 
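The operator documentation above notes that dividing two integers yields an integer rounded towards 0, and the query API changes earlier in this diff add an optional `profile` flag. A minimal sketch combining both against the ES|QL `_query` endpoint (the row values are illustrative only):

[source,console]
----
POST /_query
{
  "query": "ROW a = 7, b = 2 | EVAL int_div = a / b, double_div = a / TO_DOUBLE(b)",
  "profile": true
}
----

Here `int_div` evaluates to `3` while `double_div` is `3.5`, since casting one argument with `TO_DOUBLE` switches to floating point division; the extra `profile` object in the response is intended for human debugging only, as the API docs above caution.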
diff --git a/docs/reference/esql/functions/description/top.asciidoc b/docs/reference/esql/functions/description/top.asciidoc new file mode 100644 index 0000000000000..39b31e17aec55 --- /dev/null +++ b/docs/reference/esql/functions/description/top.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Collects the top values for a field. Includes repeated values. diff --git a/docs/reference/esql/functions/examples/avg.asciidoc b/docs/reference/esql/functions/examples/avg.asciidoc new file mode 100644 index 0000000000000..b6193ad50ed21 --- /dev/null +++ b/docs/reference/esql/functions/examples/avg.asciidoc @@ -0,0 +1,22 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Examples* + +[source.merge.styled,esql] +---- +include::{esql-specs}/stats.csv-spec[tag=avg] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats.csv-spec[tag=avg-result] +|=== +The expression can use inline functions. For example, to calculate the average over a multivalued column, first use `MV_AVG` to average the multiple values per row, and use the result with the `AVG` function +[source.merge.styled,esql] +---- +include::{esql-specs}/stats.csv-spec[tag=docsStatsAvgNestedExpression] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats.csv-spec[tag=docsStatsAvgNestedExpression-result] +|=== + diff --git a/docs/reference/esql/functions/max.asciidoc b/docs/reference/esql/functions/examples/max.asciidoc similarity index 55% rename from docs/reference/esql/functions/max.asciidoc rename to docs/reference/esql/functions/examples/max.asciidoc index f2e0d0a0205b3..dc57118931ef7 100644 --- a/docs/reference/esql/functions/max.asciidoc +++ b/docs/reference/esql/functions/examples/max.asciidoc @@ -1,24 +1,6 @@ -[discrete] -[[esql-agg-max]] -=== `MAX` +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. -*Syntax* - -[source,esql] ----- -MAX(expression) ----- - -*Parameters* - -`expression`:: -Expression from which to return the maximum value. - -*Description* - -Returns the maximum value of a numeric expression. - -*Example* +*Examples* [source.merge.styled,esql] ---- @@ -28,11 +10,7 @@ include::{esql-specs}/stats.csv-spec[tag=max] |=== include::{esql-specs}/stats.csv-spec[tag=max-result] |=== - -The expression can use inline functions. For example, to calculate the maximum -over an average of a multivalued column, use `MV_AVG` to first average the -multiple values per row, and use the result with the `MAX` function: - +The expression can use inline functions. 
For example, to calculate the maximum over an average of a multivalued column, use `MV_AVG` to first average the multiple values per row, and use the result with the `MAX` function [source.merge.styled,esql] ---- include::{esql-specs}/stats.csv-spec[tag=docsStatsMaxNestedExpression] @@ -40,4 +18,5 @@ include::{esql-specs}/stats.csv-spec[tag=docsStatsMaxNestedExpression] [%header.monospaced.styled,format=dsv,separator=|] |=== include::{esql-specs}/stats.csv-spec[tag=docsStatsMaxNestedExpression-result] -|=== \ No newline at end of file +|=== + diff --git a/docs/reference/esql/functions/min.asciidoc b/docs/reference/esql/functions/examples/min.asciidoc similarity index 55% rename from docs/reference/esql/functions/min.asciidoc rename to docs/reference/esql/functions/examples/min.asciidoc index 313822818128c..b4088196d750b 100644 --- a/docs/reference/esql/functions/min.asciidoc +++ b/docs/reference/esql/functions/examples/min.asciidoc @@ -1,24 +1,6 @@ -[discrete] -[[esql-agg-min]] -=== `MIN` +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. -*Syntax* - -[source,esql] ----- -MIN(expression) ----- - -*Parameters* - -`expression`:: -Expression from which to return the minimum value. - -*Description* - -Returns the minimum value of a numeric expression. - -*Example* +*Examples* [source.merge.styled,esql] ---- @@ -28,11 +10,7 @@ include::{esql-specs}/stats.csv-spec[tag=min] |=== include::{esql-specs}/stats.csv-spec[tag=min-result] |=== - -The expression can use inline functions. For example, to calculate the minimum -over an average of a multivalued column, use `MV_AVG` to first average the -multiple values per row, and use the result with the `MIN` function: - +The expression can use inline functions. For example, to calculate the minimum over an average of a multivalued column, use `MV_AVG` to first average the multiple values per row, and use the result with the `MIN` function [source.merge.styled,esql] ---- include::{esql-specs}/stats.csv-spec[tag=docsStatsMinNestedExpression] @@ -41,3 +19,4 @@ include::{esql-specs}/stats.csv-spec[tag=docsStatsMinNestedExpression] |=== include::{esql-specs}/stats.csv-spec[tag=docsStatsMinNestedExpression-result] |=== + diff --git a/docs/reference/esql/functions/examples/st_distance.asciidoc b/docs/reference/esql/functions/examples/st_distance.asciidoc new file mode 100644 index 0000000000000..60da852eff736 --- /dev/null +++ b/docs/reference/esql/functions/examples/st_distance.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/spatial.csv-spec[tag=st_distance-airports] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/spatial.csv-spec[tag=st_distance-airports-result] +|=== + diff --git a/docs/reference/esql/functions/examples/top.asciidoc b/docs/reference/esql/functions/examples/top.asciidoc new file mode 100644 index 0000000000000..3d48d3c346c9e --- /dev/null +++ b/docs/reference/esql/functions/examples/top.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/stats_top.csv-spec[tag=top] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats_top.csv-spec[tag=top-result] +|=== + diff --git a/docs/reference/esql/functions/ip-functions.asciidoc b/docs/reference/esql/functions/ip-functions.asciidoc index 55c808e587a18..0d58e24c02945 100644 --- a/docs/reference/esql/functions/ip-functions.asciidoc +++ b/docs/reference/esql/functions/ip-functions.asciidoc @@ -9,6 +9,8 @@ // tag::ip_list[] * <> +* <> // end::ip_list[] include::layout/cidr_match.asciidoc[] +include::layout/ip_prefix.asciidoc[] diff --git a/docs/reference/esql/functions/kibana/definition/avg.json b/docs/reference/esql/functions/kibana/definition/avg.json new file mode 100644 index 0000000000000..eb0be684a468e --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/avg.json @@ -0,0 +1,48 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "agg", + "name" : "avg", + "description" : "The average of a numeric field.", + "signatures" : [ + { + "params" : [ + { + "name" : "number", + "type" : "double", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "integer", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "long", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "double" + } + ], + "examples" : [ + "FROM employees\n| STATS AVG(height)", + "FROM employees\n| STATS avg_salary_change = ROUND(AVG(MV_AVG(salary_change)), 10)" + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/locate.json b/docs/reference/esql/functions/kibana/definition/locate.json index 13b7512e17def..2097c90b41958 100644 --- a/docs/reference/esql/functions/kibana/definition/locate.json +++ b/docs/reference/esql/functions/kibana/definition/locate.json @@ -2,7 +2,7 @@ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", "type" : "eval", "name" : "locate", - "description" : "Returns an integer that indicates the position of a keyword substring within another string", + "description" : "Returns an integer that indicates the position of a keyword substring within another string.", "signatures" : [ { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/max.json b/docs/reference/esql/functions/kibana/definition/max.json new file mode 100644 index 0000000000000..bc7380bd76dd4 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/max.json @@ -0,0 +1,72 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. 
See ../README.md for how to regenerate it.", + "type" : "agg", + "name" : "max", + "description" : "The maximum value of a field.", + "signatures" : [ + { + "params" : [ + { + "name" : "field", + "type" : "boolean", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "datetime", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "datetime" + }, + { + "params" : [ + { + "name" : "field", + "type" : "double", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "long" + } + ], + "examples" : [ + "FROM employees\n| STATS MAX(languages)", + "FROM employees\n| STATS max_avg_salary_change = MAX(MV_AVG(salary_change))" + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/min.json b/docs/reference/esql/functions/kibana/definition/min.json new file mode 100644 index 0000000000000..937391bf242ac --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/min.json @@ -0,0 +1,72 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "agg", + "name" : "min", + "description" : "The minimum value of a field.", + "signatures" : [ + { + "params" : [ + { + "name" : "field", + "type" : "boolean", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "datetime", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "datetime" + }, + { + "params" : [ + { + "name" : "field", + "type" : "double", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "long" + } + ], + "examples" : [ + "FROM employees\n| STATS MIN(languages)", + "FROM employees\n| STATS min_avg_salary_change = MIN(MV_AVG(salary_change))" + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/st_distance.json b/docs/reference/esql/functions/kibana/definition/st_distance.json new file mode 100644 index 0000000000000..448e0d54051da --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/st_distance.json @@ -0,0 +1,47 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. 
See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "st_distance", + "description" : "Computes the distance between two points.\nFor cartesian geometries, this is the pythagorean distance in the same units as the original coordinates.\nFor geographic geometries, this is the circular distance along the great circle in meters.", + "signatures" : [ + { + "params" : [ + { + "name" : "geomA", + "type" : "cartesian_point", + "optional" : false, + "description" : "Expression of type `geo_point` or `cartesian_point`. If `null`, the function returns `null`." + }, + { + "name" : "geomB", + "type" : "cartesian_point", + "optional" : false, + "description" : "Expression of type `geo_point` or `cartesian_point`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_point` and `cartesian_point` parameters." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "geomA", + "type" : "geo_point", + "optional" : false, + "description" : "Expression of type `geo_point` or `cartesian_point`. If `null`, the function returns `null`." + }, + { + "name" : "geomB", + "type" : "geo_point", + "optional" : false, + "description" : "Expression of type `geo_point` or `cartesian_point`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_point` and `cartesian_point` parameters." + } + ], + "variadic" : false, + "returnType" : "double" + } + ], + "examples" : [ + "FROM airports\n| WHERE abbrev == \"CPH\"\n| EVAL distance = ST_DISTANCE(location, city_location)\n| KEEP abbrev, name, location, city_location, distance" + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/substring.json b/docs/reference/esql/functions/kibana/definition/substring.json index 25f432796cc8d..b38b545822a90 100644 --- a/docs/reference/esql/functions/kibana/definition/substring.json +++ b/docs/reference/esql/functions/kibana/definition/substring.json @@ -2,7 +2,7 @@ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", "type" : "eval", "name" : "substring", - "description" : "Returns a substring of a string, specified by a start position and an optional length", + "description" : "Returns a substring of a string, specified by a start position and an optional length.", "signatures" : [ { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/top.json b/docs/reference/esql/functions/kibana/definition/top.json new file mode 100644 index 0000000000000..7ad073d6e7564 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/top.json @@ -0,0 +1,107 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "agg", + "name" : "top", + "description" : "Collects the top values for a field. Includes repeated values.", + "signatures" : [ + { + "params" : [ + { + "name" : "field", + "type" : "datetime", + "optional" : false, + "description" : "The field to collect the top values for." + }, + { + "name" : "limit", + "type" : "integer", + "optional" : false, + "description" : "The maximum number of values to collect." + }, + { + "name" : "order", + "type" : "keyword", + "optional" : false, + "description" : "The order to calculate the top values. Either `asc` or `desc`." 
+ } + ], + "variadic" : false, + "returnType" : "datetime" + }, + { + "params" : [ + { + "name" : "field", + "type" : "double", + "optional" : false, + "description" : "The field to collect the top values for." + }, + { + "name" : "limit", + "type" : "integer", + "optional" : false, + "description" : "The maximum number of values to collect." + }, + { + "name" : "order", + "type" : "keyword", + "optional" : false, + "description" : "The order to calculate the top values. Either `asc` or `desc`." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "The field to collect the top values for." + }, + { + "name" : "limit", + "type" : "integer", + "optional" : false, + "description" : "The maximum number of values to collect." + }, + { + "name" : "order", + "type" : "keyword", + "optional" : false, + "description" : "The order to calculate the top values. Either `asc` or `desc`." + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "The field to collect the top values for." + }, + { + "name" : "limit", + "type" : "integer", + "optional" : false, + "description" : "The maximum number of values to collect." + }, + { + "name" : "order", + "type" : "keyword", + "optional" : false, + "description" : "The order to calculate the top values. Either `asc` or `desc`." + } + ], + "variadic" : false, + "returnType" : "long" + } + ], + "examples" : [ + "FROM employees\n| STATS top_salaries = TOP(salary, 3, \"desc\"), top_salary = MAX(salary)" + ] +} diff --git a/docs/reference/esql/functions/kibana/docs/avg.md b/docs/reference/esql/functions/kibana/docs/avg.md new file mode 100644 index 0000000000000..54006a0556175 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/avg.md @@ -0,0 +1,11 @@ + + +### AVG +The average of a numeric field. + +``` +FROM employees +| STATS AVG(height) +``` diff --git a/docs/reference/esql/functions/kibana/docs/locate.md b/docs/reference/esql/functions/kibana/docs/locate.md index 7fffbfd548f20..75275068d3096 100644 --- a/docs/reference/esql/functions/kibana/docs/locate.md +++ b/docs/reference/esql/functions/kibana/docs/locate.md @@ -3,7 +3,7 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ --> ### LOCATE -Returns an integer that indicates the position of a keyword substring within another string +Returns an integer that indicates the position of a keyword substring within another string. ``` row a = "hello" diff --git a/docs/reference/esql/functions/kibana/docs/max.md b/docs/reference/esql/functions/kibana/docs/max.md new file mode 100644 index 0000000000000..80e88885e7f34 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/max.md @@ -0,0 +1,11 @@ + + +### MAX +The maximum value of a field. + +``` +FROM employees +| STATS MAX(languages) +``` diff --git a/docs/reference/esql/functions/kibana/docs/min.md b/docs/reference/esql/functions/kibana/docs/min.md new file mode 100644 index 0000000000000..38d13b97fd344 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/min.md @@ -0,0 +1,11 @@ + + +### MIN +The minimum value of a field. 
+ +``` +FROM employees +| STATS MIN(languages) +``` diff --git a/docs/reference/esql/functions/kibana/docs/st_distance.md b/docs/reference/esql/functions/kibana/docs/st_distance.md new file mode 100644 index 0000000000000..7ea2d5a255357 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/st_distance.md @@ -0,0 +1,15 @@ + + +### ST_DISTANCE +Computes the distance between two points. +For cartesian geometries, this is the pythagorean distance in the same units as the original coordinates. +For geographic geometries, this is the circular distance along the great circle in meters. + +``` +FROM airports +| WHERE abbrev == "CPH" +| EVAL distance = ST_DISTANCE(location, city_location) +| KEEP abbrev, name, location, city_location, distance +``` diff --git a/docs/reference/esql/functions/kibana/docs/substring.md b/docs/reference/esql/functions/kibana/docs/substring.md index 62c4eb33c2e95..5f2601a279f6f 100644 --- a/docs/reference/esql/functions/kibana/docs/substring.md +++ b/docs/reference/esql/functions/kibana/docs/substring.md @@ -3,7 +3,7 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ --> ### SUBSTRING -Returns a substring of a string, specified by a start position and an optional length +Returns a substring of a string, specified by a start position and an optional length. ``` FROM employees diff --git a/docs/reference/esql/functions/kibana/docs/top.md b/docs/reference/esql/functions/kibana/docs/top.md new file mode 100644 index 0000000000000..10db4e7ac5b55 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/top.md @@ -0,0 +1,11 @@ + + +### TOP +Collects the top values for a field. Includes repeated values. + +``` +FROM employees +| STATS top_salaries = TOP(salary, 3, "desc"), top_salary = MAX(salary) +``` diff --git a/docs/reference/esql/functions/layout/avg.asciidoc b/docs/reference/esql/functions/layout/avg.asciidoc new file mode 100644 index 0000000000000..8292af8e75554 --- /dev/null +++ b/docs/reference/esql/functions/layout/avg.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-avg]] +=== `AVG` + +*Syntax* + +[.text-center] +image::esql/functions/signature/avg.svg[Embedded,opts=inline] + +include::../parameters/avg.asciidoc[] +include::../description/avg.asciidoc[] +include::../types/avg.asciidoc[] +include::../examples/avg.asciidoc[] diff --git a/docs/reference/esql/functions/layout/max.asciidoc b/docs/reference/esql/functions/layout/max.asciidoc new file mode 100644 index 0000000000000..a4eb3d99c0d02 --- /dev/null +++ b/docs/reference/esql/functions/layout/max.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-max]] +=== `MAX` + +*Syntax* + +[.text-center] +image::esql/functions/signature/max.svg[Embedded,opts=inline] + +include::../parameters/max.asciidoc[] +include::../description/max.asciidoc[] +include::../types/max.asciidoc[] +include::../examples/max.asciidoc[] diff --git a/docs/reference/esql/functions/layout/min.asciidoc b/docs/reference/esql/functions/layout/min.asciidoc new file mode 100644 index 0000000000000..60ad2cc21b561 --- /dev/null +++ b/docs/reference/esql/functions/layout/min.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +[discrete] +[[esql-min]] +=== `MIN` + +*Syntax* + +[.text-center] +image::esql/functions/signature/min.svg[Embedded,opts=inline] + +include::../parameters/min.asciidoc[] +include::../description/min.asciidoc[] +include::../types/min.asciidoc[] +include::../examples/min.asciidoc[] diff --git a/docs/reference/esql/functions/layout/st_distance.asciidoc b/docs/reference/esql/functions/layout/st_distance.asciidoc new file mode 100644 index 0000000000000..159b071ce63a7 --- /dev/null +++ b/docs/reference/esql/functions/layout/st_distance.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-st_distance]] +=== `ST_DISTANCE` + +*Syntax* + +[.text-center] +image::esql/functions/signature/st_distance.svg[Embedded,opts=inline] + +include::../parameters/st_distance.asciidoc[] +include::../description/st_distance.asciidoc[] +include::../types/st_distance.asciidoc[] +include::../examples/st_distance.asciidoc[] diff --git a/docs/reference/esql/functions/layout/top.asciidoc b/docs/reference/esql/functions/layout/top.asciidoc new file mode 100644 index 0000000000000..a29a7c96a3697 --- /dev/null +++ b/docs/reference/esql/functions/layout/top.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-top]] +=== `TOP` + +*Syntax* + +[.text-center] +image::esql/functions/signature/top.svg[Embedded,opts=inline] + +include::../parameters/top.asciidoc[] +include::../description/top.asciidoc[] +include::../types/top.asciidoc[] +include::../examples/top.asciidoc[] diff --git a/docs/reference/esql/functions/parameters/avg.asciidoc b/docs/reference/esql/functions/parameters/avg.asciidoc new file mode 100644 index 0000000000000..91c56709d182a --- /dev/null +++ b/docs/reference/esql/functions/parameters/avg.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`number`:: + diff --git a/docs/reference/esql/functions/parameters/max.asciidoc b/docs/reference/esql/functions/parameters/max.asciidoc new file mode 100644 index 0000000000000..8903aa1a472a3 --- /dev/null +++ b/docs/reference/esql/functions/parameters/max.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`field`:: + diff --git a/docs/reference/esql/functions/parameters/min.asciidoc b/docs/reference/esql/functions/parameters/min.asciidoc new file mode 100644 index 0000000000000..8903aa1a472a3 --- /dev/null +++ b/docs/reference/esql/functions/parameters/min.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`field`:: + diff --git a/docs/reference/esql/functions/parameters/st_distance.asciidoc b/docs/reference/esql/functions/parameters/st_distance.asciidoc new file mode 100644 index 0000000000000..f32433dfbf6fb --- /dev/null +++ b/docs/reference/esql/functions/parameters/st_distance.asciidoc @@ -0,0 +1,9 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`geomA`:: +Expression of type `geo_point` or `cartesian_point`. If `null`, the function returns `null`. + +`geomB`:: +Expression of type `geo_point` or `cartesian_point`. 
If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_point` and `cartesian_point` parameters. diff --git a/docs/reference/esql/functions/parameters/top.asciidoc b/docs/reference/esql/functions/parameters/top.asciidoc new file mode 100644 index 0000000000000..979bca393b5aa --- /dev/null +++ b/docs/reference/esql/functions/parameters/top.asciidoc @@ -0,0 +1,12 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`field`:: +The field to collect the top values for. + +`limit`:: +The maximum number of values to collect. + +`order`:: +The order to calculate the top values. Either `asc` or `desc`. diff --git a/docs/reference/esql/functions/signature/avg.svg b/docs/reference/esql/functions/signature/avg.svg new file mode 100644 index 0000000000000..f325358aff960 --- /dev/null +++ b/docs/reference/esql/functions/signature/avg.svg @@ -0,0 +1 @@ +AVG(number) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/max.svg b/docs/reference/esql/functions/signature/max.svg new file mode 100644 index 0000000000000..dda43dfbfbba2 --- /dev/null +++ b/docs/reference/esql/functions/signature/max.svg @@ -0,0 +1 @@ +MAX(field) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/min.svg b/docs/reference/esql/functions/signature/min.svg new file mode 100644 index 0000000000000..e654d3027fee8 --- /dev/null +++ b/docs/reference/esql/functions/signature/min.svg @@ -0,0 +1 @@ +MIN(field) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/st_distance.svg b/docs/reference/esql/functions/signature/st_distance.svg new file mode 100644 index 0000000000000..1831a139a719f --- /dev/null +++ b/docs/reference/esql/functions/signature/st_distance.svg @@ -0,0 +1 @@ +ST_DISTANCE(geomA,geomB) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/top.svg b/docs/reference/esql/functions/signature/top.svg new file mode 100644 index 0000000000000..cfd15e0d94ac4 --- /dev/null +++ b/docs/reference/esql/functions/signature/top.svg @@ -0,0 +1 @@ +TOP(field,limit,order) \ No newline at end of file diff --git a/docs/reference/esql/functions/spatial-functions.asciidoc b/docs/reference/esql/functions/spatial-functions.asciidoc index d143681fcf2f2..79acc2028d983 100644 --- a/docs/reference/esql/functions/spatial-functions.asciidoc +++ b/docs/reference/esql/functions/spatial-functions.asciidoc @@ -14,6 +14,7 @@ * experimental:[] <> * experimental:[] <> * experimental:[] <> +* experimental:[] <> // end::spatial_list[] include::layout/st_intersects.asciidoc[] @@ -22,3 +23,4 @@ include::layout/st_contains.asciidoc[] include::layout/st_within.asciidoc[] include::layout/st_x.asciidoc[] include::layout/st_y.asciidoc[] +include::layout/st_distance.asciidoc[] diff --git a/docs/reference/esql/functions/types/avg.asciidoc b/docs/reference/esql/functions/types/avg.asciidoc new file mode 100644 index 0000000000000..273dae4af76c2 --- /dev/null +++ b/docs/reference/esql/functions/types/avg.asciidoc @@ -0,0 +1,11 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +number | result +double | double +integer | double +long | double +|=== diff --git a/docs/reference/esql/functions/types/max.asciidoc b/docs/reference/esql/functions/types/max.asciidoc new file mode 100644 index 0000000000000..6515c6bfc48d2 --- /dev/null +++ b/docs/reference/esql/functions/types/max.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +field | result +boolean | boolean +datetime | datetime +double | double +integer | integer +long | long +|=== diff --git a/docs/reference/esql/functions/types/min.asciidoc b/docs/reference/esql/functions/types/min.asciidoc new file mode 100644 index 0000000000000..6515c6bfc48d2 --- /dev/null +++ b/docs/reference/esql/functions/types/min.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +field | result +boolean | boolean +datetime | datetime +double | double +integer | integer +long | long +|=== diff --git a/docs/reference/esql/functions/types/st_distance.asciidoc b/docs/reference/esql/functions/types/st_distance.asciidoc new file mode 100644 index 0000000000000..c6ae485f3f535 --- /dev/null +++ b/docs/reference/esql/functions/types/st_distance.asciidoc @@ -0,0 +1,10 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +geomA | geomB | result +cartesian_point | cartesian_point | double +geo_point | geo_point | double +|=== diff --git a/docs/reference/esql/functions/types/top.asciidoc b/docs/reference/esql/functions/types/top.asciidoc new file mode 100644 index 0000000000000..1874cd8b12bf3 --- /dev/null +++ b/docs/reference/esql/functions/types/top.asciidoc @@ -0,0 +1,12 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +field | limit | order | result +datetime | integer | keyword | datetime +double | integer | keyword | double +integer | integer | keyword | integer +long | integer | keyword | long +|=== diff --git a/docs/reference/esql/functions/weighted-avg.asciidoc b/docs/reference/esql/functions/weighted-avg.asciidoc new file mode 100644 index 0000000000000..4f166801641df --- /dev/null +++ b/docs/reference/esql/functions/weighted-avg.asciidoc @@ -0,0 +1,35 @@ +[discrete] +[[esql-agg-weighted-avg]] +=== `WEIGHTED_AVG` + +*Syntax* + +[source,esql] +---- +WEIGHTED_AVG(expression, weight) +---- + +`expression`:: +Numeric expression. + +`weight`:: +Numeric weight. + +*Description* + +The weighted average of a numeric expression. + +*Supported types* + +The result is always a `double` no matter the input type. 
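As a minimal sketch of how the two arguments map onto the `WEIGHTED_AVG(expression, weight)` syntax above (a hypothetical query reusing the `employees` index and the `salary`, `height` and `languages` fields that appear elsewhere in these docs, not one of the generated spec examples included below):

[source,esql]
----
// salary is the expression being averaged, height supplies the per-row weight
FROM employees
| STATS w_avg = WEIGHTED_AVG(salary, height) BY languages
| SORT languages
----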
+ +*Examples* + +[source.merge.styled,esql] +---- +include::{esql-specs}/stats.csv-spec[tag=weighted-avg] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats.csv-spec[tag=weighted-avg-result] +|=== diff --git a/docs/reference/esql/processing-commands/dissect.asciidoc b/docs/reference/esql/processing-commands/dissect.asciidoc index c48b72af0de7e..72c811a318a5d 100644 --- a/docs/reference/esql/processing-commands/dissect.asciidoc +++ b/docs/reference/esql/processing-commands/dissect.asciidoc @@ -2,6 +2,9 @@ [[esql-dissect]] === `DISSECT` +`DISSECT` enables you to <>. + **Syntax** [source,esql] @@ -56,4 +59,4 @@ include::{esql-specs}/docs.csv-spec[tag=dissectWithToDatetime] include::{esql-specs}/docs.csv-spec[tag=dissectWithToDatetime-result] |=== -// end::examples[] \ No newline at end of file +// end::examples[] diff --git a/docs/reference/esql/processing-commands/drop.asciidoc b/docs/reference/esql/processing-commands/drop.asciidoc index 8f03141d5e05a..c81f438f81c3b 100644 --- a/docs/reference/esql/processing-commands/drop.asciidoc +++ b/docs/reference/esql/processing-commands/drop.asciidoc @@ -2,6 +2,8 @@ [[esql-drop]] === `DROP` +The `DROP` processing command removes one or more columns. + **Syntax** [source,esql] @@ -14,10 +16,6 @@ DROP columns `columns`:: A comma-separated list of columns to remove. Supports wildcards. -*Description* - -The `DROP` processing command removes one or more columns. - *Examples* [source,esql] diff --git a/docs/reference/esql/processing-commands/enrich.asciidoc b/docs/reference/esql/processing-commands/enrich.asciidoc index 5470d81b2f40b..f34e77dbf5c23 100644 --- a/docs/reference/esql/processing-commands/enrich.asciidoc +++ b/docs/reference/esql/processing-commands/enrich.asciidoc @@ -2,6 +2,9 @@ [[esql-enrich]] === `ENRICH` +`ENRICH` enables you to add data from existing indices as new columns using an +enrich policy. + **Syntax** [source,esql] diff --git a/docs/reference/esql/processing-commands/eval.asciidoc b/docs/reference/esql/processing-commands/eval.asciidoc index 9b34fca7ceeff..f77249736c1b3 100644 --- a/docs/reference/esql/processing-commands/eval.asciidoc +++ b/docs/reference/esql/processing-commands/eval.asciidoc @@ -2,6 +2,9 @@ [[esql-eval]] === `EVAL` +The `EVAL` processing command enables you to append new columns with calculated +values. + **Syntax** [source,esql] diff --git a/docs/reference/esql/processing-commands/grok.asciidoc b/docs/reference/esql/processing-commands/grok.asciidoc index d5d58a9eaee12..d631d17f7a42c 100644 --- a/docs/reference/esql/processing-commands/grok.asciidoc +++ b/docs/reference/esql/processing-commands/grok.asciidoc @@ -2,6 +2,9 @@ [[esql-grok]] === `GROK` +`GROK` enables you to <>. + **Syntax** [source,esql] diff --git a/docs/reference/esql/processing-commands/keep.asciidoc b/docs/reference/esql/processing-commands/keep.asciidoc index 57f32a68aec4c..468f459411640 100644 --- a/docs/reference/esql/processing-commands/keep.asciidoc +++ b/docs/reference/esql/processing-commands/keep.asciidoc @@ -2,6 +2,9 @@ [[esql-keep]] === `KEEP` +The `KEEP` processing command enables you to specify what columns are returned +and the order in which they are returned. + **Syntax** [source,esql] @@ -70,7 +73,7 @@ include::{esql-specs}/docs.csv-spec[tag=keepDoubleWildcard] include::{esql-specs}/docs.csv-spec[tag=keep-double-wildcard-result] |=== -The following examples show how precedence rules work when a field name matches multiple expressions. 
+The following examples show how precedence rules work when a field name matches multiple expressions. Complete field name has precedence over wildcard expressions: diff --git a/docs/reference/esql/processing-commands/limit.asciidoc b/docs/reference/esql/processing-commands/limit.asciidoc index 4ccf3024a4c1e..78d05672ea095 100644 --- a/docs/reference/esql/processing-commands/limit.asciidoc +++ b/docs/reference/esql/processing-commands/limit.asciidoc @@ -2,6 +2,9 @@ [[esql-limit]] === `LIMIT` +The `LIMIT` processing command enables you to limit the number of rows that are +returned. + **Syntax** [source,esql] diff --git a/docs/reference/esql/processing-commands/lookup.asciidoc b/docs/reference/esql/processing-commands/lookup.asciidoc index 1944d243968a8..426527bf4d2d6 100644 --- a/docs/reference/esql/processing-commands/lookup.asciidoc +++ b/docs/reference/esql/processing-commands/lookup.asciidoc @@ -4,6 +4,9 @@ experimental::["LOOKUP is a highly experimental and only available in SNAPSHOT versions."] +`LOOKUP` matches values from the input against a `table` provided in the request, +adding the other fields from the `table` to the output. + **Syntax** [source,esql] @@ -19,11 +22,6 @@ The name of the `table` provided in the request to match. `match_field`:: The fields in the input to match against the table. -*Description* - -`LOOKUP` matches values from the input against a `table` provided in the request, -adding the other fields from the `table` to the output. - *Examples* // tag::examples[] diff --git a/docs/reference/esql/processing-commands/mv_expand.asciidoc b/docs/reference/esql/processing-commands/mv_expand.asciidoc index 9e1cb5573c381..010701f7fc8ee 100644 --- a/docs/reference/esql/processing-commands/mv_expand.asciidoc +++ b/docs/reference/esql/processing-commands/mv_expand.asciidoc @@ -4,6 +4,9 @@ preview::[] +The `MV_EXPAND` processing command expands multivalued columns into one row per +value, duplicating other columns. + **Syntax** [source,esql] @@ -16,11 +19,6 @@ MV_EXPAND column `column`:: The multivalued column to expand. -*Description* - -The `MV_EXPAND` processing command expands multivalued columns into one row per -value, duplicating other columns. - *Example* [source.merge.styled,esql] diff --git a/docs/reference/esql/processing-commands/rename.asciidoc b/docs/reference/esql/processing-commands/rename.asciidoc index 773fe8b640f75..8507a826f085d 100644 --- a/docs/reference/esql/processing-commands/rename.asciidoc +++ b/docs/reference/esql/processing-commands/rename.asciidoc @@ -2,6 +2,8 @@ [[esql-rename]] === `RENAME` +The `RENAME` processing command renames one or more columns. + **Syntax** [source,esql] diff --git a/docs/reference/esql/processing-commands/sort.asciidoc b/docs/reference/esql/processing-commands/sort.asciidoc index fea7bfaf0c65f..e76b9c76ab273 100644 --- a/docs/reference/esql/processing-commands/sort.asciidoc +++ b/docs/reference/esql/processing-commands/sort.asciidoc @@ -2,6 +2,8 @@ [[esql-sort]] === `SORT` +The `SORT` processing command sorts a table on one or more columns. + **Syntax** [source,esql] diff --git a/docs/reference/esql/processing-commands/stats.asciidoc b/docs/reference/esql/processing-commands/stats.asciidoc index fe84c56bbfc19..34ae81fd5414e 100644 --- a/docs/reference/esql/processing-commands/stats.asciidoc +++ b/docs/reference/esql/processing-commands/stats.asciidoc @@ -2,11 +2,14 @@ [[esql-stats-by]] === `STATS ... BY` +The `STATS ... 
BY` processing command groups rows according to a common value +and calculates one or more aggregated values over the grouped rows. + **Syntax** [source,esql] ---- -STATS [column1 =] expression1[, ..., [columnN =] expressionN] +STATS [column1 =] expression1[, ..., [columnN =] expressionN] [BY grouping_expression1[, ..., grouping_expressionN]] ---- @@ -39,8 +42,8 @@ NOTE: `STATS` without any groups is much much faster than adding a group. NOTE: Grouping on a single expression is currently much more optimized than grouping on many expressions. In some tests we have seen grouping on a single `keyword` - column to be five times faster than grouping on two `keyword` columns. Do - not try to work around this by combining the two columns together with + column to be five times faster than grouping on two `keyword` columns. Do + not try to work around this by combining the two columns together with something like <> and then grouping - that is not going to be faster. @@ -80,14 +83,36 @@ include::{esql-specs}/stats.csv-spec[tag=statsCalcMultipleValues] include::{esql-specs}/stats.csv-spec[tag=statsCalcMultipleValues-result] |=== -It's also possible to group by multiple values (only supported for long and -keyword family fields): +[[esql-stats-mv-group]] +If the grouping key is multivalued then the input row is in all groups: + +[source.merge.styled,esql] +---- +include::{esql-specs}/stats.csv-spec[tag=mv-group] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats.csv-spec[tag=mv-group-result] +|=== + +It's also possible to group by multiple values: [source,esql] ---- include::{esql-specs}/stats.csv-spec[tag=statsGroupByMultipleValues] ---- +If all the grouping keys are multivalued then the input row is in all groups: + +[source.merge.styled,esql] +---- +include::{esql-specs}/stats.csv-spec[tag=multi-mv-group] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats.csv-spec[tag=multi-mv-group-result] +|=== + Both the aggregating functions and the grouping expressions accept other functions. This is useful for using `STATS...BY` on multivalue columns. For example, to calculate the average salary change, you can use `MV_AVG` to diff --git a/docs/reference/esql/processing-commands/where.asciidoc b/docs/reference/esql/processing-commands/where.asciidoc index 3076f92c40fc0..407df30c57215 100644 --- a/docs/reference/esql/processing-commands/where.asciidoc +++ b/docs/reference/esql/processing-commands/where.asciidoc @@ -2,6 +2,9 @@ [[esql-where]] === `WHERE` +The `WHERE` processing command produces a table that contains all the rows from +the input table for which the provided condition evaluates to `true`. + **Syntax** [source,esql] @@ -14,11 +17,6 @@ WHERE expression `expression`:: A boolean expression. -*Description* - -The `WHERE` processing command produces a table that contains all the rows from -the input table for which the provided condition evaluates to `true`. - *Examples* [source,esql] @@ -33,7 +31,7 @@ Which, if `still_hired` is a boolean field, can be simplified to: include::{esql-specs}/docs.csv-spec[tag=whereBoolean] ---- -Use date math to retrieve data from a specific time range. For example, to +Use date math to retrieve data from a specific time range. For example, to retrieve the last hour of logs: [source,esql] @@ -59,4 +57,4 @@ include::../functions/rlike.asciidoc[tag=body] include::../functions/in.asciidoc[tag=body] -For a complete list of all operators, refer to <>.
\ No newline at end of file +For a complete list of all operators, refer to <>. diff --git a/docs/reference/esql/source-commands/from.asciidoc b/docs/reference/esql/source-commands/from.asciidoc index d81c46530e089..6ed517efca7cf 100644 --- a/docs/reference/esql/source-commands/from.asciidoc +++ b/docs/reference/esql/source-commands/from.asciidoc @@ -2,6 +2,9 @@ [[esql-from]] === `FROM` +The `FROM` source command returns a table with data from a data stream, index, +or alias. + **Syntax** [source,esql] @@ -82,3 +85,11 @@ Use the optional `METADATA` directive to enable <>. -*Description* - -The `ROW` source command produces a row with one or more columns with values -that you specify. This can be useful for testing. - *Examples* [source.merge.styled,esql] diff --git a/docs/reference/esql/source-commands/show.asciidoc b/docs/reference/esql/source-commands/show.asciidoc index 298ea5d8f92b9..7090ab790133f 100644 --- a/docs/reference/esql/source-commands/show.asciidoc +++ b/docs/reference/esql/source-commands/show.asciidoc @@ -2,6 +2,9 @@ [[esql-show]] === `SHOW` +The `SHOW` source command returns information about the deployment and +its capabilities. + **Syntax** [source,esql] @@ -14,15 +17,10 @@ SHOW item `item`:: Can only be `INFO`. -*Description* - -The `SHOW` source command returns information about the deployment and -its capabilities: - -* Use `SHOW INFO` to return the deployment's version, build date and hash. - *Examples* +Use `SHOW INFO` to return the deployment's version, build date and hash. + [source,esql] ---- SHOW INFO diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 40b4ff4bb9dc8..24149afe802a2 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -80,7 +80,10 @@ breaking change]. compression ratio, at the expense of slower stored fields performance. If you are updating the compression type, the new one will be applied after segments are merged. Segment merging can be forced using - <>. + <>. Experiments with indexing log datasets + have shown that `best_compression` gives up to ~18% lower storage usage in + the most ideal scenario compared to `default` while only minimally affecting + indexing throughput (~2%). [[index-mode-setting]] `index.mode`:: + diff --git a/docs/reference/index-modules/index-sorting.asciidoc b/docs/reference/index-modules/index-sorting.asciidoc index dd355eccbca2a..1334a96872459 100644 --- a/docs/reference/index-modules/index-sorting.asciidoc +++ b/docs/reference/index-modules/index-sorting.asciidoc @@ -6,9 +6,8 @@ inside each Shard will be sorted. By default Lucene does not apply any sort. The `index.sort.*` settings define which fields should be used to sort the documents inside each Segment. [WARNING] -nested fields are not compatible with index sorting because they rely on the assumption -that nested documents are stored in contiguous doc ids, which can be broken by index sorting. -An error will be thrown if index sorting is activated on an index that contains nested fields. +It is allowed to apply index sorting to mappings with nested objects, so long as the +`index.sort.*` setting contains no nested fields. 
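As an illustration of that relaxed rule (a hypothetical index name and mapping, not part of this change), a mapping may combine nested objects with a sort on a top-level field:

[source,console]
----
PUT my_sorted_index
{
  "settings": {
    "index": {
      "sort.field": "created_at", <1>
      "sort.order": "desc"
    }
  },
  "mappings": {
    "properties": {
      "created_at": { "type": "date" },
      "comments": { "type": "nested" } <2>
    }
  }
}
----
<1> The sort field is a regular top-level field.
<2> Nested objects may appear in the mapping as long as they are not referenced by any `index.sort.*` setting.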
For instance the following example shows how to define a sort on a single field: diff --git a/docs/reference/inference/delete-inference.asciidoc b/docs/reference/inference/delete-inference.asciidoc index dca800c98ca2e..4df72ba672092 100644 --- a/docs/reference/inference/delete-inference.asciidoc +++ b/docs/reference/inference/delete-inference.asciidoc @@ -7,8 +7,8 @@ experimental[] Deletes an {infer} endpoint. IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in -{ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio or -Hugging Face. For built-in models and models uploaded though Eland, the {infer} +{ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI or +Hugging Face. For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>. @@ -50,11 +50,11 @@ The type of {infer} task that the model performs. `dry_run`:: (Optional, Boolean) When `true`, checks the {infer} processors that reference the endpoint and -returns them in a list, but does not deletes the endpoint. Defaults to `false`. +returns them in a list, but does not delete the endpoint. Defaults to `false`. `force`:: (Optional, Boolean) -Deletes the endpoint regardless if it's used in an {infer} pipeline or a in a +Deletes the endpoint regardless if it's used in an {infer} pipeline or in a `semantic_text` field. diff --git a/docs/reference/inference/get-inference.asciidoc b/docs/reference/inference/get-inference.asciidoc index 339146adfece9..c3fe841603bcc 100644 --- a/docs/reference/inference/get-inference.asciidoc +++ b/docs/reference/inference/get-inference.asciidoc @@ -7,8 +7,8 @@ experimental[] Retrieves {infer} endpoint information. IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in -{ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio or -Hugging Face. For built-in models and models uploaded though Eland, the {infer} +{ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI or +Hugging Face. For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>. @@ -65,7 +65,7 @@ The type of {infer} task that the model performs. [[get-inference-api-example]] ==== {api-examples-title} -The following API call retrives information about the `my-elser-model` {infer} +The following API call retrieves information about the `my-elser-model` {infer} model that can perform `sparse_embedding` tasks. diff --git a/docs/reference/inference/inference-apis.asciidoc b/docs/reference/inference/inference-apis.asciidoc index 539bba3f0d61f..02a57504da1cf 100644 --- a/docs/reference/inference/inference-apis.asciidoc +++ b/docs/reference/inference/inference-apis.asciidoc @@ -6,7 +6,7 @@ experimental[] IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in {ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio or -Hugging Face. For built-in models and models uploaded though Eland, the {infer} +Hugging Face. 
For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>. @@ -25,3 +25,13 @@ include::delete-inference.asciidoc[] include::get-inference.asciidoc[] include::post-inference.asciidoc[] include::put-inference.asciidoc[] +include::service-azure-ai-studio.asciidoc[] +include::service-azure-openai.asciidoc[] +include::service-cohere.asciidoc[] +include::service-elasticsearch.asciidoc[] +include::service-elser.asciidoc[] +include::service-google-ai-studio.asciidoc[] +include::service-google-vertex-ai.asciidoc[] +include::service-hugging-face.asciidoc[] +include::service-mistral.asciidoc[] +include::service-openai.asciidoc[] diff --git a/docs/reference/inference/inference-shared.asciidoc b/docs/reference/inference/inference-shared.asciidoc new file mode 100644 index 0000000000000..2eafa3434e89e --- /dev/null +++ b/docs/reference/inference/inference-shared.asciidoc @@ -0,0 +1,34 @@ + +tag::api-key-admonition[] +IMPORTANT: You need to provide the API key only once, during the {infer} model creation. +The <> does not retrieve your API key. +After creating the {infer} model, you cannot change the associated API key. +If you want to use a different API key, delete the {infer} model and recreate it with the same name and the updated API key. +end::api-key-admonition[] + +tag::inference-id[] +The unique identifier of the {infer} endpoint. +end::inference-id[] + +tag::request-per-minute-example[] +[source,text] +---- +"rate_limit": { + "requests_per_minute": <> +} +---- +end::request-per-minute-example[] + + +tag::service-settings[] +Settings used to install the {infer} model. +end::service-settings[] + +tag::task-settings[] +Settings to configure the {infer} task. +These settings are specific to the `` you specified. +end::task-settings[] + +tag::task-type[] +The type of the {infer} task that the model will perform. +end::task-type[] \ No newline at end of file diff --git a/docs/reference/inference/post-inference.asciidoc b/docs/reference/inference/post-inference.asciidoc index 1414e45c07616..52131c0b10776 100644 --- a/docs/reference/inference/post-inference.asciidoc +++ b/docs/reference/inference/post-inference.asciidoc @@ -7,8 +7,8 @@ experimental[] Performs an inference task on an input text by using an {infer} endpoint. IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in -{ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio or -Hugging Face. For built-in models and models uploaded though Eland, the {infer} +{ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI or +Hugging Face. For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>. diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc index 22ec4fe8fa728..656feb54ffe42 100644 --- a/docs/reference/inference/put-inference.asciidoc +++ b/docs/reference/inference/put-inference.asciidoc @@ -7,10 +7,10 @@ experimental[] Creates an {infer} endpoint to perform an {infer} task. 
IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in -{ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio or Hugging Face. -For built-in models and models uploaded though Eland, the {infer} APIs offer an alternative way to use and manage trained models. -However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the -<>. +{ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI or Hugging Face. +For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models. +However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>. + [discrete] [[put-inference-api-request]] @@ -25,851 +25,22 @@ However, if you do not plan to use the {infer} APIs to use these models or if yo * Requires the `manage_inference` <> (the built-in `inference_admin` role grants this privilege) + [discrete] [[put-inference-api-desc]] ==== {api-description-title} -The create {infer} API enables you to create an {infer} endpoint and configure a -{ml} model to perform a specific {infer} task. - -The following services are available through the {infer} API: - -* Azure AI Studio -* Azure OpenAI -* Cohere -* Elasticsearch (for built-in models and models uploaded through Eland) -* ELSER -* Google AI Studio -* Hugging Face -* Mistral -* OpenAI - -[discrete] -[[put-inference-api-path-params]] -==== {api-path-parms-title} - -``:: -(Required, string) -The unique identifier of the {infer} endpoint. - -``:: -(Required, string) -The type of the {infer} task that the model will perform. -Available task types: -* `completion`, -* `rerank`, -* `sparse_embedding`, -* `text_embedding`. - -[discrete] -[[put-inference-api-request-body]] -==== {api-request-body-title} - -`service`:: -(Required, string) -The type of service supported for the specified task type. -Available services: - -* `azureopenai`: specify the `completion` or `text_embedding` task type to use the Azure OpenAI service. -* `azureaistudio`: specify the `completion` or `text_embedding` task type to use the Azure AI Studio service. -* `cohere`: specify the `completion`, `text_embedding` or the `rerank` task type to use the Cohere service. -* `elasticsearch`: specify the `text_embedding` task type to use the E5 built-in model or text embedding models uploaded by Eland. -* `elser`: specify the `sparse_embedding` task type to use the ELSER service. -* `googleaistudio`: specify the `completion` or `text_embeddig` task to use the Google AI Studio service. -* `hugging_face`: specify the `text_embedding` task type to use the Hugging Face service. -* `mistral`: specify the `text_embedding` task type to use the Mistral service. -* `openai`: specify the `completion` or `text_embedding` task type to use the OpenAI service. - - -`service_settings`:: -(Required, object) -Settings used to install the {infer} model. -These settings are specific to the -`service` you specified. -+ -.`service_settings` for the `azureaistudio` service -[%collapsible%closed] -===== - -`api_key`::: -(Required, string) -A valid API key of your Azure AI Studio model deployment. -This key can be found on the overview page for your deployment in the management section of your https://ai.azure.com/[Azure AI Studio] account. 
- -IMPORTANT: You need to provide the API key only once, during the {infer} model creation. -The <> does not retrieve your API key. -After creating the {infer} model, you cannot change the associated API key. -If you want to use a different API key, delete the {infer} model and recreate it with the same name and the updated API key. - -`target`::: -(Required, string) -The target URL of your Azure AI Studio model deployment. -This can be found on the overview page for your deployment in the management section of your https://ai.azure.com/[Azure AI Studio] account. - -`provider`::: -(Required, string) -The model provider for your deployment. -Note that some providers may support only certain task types. -Supported providers include: - -* `cohere` - available for `text_embedding` and `completion` task types -* `databricks` - available for `completion` task type only -* `meta` - available for `completion` task type only -* `microsoft_phi` - available for `completion` task type only -* `mistral` - available for `completion` task type only -* `openai` - available for `text_embedding` and `completion` task types - -`endpoint_type`::: -(Required, string) -One of `token` or `realtime`. -Specifies the type of endpoint that is used in your model deployment. -There are https://learn.microsoft.com/en-us/azure/ai-studio/concepts/deployments-overview#billing-for-deploying-and-inferencing-llms-in-azure-ai-studio[two endpoint types available] for deployment through Azure AI Studio. -"Pay as you go" endpoints are billed per token. -For these, you must specify `token` for your `endpoint_type`. -For "real-time" endpoints which are billed per hour of usage, specify `realtime`. - -`rate_limit`::: -(Optional, object) -By default, the `azureaistudio` service sets the number of requests allowed per minute to `240`. -This helps to minimize the number of rate limit errors returned from Azure AI Studio. -To modify this, set the `requests_per_minute` setting of this object in your service settings: -+ -[source,text] ----- -"rate_limit": { - "requests_per_minute": <> -} ----- -===== -+ -.`service_settings` for the `azureopenai` service -[%collapsible%closed] -===== - -`api_key` or `entra_id`::: -(Required, string) -You must provide _either_ an API key or an Entra ID. -If you do not provide either, or provide both, you will receive an error when trying to create your model. -See the https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#authentication[Azure OpenAI Authentication documentation] for more details on these authentication types. - -IMPORTANT: You need to provide the API key or Entra ID only once, during the {infer} model creation. -The <> does not retrieve your authentication credentials. -After creating the {infer} model, you cannot change the associated API key or Entra ID. -If you want to use a different API key or Entra ID, delete the {infer} model and recreate it with the same name and the updated API key. -You _must_ have either an `api_key` or an `entra_id` defined. -If neither are present, an error will occur. - -`resource_name`::: -(Required, string) -The name of your Azure OpenAI resource. -You can find this from the https://portal.azure.com/#view/HubsExtension/BrowseAll[list of resources] in the Azure Portal for your subscription. - -`deployment_id`::: -(Required, string) -The deployment name of your deployed models. -Your Azure OpenAI deployments can be found though the https://oai.azure.com/[Azure OpenAI Studio] portal that is linked to your subscription. 
- -`api_version`::: -(Required, string) -The Azure API version ID to use. -We recommend using the https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#embeddings[latest supported non-preview version]. - -`rate_limit`::: -(Optional, object) -The `azureopenai` service sets a default number of requests allowed per minute depending on the task type. -For `text_embedding` it is set to `1440`. -For `completion` it is set to `120`. -This helps to minimize the number of rate limit errors returned from Azure. -To modify this, set the `requests_per_minute` setting of this object in your service settings: -+ -[source,text] ----- -"rate_limit": { - "requests_per_minute": <> -} ----- -+ -More information about the rate limits for Azure can be found in the https://learn.microsoft.com/en-us/azure/ai-services/openai/quotas-limits[Quota limits docs] and https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/quota?tabs=rest[How to change the quotas]. -===== -+ -.`service_settings` for the `cohere` service -[%collapsible%closed] -===== -`api_key`::: -(Required, string) -A valid API key of your Cohere account. -You can find your Cohere API keys or you can create a new one -https://dashboard.cohere.com/api-keys[on the API keys settings page]. - -IMPORTANT: You need to provide the API key only once, during the {infer} model creation. -The <> does not retrieve your API key. -After creating the {infer} model, you cannot change the associated API key. -If you want to use a different API key, delete the {infer} model and recreate it with the same name and the updated API key. - -`embedding_type`:: -(Optional, string) -Only for `text_embedding`. -Specifies the types of embeddings you want to get back. -Defaults to `float`. -Valid values are: -* `byte`: use it for signed int8 embeddings (this is a synonym of `int8`). -* `float`: use it for the default float embeddings. -* `int8`: use it for signed int8 embeddings. - -`model_id`:: -(Optional, string) -The name of the model to use for the {infer} task. -To review the available `rerank` models, refer to the -https://docs.cohere.com/reference/rerank-1[Cohere docs]. - -To review the available `text_embedding` models, refer to the -https://docs.cohere.com/reference/embed[Cohere docs]. -The default value for -`text_embedding` is `embed-english-v2.0`. - -`rate_limit`::: -(Optional, object) -By default, the `cohere` service sets the number of requests allowed per minute to `10000`. -This value is the same for all task types. -This helps to minimize the number of rate limit errors returned from Cohere. -To modify this, set the `requests_per_minute` setting of this object in your service settings: -+ -[source,text] ----- -"rate_limit": { - "requests_per_minute": <> -} ----- -+ -More information about Cohere's rate limits can be found in https://docs.cohere.com/docs/going-live#production-key-specifications[Cohere's production key docs]. - -===== -+ -.`service_settings` for the `elasticsearch` service -[%collapsible%closed] -===== - -`model_id`::: -(Required, string) -The name of the model to use for the {infer} task. -It can be the ID of either a built-in model (for example, `.multilingual-e5-small` for E5) or a text embedding model already -{ml-docs}/ml-nlp-import-model.html#ml-nlp-import-script[uploaded through Eland]. - -`num_allocations`::: -(Required, integer) -The number of model allocations to create. `num_allocations` must not exceed the number of available processors per node divided by the `num_threads`. 
- -`num_threads`::: -(Required, integer) -The number of threads to use by each model allocation. `num_threads` must not exceed the number of available processors per node divided by the number of allocations. -Must be a power of 2. Max allowed value is 32. - -===== -+ -.`service_settings` for the `elser` service -[%collapsible%closed] -===== - -`num_allocations`::: -(Required, integer) -The number of model allocations to create. `num_allocations` must not exceed the number of available processors per node divided by the `num_threads`. - -`num_threads`::: -(Required, integer) -The number of threads to use by each model allocation. `num_threads` must not exceed the number of available processors per node divided by the number of allocations. -Must be a power of 2. Max allowed value is 32. - -===== -+ -.`service_settings` for the `googleiastudio` service -[%collapsible%closed] -===== - -`api_key`::: -(Required, string) -A valid API key for the Google Gemini API. - -`model_id`::: -(Required, string) -The name of the model to use for the {infer} task. -You can find the supported models at https://ai.google.dev/gemini-api/docs/models/gemini[Gemini API models]. - -`rate_limit`::: -(Optional, object) -By default, the `googleaistudio` service sets the number of requests allowed per minute to `360`. -This helps to minimize the number of rate limit errors returned from Google AI Studio. -To modify this, set the `requests_per_minute` setting of this object in your service settings: -+ --- -[source,text] ----- -"rate_limit": { - "requests_per_minute": <> -} ----- --- - -===== -+ -.`service_settings` for the `hugging_face` service -[%collapsible%closed] -===== - -`api_key`::: -(Required, string) -A valid access token of your Hugging Face account. -You can find your Hugging Face access tokens or you can create a new one -https://huggingface.co/settings/tokens[on the settings page]. - -IMPORTANT: You need to provide the API key only once, during the {infer} model creation. -The <> does not retrieve your API key. -After creating the {infer} model, you cannot change the associated API key. -If you want to use a different API key, delete the {infer} model and recreate it with the same name and the updated API key. - -`url`::: -(Required, string) -The URL endpoint to use for the requests. - -`rate_limit`::: -(Optional, object) -By default, the `huggingface` service sets the number of requests allowed per minute to `3000`. -This helps to minimize the number of rate limit errors returned from Hugging Face. -To modify this, set the `requests_per_minute` setting of this object in your service settings: -+ -[source,text] ----- -"rate_limit": { - "requests_per_minute": <> -} ----- - -===== -+ -.`service_settings` for the `mistral` service -[%collapsible%closed] -===== - -`api_key`::: -(Required, string) -A valid API key for your Mistral account. -You can find your Mistral API keys or you can create a new one -https://console.mistral.ai/api-keys/[on the API Keys page]. - -`model`::: -(Required, string) -The name of the model to use for the {infer} task. -Refer to the https://docs.mistral.ai/getting-started/models/[Mistral models documentation] -for the list of available text embedding models. - -`max_input_tokens`::: -(Optional, integer) -Allows you to specify the maximum number of tokens per input before chunking occurs. - -`rate_limit`::: -(Optional, object) -By default, the `mistral` service sets the number of requests allowed per minute to `240`. 
-This helps to minimize the number of rate limit errors returned from the Mistral API. -To modify this, set the `requests_per_minute` setting of this object in your service settings: -+ -[source,text] ----- -"rate_limit": { - "requests_per_minute": <> -} ----- - -===== -+ -.`service_settings` for the `openai` service -[%collapsible%closed] -===== - -`api_key`::: -(Required, string) -A valid API key of your OpenAI account. -You can find your OpenAI API keys in your OpenAI account under the -https://platform.openai.com/api-keys[API keys section]. - -IMPORTANT: You need to provide the API key only once, during the {infer} model creation. -The <> does not retrieve your API key. -After creating the {infer} model, you cannot change the associated API key. -If you want to use a different API key, delete the {infer} model and recreate it with the same name and the updated API key. - -`model_id`::: -(Required, string) -The name of the model to use for the {infer} task. -Refer to the -https://platform.openai.com/docs/guides/embeddings/what-are-embeddings[OpenAI documentation] -for the list of available text embedding models. - -`organization_id`::: -(Optional, string) -The unique identifier of your organization. -You can find the Organization ID in your OpenAI account under -https://platform.openai.com/account/organization[**Settings** > **Organizations**]. - -`url`::: -(Optional, string) -The URL endpoint to use for the requests. -Can be changed for testing purposes. -Defaults to `https://api.openai.com/v1/embeddings`. - -`rate_limit`::: -(Optional, object) -The `openai` service sets a default number of requests allowed per minute depending on the task type. -For `text_embedding` it is set to `3000`. -For `completion` it is set to `500`. -This helps to minimize the number of rate limit errors returned from Azure. -To modify this, set the `requests_per_minute` setting of this object in your service settings: -+ -[source,text] ----- -"rate_limit": { - "requests_per_minute": <> -} ----- -+ -More information about the rate limits for OpenAI can be found in your https://platform.openai.com/account/limits[Account limits]. - -===== - -`task_settings`:: -(Optional, object) -Settings to configure the {infer} task. -These settings are specific to the -`` you specified. -+ -.`task_settings` for the `completion` task type -[%collapsible%closed] -===== - -`do_sample`::: -(Optional, float) -For the `azureaistudio` service only. -Instructs the inference process to perform sampling or not. -Has not affect unless `temperature` or `top_p` is specified. - -`max_new_tokens`::: -(Optional, integer) -For the `azureaistudio` service only. -Provides a hint for the maximum number of output tokens to be generated. -Defaults to 64. - -`user`::: -(Optional, string) -For `openai` service only. -Specifies the user issuing the request, which can be used for abuse detection. - -`temperature`::: -(Optional, float) -For the `azureaistudio` service only. -A number in the range of 0.0 to 2.0 that specifies the sampling temperature to use that controls the apparent creativity of generated completions. -Should not be used if `top_p` is specified. - -`top_p`::: -(Optional, float) -For the `azureaistudio` service only. -A number in the range of 0.0 to 2.0 that is an alternative value to temperature that causes the model to consider the results of the tokens with nucleus sampling probability. -Should not be used if `temperature` is specified. 
- -===== -+ -.`task_settings` for the `rerank` task type -[%collapsible%closed] -===== - -`return_documents`:: -(Optional, boolean) -For `cohere` service only. -Specify whether to return doc text within the results. - -`top_n`:: -(Optional, integer) -The number of most relevant documents to return, defaults to the number of the documents. - -===== -+ -.`task_settings` for the `text_embedding` task type -[%collapsible%closed] -===== - -`input_type`::: -(Optional, string) -For `cohere` service only. -Specifies the type of input passed to the model. -Valid values are: -* `classification`: use it for embeddings passed through a text classifier. -* `clusterning`: use it for the embeddings run through a clustering algorithm. -* `ingest`: use it for storing document embeddings in a vector database. -* `search`: use it for storing embeddings of search queries run against a vector database to find relevant documents. -+ -IMPORTANT: The `input_type` field is required when using embedding models `v3` and higher. - -`truncate`::: -(Optional, string) -For `cohere` service only. -Specifies how the API handles inputs longer than the maximum token length. -Defaults to `END`. -Valid values are: -* `NONE`: when the input exceeds the maximum input token length an error is returned. -* `START`: when the input exceeds the maximum input token length the start of the input is discarded. -* `END`: when the input exceeds the maximum input token length the end of the input is discarded. - -`user`::: -(optional, string) -For `openai`, `azureopenai` and `azureaistudio` services only. -Specifies the user issuing the request, which can be used for abuse detection. - -===== -[discrete] -[[put-inference-api-example]] -==== {api-examples-title} - -This section contains example API calls for every service type. - -[discrete] -[[inference-example-azureaistudio]] -===== Azure AI Studio service - -The following example shows how to create an {infer} endpoint called -`azure_ai_studio_embeddings` to perform a `text_embedding` task type. -Note that we do not specify a model here, as it is defined already via our Azure AI Studio deployment. - -The list of embeddings models that you can choose from in your deployment can be found in the https://ai.azure.com/explore/models?selectedTask=embeddings[Azure AI Studio model explorer]. - -[source,console] ------------------------------------------------------------- -PUT _inference/text_embedding/azure_ai_studio_embeddings -{ - "service": "azureaistudio", - "service_settings": { - "api_key": "", - "target": "", - "provider": "", - "endpoint_type": "" - } -} ------------------------------------------------------------- -// TEST[skip:TBD] - -The next example shows how to create an {infer} endpoint called -`azure_ai_studio_completion` to perform a `completion` task type. - -[source,console] ------------------------------------------------------------- -PUT _inference/completion/azure_ai_studio_completion -{ - "service": "azureaistudio", - "service_settings": { - "api_key": "", - "target": "", - "provider": "", - "endpoint_type": "" - } -} ------------------------------------------------------------- -// TEST[skip:TBD] - -The list of chat completion models that you can choose from in your deployment can be found in the https://ai.azure.com/explore/models?selectedTask=chat-completion[Azure AI Studio model explorer]. 
- -[discrete] -[[inference-example-azureopenai]] -===== Azure OpenAI service - -The following example shows how to create an {infer} endpoint called -`azure_openai_embeddings` to perform a `text_embedding` task type. -Note that we do not specify a model here, as it is defined already via our Azure OpenAI deployment. - -The list of embeddings models that you can choose from in your deployment can be found in the https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#embeddings[Azure models documentation]. - -[source,console] ------------------------------------------------------------- -PUT _inference/text_embedding/azure_openai_embeddings -{ - "service": "azureopenai", - "service_settings": { - "api_key": "", - "resource_name": "", - "deployment_id": "", - "api_version": "2024-02-01" - } -} ------------------------------------------------------------- -// TEST[skip:TBD] - -The next example shows how to create an {infer} endpoint called -`azure_openai_completion` to perform a `completion` task type. - -[source,console] ------------------------------------------------------------- -PUT _inference/completion/azure_openai_completion -{ - "service": "azureopenai", - "service_settings": { - "api_key": "", - "resource_name": "", - "deployment_id": "", - "api_version": "2024-02-01" - } -} ------------------------------------------------------------- -// TEST[skip:TBD] - -The list of chat completion models that you can choose from in your Azure OpenAI deployment can be found at the following places: - -* https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-4-and-gpt-4-turbo-models[GPT-4 and GPT-4 Turbo models] -* https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-35[GPT-3.5] - -[discrete] -[[inference-example-cohere]] -===== Cohere service - -The following example shows how to create an {infer} endpoint called -`cohere-embeddings` to perform a `text_embedding` task type. - -[source,console] ------------------------------------------------------------- -PUT _inference/text_embedding/cohere-embeddings -{ - "service": "cohere", - "service_settings": { - "api_key": "", - "model_id": "embed-english-light-v3.0", - "embedding_type": "byte" - } -} ------------------------------------------------------------- -// TEST[skip:TBD] - - -The following example shows how to create an {infer} endpoint called -`cohere-rerank` to perform a `rerank` task type. - -[source,console] ------------------------------------------------------------- -PUT _inference/rerank/cohere-rerank -{ - "service": "cohere", - "service_settings": { - "api_key": "", - "model_id": "rerank-english-v3.0" - }, - "task_settings": { - "top_n": 10, - "return_documents": true - } -} ------------------------------------------------------------- -// TEST[skip:TBD] - -For more examples, also review the -https://docs.cohere.com/docs/elasticsearch-and-cohere#rerank-search-results-with-cohere-and-elasticsearch[Cohere documentation]. - -[discrete] -[[inference-example-e5]] -===== E5 via the `elasticsearch` service - -The following example shows how to create an {infer} endpoint called -`my-e5-model` to perform a `text_embedding` task type. 
- -[source,console] ------------------------------------------------------------- -PUT _inference/text_embedding/my-e5-model -{ - "service": "elasticsearch", - "service_settings": { - "num_allocations": 1, - "num_threads": 1, - "model_id": ".multilingual-e5-small" <1> - } -} ------------------------------------------------------------- -// TEST[skip:TBD] -<1> The `model_id` must be the ID of one of the built-in E5 models. -Valid values are `.multilingual-e5-small` and `.multilingual-e5-small_linux-x86_64`. -For further details, refer to the {ml-docs}/ml-nlp-e5.html[E5 model documentation]. - -[discrete] -[[inference-example-elser]] -===== ELSER service - -The following example shows how to create an {infer} endpoint called -`my-elser-model` to perform a `sparse_embedding` task type. -Refer to the {ml-docs}/ml-nlp-elser.html[ELSER model documentation] for more info. - -[source,console] ------------------------------------------------------------- -PUT _inference/sparse_embedding/my-elser-model -{ - "service": "elser", - "service_settings": { - "num_allocations": 1, - "num_threads": 1 - } -} ------------------------------------------------------------- -// TEST[skip:TBD] - - -Example response: - -[source,console-result] ------------------------------------------------------------- -{ - "inference_id": "my-elser-model", - "task_type": "sparse_embedding", - "service": "elser", - "service_settings": { - "num_allocations": 1, - "num_threads": 1 - }, - "task_settings": {} -} ------------------------------------------------------------- -// NOTCONSOLE - - -[discrete] -[[inference-example-googleaistudio]] -===== Google AI Studio service - -The following example shows how to create an {infer} endpoint called -`google_ai_studio_completion` to perform a `completion` task type. - -[source,console] ------------------------------------------------------------- -PUT _inference/completion/google_ai_studio_completion -{ - "service": "googleaistudio", - "service_settings": { - "api_key": "", - "model_id": "" - } -} ------------------------------------------------------------- -// TEST[skip:TBD] - - -[discrete] -[[inference-example-hugging-face]] -===== Hugging Face service - -The following example shows how to create an {infer} endpoint called -`hugging-face-embeddings` to perform a `text_embedding` task type. - -[source,console] ------------------------------------------------------------- -PUT _inference/text_embedding/hugging-face-embeddings -{ - "service": "hugging_face", - "service_settings": { - "api_key": "", <1> - "url": "" <2> - } -} ------------------------------------------------------------- -// TEST[skip:TBD] -<1> A valid Hugging Face access token. -You can find on the -https://huggingface.co/settings/tokens[settings page of your account]. -<2> The {infer} endpoint URL you created on Hugging Face. - -Create a new {infer} endpoint on -https://ui.endpoints.huggingface.co/[the Hugging Face endpoint page] to get an endpoint URL. -Select the model you want to use on the new endpoint creation page - for example `intfloat/e5-small-v2` - then select the `Sentence Embeddings` -task under the Advanced configuration section. -Create the endpoint. -Copy the URL after the endpoint initialization has been finished. 
- -[discrete] -[[inference-example-hugging-face-supported-models]] -The list of recommended models for the Hugging Face service: - -* https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2[all-MiniLM-L6-v2] -* https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2[all-MiniLM-L12-v2] -* https://huggingface.co/sentence-transformers/all-mpnet-base-v2[all-mpnet-base-v2] -* https://huggingface.co/intfloat/e5-base-v2[e5-base-v2] -* https://huggingface.co/intfloat/e5-small-v2[e5-small-v2] -* https://huggingface.co/intfloat/multilingual-e5-base[multilingual-e5-base] -* https://huggingface.co/intfloat/multilingual-e5-small[multilingual-e5-small] - -[discrete] -[[inference-example-eland]] -===== Models uploaded by Eland via the elasticsearch service - -The following example shows how to create an {infer} endpoint called -`my-msmarco-minilm-model` to perform a `text_embedding` task type. - -[source,console] ------------------------------------------------------------- -PUT _inference/text_embedding/my-msmarco-minilm-model -{ - "service": "elasticsearch", - "service_settings": { - "num_allocations": 1, - "num_threads": 1, - "model_id": "msmarco-MiniLM-L12-cos-v5" <1> - } -} ------------------------------------------------------------- -// TEST[skip:TBD] -<1> The `model_id` must be the ID of a text embedding model which has already been -{ml-docs}/ml-nlp-import-model.html#ml-nlp-import-script[uploaded through Eland]. - -[discrete] -[[inference-example-mistral]] -===== Mistral Service - -The following example shows how to create an {infer} endpoint called -`mistral-embeddings-test` to perform a `text_embedding` task type. - -[source,console] ------------------------------------------------------------- -PUT _inference/text_embedding/mistral-embeddings-test -{ - "service": "mistral", - "service_settings": { - "api_key": "", - "model": "mistral-embed" <1> - } -} ------------------------------------------------------------- -// TEST[skip:TBD] -<1> The `model` must be the ID of a text embedding model which can be found in the -https://docs.mistral.ai/getting-started/models/[Mistral models documentation] - -[discrete] -[[inference-example-openai]] -===== OpenAI service - -The following example shows how to create an {infer} endpoint called -`openai-embeddings` to perform a `text_embedding` task type. - -[source,console] ------------------------------------------------------------- -PUT _inference/text_embedding/openai-embeddings -{ - "service": "openai", - "service_settings": { - "api_key": "", - "model_id": "text-embedding-ada-002" - } -} ------------------------------------------------------------- -// TEST[skip:TBD] +The create {infer} API enables you to create an {infer} endpoint and configure a {ml} model to perform a specific {infer} task. -The next example shows how to create an {infer} endpoint called -`openai-completion` to perform a `completion` task type. 
+The following services are available through the {infer} API, click the links to review the configuration details of the services: -[source,console] ------------------------------------------------------------- -PUT _inference/completion/openai-completion -{ - "service": "openai", - "service_settings": { - "api_key": "", - "model_id": "gpt-3.5-turbo" - } -} ------------------------------------------------------------- -// TEST[skip:TBD] +* <> +* <> +* <> +* <> (for built-in models and models uploaded through Eland) +* <> +* <> +* <> +* <> +* <> +* <> diff --git a/docs/reference/inference/service-azure-ai-studio.asciidoc b/docs/reference/inference/service-azure-ai-studio.asciidoc new file mode 100644 index 0000000000000..0d711a0d6171f --- /dev/null +++ b/docs/reference/inference/service-azure-ai-studio.asciidoc @@ -0,0 +1,173 @@ +[[infer-service-azure-ai-studio]] +=== Azure AI studio {infer} service + +Creates an {infer} endpoint to perform an {infer} task with the `azureaistudio` service. + + +[discrete] +[[infer-service-azure-ai-studio-api-request]] +==== {api-request-title} + +`PUT /_inference//` + +[discrete] +[[infer-service-azure-ai-studio-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) +include::inference-shared.asciidoc[tag=inference-id] + +``:: +(Required, string) +include::inference-shared.asciidoc[tag=task-type] ++ +-- +Available task types: + +* `completion`, +* `text_embedding`. +-- + +[discrete] +[[infer-service-azure-ai-studio-api-request-body]] +==== {api-request-body-title} + +`service`:: +(Required, string) +The type of service supported for the specified task type. In this case, +`azureaistudio`. + +`service_settings`:: +(Required, object) +include::inference-shared.asciidoc[tag=service-settings] ++ +-- +These settings are specific to the `azureaistudio` service. +-- + +`api_key`::: +(Required, string) +A valid API key of your Azure AI Studio model deployment. +This key can be found on the overview page for your deployment in the management section of your https://ai.azure.com/[Azure AI Studio] account. ++ +-- +include::inference-shared.asciidoc[tag=api-key-admonition] +-- + +`target`::: +(Required, string) +The target URL of your Azure AI Studio model deployment. +This can be found on the overview page for your deployment in the management section of your https://ai.azure.com/[Azure AI Studio] account. + +`provider`::: +(Required, string) +The model provider for your deployment. +Note that some providers may support only certain task types. +Supported providers include: + +* `cohere` - available for `text_embedding` and `completion` task types +* `databricks` - available for `completion` task type only +* `meta` - available for `completion` task type only +* `microsoft_phi` - available for `completion` task type only +* `mistral` - available for `completion` task type only +* `openai` - available for `text_embedding` and `completion` task types + +`endpoint_type`::: +(Required, string) +One of `token` or `realtime`. +Specifies the type of endpoint that is used in your model deployment. +There are https://learn.microsoft.com/en-us/azure/ai-studio/concepts/deployments-overview#billing-for-deploying-and-inferencing-llms-in-azure-ai-studio[two endpoint types available] for deployment through Azure AI Studio. +"Pay as you go" endpoints are billed per token. +For these, you must specify `token` for your `endpoint_type`. +For "real-time" endpoints which are billed per hour of usage, specify `realtime`. 
+ +`rate_limit`::: +(Optional, object) +By default, the `azureaistudio` service sets the number of requests allowed per minute to `240`. +This helps to minimize the number of rate limit errors returned from Azure AI Studio. +To modify this, set the `requests_per_minute` setting of this object in your service settings: ++ +-- +include::inference-shared.asciidoc[tag=request-per-minute-example] +-- + +`task_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=task-settings] ++ +.`task_settings` for the `completion` task type +[%collapsible%closed] +===== +`do_sample`::: +(Optional, float) +Instructs the inference process to perform sampling or not. +Has no effect unless `temperature` or `top_p` is specified. + +`max_new_tokens`::: +(Optional, integer) +Provides a hint for the maximum number of output tokens to be generated. +Defaults to 64. + +`temperature`::: +(Optional, float) +A number in the range of 0.0 to 2.0 that specifies the sampling temperature to use that controls the apparent creativity of generated completions. +Should not be used if `top_p` is specified. + +`top_p`::: +(Optional, float) +A number in the range of 0.0 to 2.0 that is an alternative value to temperature that causes the model to consider the results of the tokens with nucleus sampling probability. +Should not be used if `temperature` is specified. +===== ++ +.`task_settings` for the `text_embedding` task type +[%collapsible%closed] +===== +`user`::: +(optional, string) +Specifies the user issuing the request, which can be used for abuse detection. +===== + + +[discrete] +[[inference-example-azureaistudio]] +==== Azure AI Studio service example + +The following example shows how to create an {infer} endpoint called `azure_ai_studio_embeddings` to perform a `text_embedding` task type. +Note that we do not specify a model here, as it is defined already via our Azure AI Studio deployment. + +The list of embeddings models that you can choose from in your deployment can be found in the https://ai.azure.com/explore/models?selectedTask=embeddings[Azure AI Studio model explorer]. + +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/azure_ai_studio_embeddings +{ + "service": "azureaistudio", + "service_settings": { + "api_key": "", + "target": "", + "provider": "", + "endpoint_type": "" + } +} +------------------------------------------------------------ +// TEST[skip:TBD] + +The next example shows how to create an {infer} endpoint called `azure_ai_studio_completion` to perform a `completion` task type. + +[source,console] +------------------------------------------------------------ +PUT _inference/completion/azure_ai_studio_completion +{ + "service": "azureaistudio", + "service_settings": { + "api_key": "", + "target": "", + "provider": "", + "endpoint_type": "" + } +} +------------------------------------------------------------ +// TEST[skip:TBD] + +The list of chat completion models that you can choose from in your deployment can be found in the https://ai.azure.com/explore/models?selectedTask=chat-completion[Azure AI Studio model explorer]. 
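+
+The completion example above uses the deployment's default generation settings.
+If you want to influence the generated output, the optional `task_settings` described earlier can be added to the same request.
+The following is a minimal sketch, not a recommendation; the endpoint name `azure_ai_studio_completion_with_task_settings` and the `temperature` and `max_new_tokens` values are illustrative placeholders.
+
+[source,console]
+------------------------------------------------------------
+PUT _inference/completion/azure_ai_studio_completion_with_task_settings
+{
+    "service": "azureaistudio",
+    "service_settings": {
+        "api_key": "",
+        "target": "",
+        "provider": "",
+        "endpoint_type": ""
+    },
+    "task_settings": {
+        "temperature": 0.7,
+        "max_new_tokens": 64
+    }
+}
+------------------------------------------------------------
+// TEST[skip:TBD]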
diff --git a/docs/reference/inference/service-azure-openai.asciidoc b/docs/reference/inference/service-azure-openai.asciidoc
new file mode 100644
index 0000000000000..6f03c5966d9e6
--- /dev/null
+++ b/docs/reference/inference/service-azure-openai.asciidoc
@@ -0,0 +1,156 @@
+[[infer-service-azure-openai]]
+=== Azure OpenAI {infer} service
+
+Creates an {infer} endpoint to perform an {infer} task with the `azureopenai` service.
+
+
+[discrete]
+[[infer-service-azure-openai-api-request]]
+==== {api-request-title}
+
+`PUT /_inference//`
+
+[discrete]
+[[infer-service-azure-openai-api-path-params]]
+==== {api-path-parms-title}
+
+``::
+(Required, string)
+include::inference-shared.asciidoc[tag=inference-id]
+
+``::
+(Required, string)
+include::inference-shared.asciidoc[tag=task-type]
++
+--
+Available task types:
+
+* `completion`,
+* `text_embedding`.
+--
+
+[discrete]
+[[infer-service-azure-openai-api-request-body]]
+==== {api-request-body-title}
+
+`service`::
+(Required, string)
+The type of service supported for the specified task type. In this case,
+`azureopenai`.
+
+`service_settings`::
+(Required, object)
+include::inference-shared.asciidoc[tag=service-settings]
++
+--
+These settings are specific to the `azureopenai` service.
+--
+
+`api_key` or `entra_id`:::
+(Required, string)
+You must provide _either_ an API key or an Entra ID.
+If you do not provide either, or provide both, you will receive an error when trying to create your model.
+See the https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#authentication[Azure OpenAI Authentication documentation] for more details on these authentication types.
++
+--
+include::inference-shared.asciidoc[tag=api-key-admonition]
+--
+
+`resource_name`:::
+(Required, string)
+The name of your Azure OpenAI resource.
+You can find this from the https://portal.azure.com/#view/HubsExtension/BrowseAll[list of resources] in the Azure Portal for your subscription.
+
+`deployment_id`:::
+(Required, string)
+The deployment name of your deployed models.
+Your Azure OpenAI deployments can be found through the https://oai.azure.com/[Azure OpenAI Studio] portal that is linked to your subscription.
+
+`api_version`:::
+(Required, string)
+The Azure API version ID to use.
+We recommend using the https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#embeddings[latest supported non-preview version].
+
+`rate_limit`:::
+(Optional, object)
+The `azureopenai` service sets a default number of requests allowed per minute depending on the task type.
+For `text_embedding` it is set to `1440`.
+For `completion` it is set to `120`.
+This helps to minimize the number of rate limit errors returned from Azure.
+To modify this, set the `requests_per_minute` setting of this object in your service settings:
++
+--
+include::inference-shared.asciidoc[tag=request-per-minute-example]
+
+More information about the rate limits for Azure can be found in the https://learn.microsoft.com/en-us/azure/ai-services/openai/quotas-limits[Quota limits docs] and https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/quota?tabs=rest[How to change the quotas].
+--
+
+`task_settings`::
+(Optional, object)
+include::inference-shared.asciidoc[tag=task-settings]
++
+.`task_settings` for the `completion` task type
+[%collapsible%closed]
+=====
+`user`:::
+(optional, string)
+Specifies the user issuing the request, which can be used for abuse detection.
+===== ++ +.`task_settings` for the `text_embedding` task type +[%collapsible%closed] +===== +`user`::: +(optional, string) +Specifies the user issuing the request, which can be used for abuse detection. +===== + + + +[discrete] +[[inference-example-azure-openai]] +==== Azure OpenAI service example + +The following example shows how to create an {infer} endpoint called +`azure_openai_embeddings` to perform a `text_embedding` task type. +Note that we do not specify a model here, as it is defined already via our Azure OpenAI deployment. + +The list of embeddings models that you can choose from in your deployment can be found in the https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#embeddings[Azure models documentation]. + +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/azure_openai_embeddings +{ + "service": "azureopenai", + "service_settings": { + "api_key": "", + "resource_name": "", + "deployment_id": "", + "api_version": "2024-02-01" + } +} +------------------------------------------------------------ +// TEST[skip:TBD] + +The next example shows how to create an {infer} endpoint called +`azure_openai_completion` to perform a `completion` task type. + +[source,console] +------------------------------------------------------------ +PUT _inference/completion/azure_openai_completion +{ + "service": "azureopenai", + "service_settings": { + "api_key": "", + "resource_name": "", + "deployment_id": "", + "api_version": "2024-02-01" + } +} +------------------------------------------------------------ +// TEST[skip:TBD] + +The list of chat completion models that you can choose from in your Azure OpenAI deployment can be found at the following places: + +* https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-4-and-gpt-4-turbo-models[GPT-4 and GPT-4 Turbo models] +* https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-35[GPT-3.5] \ No newline at end of file diff --git a/docs/reference/inference/service-cohere.asciidoc b/docs/reference/inference/service-cohere.asciidoc new file mode 100644 index 0000000000000..52d71e0bc02a5 --- /dev/null +++ b/docs/reference/inference/service-cohere.asciidoc @@ -0,0 +1,204 @@ +[[infer-service-cohere]] +=== Cohere {infer} service + +Creates an {infer} endpoint to perform an {infer} task with the `cohere` service. + + +[discrete] +[[infer-service-cohere-api-request]] +==== {api-request-title} + +`PUT /_inference//` + +[discrete] +[[infer-service-cohere-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) +include::inference-shared.asciidoc[tag=inference-id] + +``:: +(Required, string) +include::inference-shared.asciidoc[tag=task-type] ++ +-- +Available task types: + +* `completion`, +* `rerank`, +* `text_embedding`. +-- + +[discrete] +[[infer-service-cohere-api-request-body]] +==== {api-request-body-title} + +`service`:: +(Required, string) +The type of service supported for the specified task type. In this case, +`cohere`. + +`service_settings`:: +(Required, object) +include::inference-shared.asciidoc[tag=service-settings] ++ +-- +These settings are specific to the `cohere` service. +-- + +`api_key`::: +(Required, string) +A valid API key of your Cohere account. +You can find your Cohere API keys or you can create a new one +https://dashboard.cohere.com/api-keys[on the API keys settings page]. 
++
+--
+include::inference-shared.asciidoc[tag=api-key-admonition]
+--
+
+`rate_limit`:::
+(Optional, object)
+By default, the `cohere` service sets the number of requests allowed per minute to `10000`.
+This value is the same for all task types.
+This helps to minimize the number of rate limit errors returned from Cohere.
+To modify this, set the `requests_per_minute` setting of this object in your service settings:
++
+--
+include::inference-shared.asciidoc[tag=request-per-minute-example]
+
+More information about Cohere's rate limits can be found in https://docs.cohere.com/docs/going-live#production-key-specifications[Cohere's production key docs].
+--
++
+.`service_settings` for the `completion` task type
+[%collapsible%closed]
+=====
+`model_id`::
+(Optional, string)
+The name of the model to use for the {infer} task.
+To review the available `completion` models, refer to the
+https://docs.cohere.com/docs/models#command[Cohere docs].
+=====
++
+.`service_settings` for the `rerank` task type
+[%collapsible%closed]
+=====
+`model_id`::
+(Optional, string)
+The name of the model to use for the {infer} task.
+To review the available `rerank` models, refer to the
+https://docs.cohere.com/reference/rerank-1[Cohere docs].
+=====
++
+.`service_settings` for the `text_embedding` task type
+[%collapsible%closed]
+=====
+`embedding_type`:::
+(Optional, string)
+Specifies the types of embeddings you want to get back.
+Defaults to `float`.
+Valid values are:
+* `byte`: use it for signed int8 embeddings (this is a synonym of `int8`).
+* `float`: use it for the default float embeddings.
+* `int8`: use it for signed int8 embeddings.
+
+`model_id`:::
+(Optional, string)
+The name of the model to use for the {infer} task.
+To review the available `text_embedding` models, refer to the
+https://docs.cohere.com/reference/embed[Cohere docs].
+The default value for `text_embedding` is `embed-english-v2.0`.
+
+`similarity`:::
+(Optional, string)
+Similarity measure. One of `cosine`, `dot_product`, `l2_norm`.
+Defaults based on the `embedding_type` (`float` -> `dot_product`, `int8/byte` -> `cosine`).
+=====
+
+
+
+`task_settings`::
+(Optional, object)
+include::inference-shared.asciidoc[tag=task-settings]
++
+.`task_settings` for the `rerank` task type
+[%collapsible%closed]
+=====
+`return_documents`::
+(Optional, boolean)
+Specify whether to return doc text within the results.
+
+`top_n`::
+(Optional, integer)
+The number of most relevant documents to return. Defaults to the number of the documents.
+=====
++
+.`task_settings` for the `text_embedding` task type
+[%collapsible%closed]
+=====
+`input_type`:::
+(Optional, string)
+Specifies the type of input passed to the model.
+Valid values are:
+* `classification`: use it for embeddings passed through a text classifier.
+* `clustering`: use it for the embeddings run through a clustering algorithm.
+* `ingest`: use it for storing document embeddings in a vector database.
+* `search`: use it for storing embeddings of search queries run against a vector database to find relevant documents.
++
+IMPORTANT: The `input_type` field is required when using embedding models `v3` and higher.
+
+`truncate`:::
+(Optional, string)
+Specifies how the API handles inputs longer than the maximum token length.
+Defaults to `END`.
+Valid values are:
+* `NONE`: when the input exceeds the maximum input token length an error is returned.
+* `START`: when the input exceeds the maximum input token length the start of the input is discarded.
+* `END`: when the input exceeds the maximum input token length the end of the input is discarded. +===== + + +[discrete] +[[inference-example-cohere]] +==== Cohere service examples + +The following example shows how to create an {infer} endpoint called +`cohere-embeddings` to perform a `text_embedding` task type. + +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/cohere-embeddings +{ + "service": "cohere", + "service_settings": { + "api_key": "", + "model_id": "embed-english-light-v3.0", + "embedding_type": "byte" + } +} +------------------------------------------------------------ +// TEST[skip:TBD] + + +The following example shows how to create an {infer} endpoint called +`cohere-rerank` to perform a `rerank` task type. + +[source,console] +------------------------------------------------------------ +PUT _inference/rerank/cohere-rerank +{ + "service": "cohere", + "service_settings": { + "api_key": "", + "model_id": "rerank-english-v3.0" + }, + "task_settings": { + "top_n": 10, + "return_documents": true + } +} +------------------------------------------------------------ +// TEST[skip:TBD] + +For more examples, also review the +https://docs.cohere.com/docs/elasticsearch-and-cohere#rerank-search-results-with-cohere-and-elasticsearch[Cohere documentation]. \ No newline at end of file diff --git a/docs/reference/inference/service-elasticsearch.asciidoc b/docs/reference/inference/service-elasticsearch.asciidoc new file mode 100644 index 0000000000000..3b9b5b1928d7b --- /dev/null +++ b/docs/reference/inference/service-elasticsearch.asciidoc @@ -0,0 +1,122 @@ +[[infer-service-elasticsearch]] +=== Elasticsearch {infer} service + +Creates an {infer} endpoint to perform an {infer} task with the `elasticsearch` service. + + +[discrete] +[[infer-service-elasticsearch-api-request]] +==== {api-request-title} + +`PUT /_inference//` + +[discrete] +[[infer-service-elasticsearch-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) +include::inference-shared.asciidoc[tag=inference-id] + +``:: +(Required, string) +include::inference-shared.asciidoc[tag=task-type] ++ +-- +Available task types: + +* `rerank`, +* `text_embedding`. +-- + +[discrete] +[[infer-service-elasticsearch-api-request-body]] +==== {api-request-body-title} + +`service`:: +(Required, string) +The type of service supported for the specified task type. In this case, +`elasticsearch`. + +`service_settings`:: +(Required, object) +include::inference-shared.asciidoc[tag=service-settings] ++ +-- +These settings are specific to the `elasticsearch` service. +-- + +`model_id`::: +(Required, string) +The name of the model to use for the {infer} task. +It can be the ID of either a built-in model (for example, `.multilingual-e5-small` for E5) or a text embedding model already +{ml-docs}/ml-nlp-import-model.html#ml-nlp-import-script[uploaded through Eland]. + +`num_allocations`::: +(Required, integer) +The total number of allocations this model is assigned across machine learning nodes. Increasing this value generally increases the throughput. + +`num_threads`::: +(Required, integer) +Sets the number of threads used by each model allocation during inference. This generally increases the speed per inference request. The inference process is a compute-bound process; `threads_per_allocations` must not exceed the number of available allocated processors per node. +Must be a power of 2. Max allowed value is 32. 
+ +`task_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=task-settings] ++ +.`task_settings` for the `rerank` task type +[%collapsible%closed] +===== +`return_documents`::: +(Optional, Boolean) +Returns the document instead of only the index. Defaults to `true`. +===== + + +[discrete] +[[inference-example-elasticsearch]] +==== E5 via the `elasticsearch` service + +The following example shows how to create an {infer} endpoint called +`my-e5-model` to perform a `text_embedding` task type. + +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/my-e5-model +{ + "service": "elasticsearch", + "service_settings": { + "num_allocations": 1, + "num_threads": 1, + "model_id": ".multilingual-e5-small" <1> + } +} +------------------------------------------------------------ +// TEST[skip:TBD] +<1> The `model_id` must be the ID of one of the built-in E5 models. +Valid values are `.multilingual-e5-small` and `.multilingual-e5-small_linux-x86_64`. +For further details, refer to the {ml-docs}/ml-nlp-e5.html[E5 model documentation]. + +[discrete] +[[inference-example-eland]] +==== Models uploaded by Eland via the elasticsearch service + +The following example shows how to create an {infer} endpoint called +`my-msmarco-minilm-model` to perform a `text_embedding` task type. + +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/my-msmarco-minilm-model +{ + "service": "elasticsearch", + "service_settings": { + "num_allocations": 1, + "num_threads": 1, + "model_id": "msmarco-MiniLM-L12-cos-v5" <1> + } +} +------------------------------------------------------------ +// TEST[skip:TBD] +<1> The `model_id` must be the ID of a text embedding model which has already been +{ml-docs}/ml-nlp-import-model.html#ml-nlp-import-script[uploaded through Eland]. \ No newline at end of file diff --git a/docs/reference/inference/service-elser.asciidoc b/docs/reference/inference/service-elser.asciidoc new file mode 100644 index 0000000000000..829ff4968c5be --- /dev/null +++ b/docs/reference/inference/service-elser.asciidoc @@ -0,0 +1,95 @@ +[[infer-service-elser]] +=== ELSER {infer} service + +Creates an {infer} endpoint to perform an {infer} task with the `elser` service. + + +[discrete] +[[infer-service-elser-api-request]] +==== {api-request-title} + +`PUT /_inference//` + +[discrete] +[[infer-service-elser-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) +include::inference-shared.asciidoc[tag=inference-id] + +``:: +(Required, string) +include::inference-shared.asciidoc[tag=task-type] ++ +-- +Available task types: + +* `sparse_embedding`. +-- + +[discrete] +[[infer-service-elser-api-request-body]] +==== {api-request-body-title} + +`service`:: +(Required, string) +The type of service supported for the specified task type. In this case, +`elser`. + +`service_settings`:: +(Required, object) +include::inference-shared.asciidoc[tag=service-settings] ++ +-- +These settings are specific to the `elser` service. +-- + +`num_allocations`::: +(Required, integer) +The total number of allocations this model is assigned across machine learning nodes. Increasing this value generally increases the throughput. + +`num_threads`::: +(Required, integer) +Sets the number of threads used by each model allocation during inference. This generally increases the speed per inference request. 
The inference process is a compute-bound process; `threads_per_allocations` must not exceed the number of available allocated processors per node. +Must be a power of 2. Max allowed value is 32. + + +[discrete] +[[inference-example-elser]] +==== ELSER service example + +The following example shows how to create an {infer} endpoint called +`my-elser-model` to perform a `sparse_embedding` task type. +Refer to the {ml-docs}/ml-nlp-elser.html[ELSER model documentation] for more info. + +[source,console] +------------------------------------------------------------ +PUT _inference/sparse_embedding/my-elser-model +{ + "service": "elser", + "service_settings": { + "num_allocations": 1, + "num_threads": 1 + } +} +------------------------------------------------------------ +// TEST[skip:TBD] + + +Example response: + +[source,console-result] +------------------------------------------------------------ +{ + "inference_id": "my-elser-model", + "task_type": "sparse_embedding", + "service": "elser", + "service_settings": { + "num_allocations": 1, + "num_threads": 1 + }, + "task_settings": {} +} +------------------------------------------------------------ +// NOTCONSOLE \ No newline at end of file diff --git a/docs/reference/inference/service-google-ai-studio.asciidoc b/docs/reference/inference/service-google-ai-studio.asciidoc new file mode 100644 index 0000000000000..25aa89cd49110 --- /dev/null +++ b/docs/reference/inference/service-google-ai-studio.asciidoc @@ -0,0 +1,87 @@ +[[infer-service-google-ai-studio]] +=== Google AI Studio {infer} service + +Creates an {infer} endpoint to perform an {infer} task with the `googleaistudio` service. + + +[discrete] +[[infer-service-google-ai-studio-api-request]] +==== {api-request-title} + +`PUT /_inference//` + +[discrete] +[[infer-service-google-ai-studio-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) +include::inference-shared.asciidoc[tag=inference-id] + +``:: +(Required, string) +include::inference-shared.asciidoc[tag=task-type] ++ +-- +Available task types: + +* `completion`, +* `text_embedding`. +-- + +[discrete] +[[infer-service-google-ai-studio-api-request-body]] +==== {api-request-body-title} + +`service`:: +(Required, string) +The type of service supported for the specified task type. In this case, +`googleaistudio`. + +`service_settings`:: +(Required, object) +include::inference-shared.asciidoc[tag=service-settings] ++ +-- +These settings are specific to the `googleaistudio` service. +-- + +`api_key`::: +(Required, string) +A valid API key for the Google Gemini API. + +`model_id`::: +(Required, string) +The name of the model to use for the {infer} task. +You can find the supported models at https://ai.google.dev/gemini-api/docs/models/gemini[Gemini API models]. + +`rate_limit`::: +(Optional, object) +By default, the `googleaistudio` service sets the number of requests allowed per minute to `360`. +This helps to minimize the number of rate limit errors returned from Google AI Studio. +To modify this, set the `requests_per_minute` setting of this object in your service settings: ++ +-- +include::inference-shared.asciidoc[tag=request-per-minute-example] +-- + + +[discrete] +[[inference-example-google-ai-studio]] +==== Google AI Studio service example + +The following example shows how to create an {infer} endpoint called +`google_ai_studio_completion` to perform a `completion` task type. 
+
+[source,console]
+------------------------------------------------------------
+PUT _inference/completion/google_ai_studio_completion
+{
+  "service": "googleaistudio",
+  "service_settings": {
+    "api_key": "",
+    "model_id": ""
+  }
+}
+------------------------------------------------------------
+// TEST[skip:TBD]
\ No newline at end of file
diff --git a/docs/reference/inference/service-google-vertex-ai.asciidoc b/docs/reference/inference/service-google-vertex-ai.asciidoc
new file mode 100644
index 0000000000000..640553ab74626
--- /dev/null
+++ b/docs/reference/inference/service-google-vertex-ai.asciidoc
@@ -0,0 +1,135 @@
+[[infer-service-google-vertex-ai]]
+=== Google Vertex AI {infer} service
+
+Creates an {infer} endpoint to perform an {infer} task with the `googlevertexai` service.
+
+
+[discrete]
+[[infer-service-google-vertex-ai-api-request]]
+==== {api-request-title}
+
+`PUT /_inference//`
+
+[discrete]
+[[infer-service-google-vertex-ai-path-params]]
+==== {api-path-parms-title}
+
+``::
+(Required, string)
+include::inference-shared.asciidoc[tag=inference-id]
+
+``::
+(Required, string)
+include::inference-shared.asciidoc[tag=task-type]
++
+--
+Available task types:
+
+* `rerank`
+* `text_embedding`.
+--
+
+[discrete]
+[[infer-service-google-vertex-ai-api-request-body]]
+==== {api-request-body-title}
+
+`service`::
+(Required, string)
+The type of service supported for the specified task type. In this case,
+`googlevertexai`.
+
+`service_settings`::
+(Required, object)
+include::inference-shared.asciidoc[tag=service-settings]
++
+--
+These settings are specific to the `googlevertexai` service.
+--
+
+`service_account_json`:::
+(Required, string)
+A valid service account in JSON format for the Google Vertex AI API.
+
+`model_id`:::
+(Required, string)
+The name of the model to use for the {infer} task.
+You can find the supported models at https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/text-embeddings-api[Text embeddings API].
+
+`location`:::
+(Required, string)
+The name of the location to use for the {infer} task.
+You can find the supported locations at https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations[Generative AI on Vertex AI locations].
+
+`project_id`:::
+(Required, string)
+The name of the project to use for the {infer} task.
+
+`rate_limit`:::
+(Optional, object)
+By default, the `googlevertexai` service sets the number of requests allowed per minute to `30000`.
+This helps to minimize the number of rate limit errors returned from Google Vertex AI.
+To modify this, set the `requests_per_minute` setting of this object in your service settings:
++
+--
+include::inference-shared.asciidoc[tag=request-per-minute-example]
+
+More information about the rate limits for Google Vertex AI can be found in the https://cloud.google.com/vertex-ai/docs/quotas[Google Vertex AI Quotas docs].
+--
+
+`task_settings`::
+(Optional, object)
+include::inference-shared.asciidoc[tag=task-settings]
++
+.`task_settings` for the `rerank` task type
+[%collapsible%closed]
+=====
+`top_n`:::
+(Optional, integer)
+Specifies the number of top documents to return.
+=====
++
+.`task_settings` for the `text_embedding` task type
+[%collapsible%closed]
+=====
+`auto_truncate`:::
+(Optional, boolean)
+Specifies whether the API truncates inputs longer than the maximum token length automatically.
+===== + +[discrete] +[[inference-example-google-vertex-ai]] +==== Google Vertex AI service example + +The following example shows how to create an {infer} endpoint called +`google_vertex_ai_embeddings` to perform a `text_embedding` task type. + +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/google_vertex_ai_embeddings +{ + "service": "googlevertexai", + "service_settings": { + "service_account_json": "", + "model_id": "", + "location": "", + "project_id": "" + } +} +------------------------------------------------------------ +// TEST[skip:TBD] + +The next example shows how to create an {infer} endpoint called +`google_vertex_ai_rerank` to perform a `rerank` task type. + +[source,console] +------------------------------------------------------------ +PUT _inference/rerank/google_vertex_ai_rerank +{ + "service": "googlevertexai", + "service_settings": { + "service_account_json": "", + "project_id": "" + } +} +------------------------------------------------------------ +// TEST[skip:TBD] diff --git a/docs/reference/inference/service-hugging-face.asciidoc b/docs/reference/inference/service-hugging-face.asciidoc new file mode 100644 index 0000000000000..177a15177d21f --- /dev/null +++ b/docs/reference/inference/service-hugging-face.asciidoc @@ -0,0 +1,114 @@ +[[infer-service-hugging-face]] +=== HuggingFace {infer} service + +Creates an {infer} endpoint to perform an {infer} task with the `hugging_face` service. + + +[discrete] +[[infer-service-hugging-face-api-request]] +==== {api-request-title} + +`PUT /_inference//` + +[discrete] +[[infer-service-hugging-face-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) +include::inference-shared.asciidoc[tag=inference-id] + +``:: +(Required, string) +include::inference-shared.asciidoc[tag=task-type] ++ +-- +Available task types: + +* `text_embedding`. +-- + +[discrete] +[[infer-service-hugging-face-api-request-body]] +==== {api-request-body-title} + +`service`:: +(Required, string) +The type of service supported for the specified task type. In this case, +`hugging_face`. + +`service_settings`:: +(Required, object) +include::inference-shared.asciidoc[tag=service-settings] ++ +-- +These settings are specific to the `hugging_face` service. +-- + +`api_key`::: +(Required, string) +A valid access token of your Hugging Face account. +You can find your Hugging Face access tokens or you can create a new one +https://huggingface.co/settings/tokens[on the settings page]. ++ +-- +include::inference-shared.asciidoc[tag=api-key-admonition] +-- + +`url`::: +(Required, string) +The URL endpoint to use for the requests. + +`rate_limit`::: +(Optional, object) +By default, the `huggingface` service sets the number of requests allowed per minute to `3000`. +This helps to minimize the number of rate limit errors returned from Hugging Face. +To modify this, set the `requests_per_minute` setting of this object in your service settings: ++ +-- +include::inference-shared.asciidoc[tag=request-per-minute-example] +-- + + +[discrete] +[[inference-example-hugging-face]] +==== Hugging Face service example + +The following example shows how to create an {infer} endpoint called +`hugging-face-embeddings` to perform a `text_embedding` task type. 
+ +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/hugging-face-embeddings +{ + "service": "hugging_face", + "service_settings": { + "api_key": "", <1> + "url": "" <2> + } +} +------------------------------------------------------------ +// TEST[skip:TBD] +<1> A valid Hugging Face access token. +You can find on the +https://huggingface.co/settings/tokens[settings page of your account]. +<2> The {infer} endpoint URL you created on Hugging Face. + +Create a new {infer} endpoint on +https://ui.endpoints.huggingface.co/[the Hugging Face endpoint page] to get an endpoint URL. +Select the model you want to use on the new endpoint creation page - for example `intfloat/e5-small-v2` - then select the `Sentence Embeddings` +task under the Advanced configuration section. +Create the endpoint. +Copy the URL after the endpoint initialization has been finished. + +[discrete] +[[inference-example-hugging-face-supported-models]] +The list of recommended models for the Hugging Face service: + +* https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2[all-MiniLM-L6-v2] +* https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2[all-MiniLM-L12-v2] +* https://huggingface.co/sentence-transformers/all-mpnet-base-v2[all-mpnet-base-v2] +* https://huggingface.co/intfloat/e5-base-v2[e5-base-v2] +* https://huggingface.co/intfloat/e5-small-v2[e5-small-v2] +* https://huggingface.co/intfloat/multilingual-e5-base[multilingual-e5-base] +* https://huggingface.co/intfloat/multilingual-e5-small[multilingual-e5-small] \ No newline at end of file diff --git a/docs/reference/inference/service-mistral.asciidoc b/docs/reference/inference/service-mistral.asciidoc new file mode 100644 index 0000000000000..077e610191705 --- /dev/null +++ b/docs/reference/inference/service-mistral.asciidoc @@ -0,0 +1,99 @@ +[[infer-service-mistral]] +=== Mistral {infer} service + +Creates an {infer} endpoint to perform an {infer} task with the `mistral` service. + + +[discrete] +[[infer-service-mistral-api-request]] +==== {api-request-title} + +`PUT /_inference//` + +[discrete] +[[infer-service-mistral-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) +include::inference-shared.asciidoc[tag=inference-id] + +``:: +(Required, string) +include::inference-shared.asciidoc[tag=task-type] ++ +-- +Available task types: + +* `text_embedding`. +-- + +[discrete] +[[infer-service-mistral-api-request-body]] +==== {api-request-body-title} + +`service`:: +(Required, string) +The type of service supported for the specified task type. In this case, +`mistral`. + +`service_settings`:: +(Required, object) +include::inference-shared.asciidoc[tag=service-settings] ++ +-- +These settings are specific to the `mistral` service. +-- + +`api_key`::: +(Required, string) +A valid API key for your Mistral account. +You can find your Mistral API keys or you can create a new one +https://console.mistral.ai/api-keys/[on the API Keys page]. ++ +-- +include::inference-shared.asciidoc[tag=api-key-admonition] +-- + +`model`::: +(Required, string) +The name of the model to use for the {infer} task. +Refer to the https://docs.mistral.ai/getting-started/models/[Mistral models documentation] +for the list of available text embedding models. + +`max_input_tokens`::: +(Optional, integer) +Allows you to specify the maximum number of tokens per input before chunking occurs. 
+ +`rate_limit`::: +(Optional, object) +By default, the `mistral` service sets the number of requests allowed per minute to `240`. +This helps to minimize the number of rate limit errors returned from the Mistral API. +To modify this, set the `requests_per_minute` setting of this object in your service settings: ++ +-- +include::inference-shared.asciidoc[tag=request-per-minute-example] +-- + + +[discrete] +[[inference-example-mistral]] +==== Mistral service example + +The following example shows how to create an {infer} endpoint called +`mistral-embeddings-test` to perform a `text_embedding` task type. + +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/mistral-embeddings-test +{ + "service": "mistral", + "service_settings": { + "api_key": "", + "model": "mistral-embed" <1> + } +} +------------------------------------------------------------ +// TEST[skip:TBD] +<1> The `model` must be the ID of a text embedding model which can be found in the +https://docs.mistral.ai/getting-started/models/[Mistral models documentation]. \ No newline at end of file diff --git a/docs/reference/inference/service-openai.asciidoc b/docs/reference/inference/service-openai.asciidoc new file mode 100644 index 0000000000000..075e76dc7d741 --- /dev/null +++ b/docs/reference/inference/service-openai.asciidoc @@ -0,0 +1,147 @@ +[[infer-service-openai]] +=== OpenAI {infer} service + +Creates an {infer} endpoint to perform an {infer} task with the `openai` service. + + +[discrete] +[[infer-service-openai-api-request]] +==== {api-request-title} + +`PUT /_inference//` + +[discrete] +[[infer-service-openai-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) +include::inference-shared.asciidoc[tag=inference-id] + +``:: +(Required, string) +include::inference-shared.asciidoc[tag=task-type] ++ +-- +Available task types: + +* `completion`, +* `text_embedding`. +-- + +[discrete] +[[infer-service-openai-api-request-body]] +==== {api-request-body-title} + +`service`:: +(Required, string) +The type of service supported for the specified task type. In this case, +`openai`. + +`service_settings`:: +(Required, object) +include::inference-shared.asciidoc[tag=service-settings] ++ +-- +These settings are specific to the `openai` service. +-- + +`api_key`::: +(Required, string) +A valid API key of your OpenAI account. +You can find your OpenAI API keys in your OpenAI account under the +https://platform.openai.com/api-keys[API keys section]. ++ +-- +include::inference-shared.asciidoc[tag=api-key-admonition] +-- + +`model_id`::: +(Required, string) +The name of the model to use for the {infer} task. +Refer to the +https://platform.openai.com/docs/guides/embeddings/what-are-embeddings[OpenAI documentation] +for the list of available text embedding models. + +`organization_id`::: +(Optional, string) +The unique identifier of your organization. +You can find the Organization ID in your OpenAI account under +https://platform.openai.com/account/organization[**Settings** > **Organizations**]. + +`url`::: +(Optional, string) +The URL endpoint to use for the requests. +Can be changed for testing purposes. +Defaults to `https://api.openai.com/v1/embeddings`. + +`rate_limit`::: +(Optional, object) +The `openai` service sets a default number of requests allowed per minute depending on the task type. +For `text_embedding` it is set to `3000`. +For `completion` it is set to `500`. +This helps to minimize the number of rate limit errors returned from OpenAI. 
+To modify this, set the `requests_per_minute` setting of this object in your service settings: ++ +-- +include::inference-shared.asciidoc[tag=request-per-minute-example] + +More information about the rate limits for OpenAI can be found in your https://platform.openai.com/account/limits[Account limits]. +-- + +`task_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=task-settings] ++ +.`task_settings` for the `completion` task type +[%collapsible%closed] +===== +`user`::: +(Optional, string) +Specifies the user issuing the request, which can be used for abuse detection. +===== ++ +.`task_settings` for the `text_embedding` task type +[%collapsible%closed] +===== +`user`::: +(optional, string) +Specifies the user issuing the request, which can be used for abuse detection. +===== + + +[discrete] +[[inference-example-openai]] +==== OpenAI service example + +The following example shows how to create an {infer} endpoint called +`openai-embeddings` to perform a `text_embedding` task type. + +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/openai-embeddings +{ + "service": "openai", + "service_settings": { + "api_key": "", + "model_id": "text-embedding-ada-002" + } +} +------------------------------------------------------------ +// TEST[skip:TBD] + +The next example shows how to create an {infer} endpoint called +`openai-completion` to perform a `completion` task type. + +[source,console] +------------------------------------------------------------ +PUT _inference/completion/openai-completion +{ + "service": "openai", + "service_settings": { + "api_key": "", + "model_id": "gpt-3.5-turbo" + } +} +------------------------------------------------------------ +// TEST[skip:TBD] \ No newline at end of file diff --git a/docs/reference/ingest/apis/enrich/enrich-stats.asciidoc b/docs/reference/ingest/apis/enrich/enrich-stats.asciidoc index ad1ca62e37bbf..85273ee584b9a 100644 --- a/docs/reference/ingest/apis/enrich/enrich-stats.asciidoc +++ b/docs/reference/ingest/apis/enrich/enrich-stats.asciidoc @@ -121,6 +121,10 @@ The amount of time in milliseconds spent fetching data from the cache on success `misses_time_in_millis`:: (Long) The amount of time in milliseconds spent fetching data from the enrich index and updating the cache, on cache misses only. + +`size_in_bytes`:: +(Long) +An _approximation_ of the size in bytes that the enrich cache takes up on the heap. 
-- [[enrich-stats-api-example]] @@ -172,7 +176,8 @@ The API returns the following response: "misses": 0, "evictions": 0, "hits_time_in_millis": 0, - "misses_time_in_millis": 0 + "misses_time_in_millis": 0, + "size_in_bytes": 0 } ] } @@ -187,3 +192,4 @@ The API returns the following response: // TESTRESPONSE[s/"evictions": 0/"evictions" : $body.cache_stats.0.evictions/] // TESTRESPONSE[s/"hits_time_in_millis": 0/"hits_time_in_millis" : $body.cache_stats.0.hits_time_in_millis/] // TESTRESPONSE[s/"misses_time_in_millis": 0/"misses_time_in_millis" : $body.cache_stats.0.misses_time_in_millis/] +// TESTRESPONSE[s/"size_in_bytes": 0/"size_in_bytes" : $body.cache_stats.0.size_in_bytes/] diff --git a/docs/reference/ingest/processors/redact.asciidoc b/docs/reference/ingest/processors/redact.asciidoc index 2004e48c2ed78..6706106e92655 100644 --- a/docs/reference/ingest/processors/redact.asciidoc +++ b/docs/reference/ingest/processors/redact.asciidoc @@ -4,8 +4,6 @@ Redact ++++ -experimental::[] - The Redact processor uses the Grok rules engine to obscure text in the input document matching the given Grok patterns. The processor can be used to obscure Personal Identifying Information (PII) by configuring it to diff --git a/docs/reference/ingest/search-nlp-tutorial.asciidoc b/docs/reference/ingest/search-nlp-tutorial.asciidoc index d5eacb6951023..afdceeeb8bac2 100644 --- a/docs/reference/ingest/search-nlp-tutorial.asciidoc +++ b/docs/reference/ingest/search-nlp-tutorial.asciidoc @@ -256,4 +256,3 @@ In this guide, we covered how to: * {ml-docs}/ml-nlp-deploy-models.html[Deploying a model ML guide^] * {ml-docs}/ml-nlp-import-model.html#ml-nlp-authentication[Eland Authentication methods^] * <> -// * <> diff --git a/docs/reference/mapping/fields/source-field.asciidoc b/docs/reference/mapping/fields/source-field.asciidoc index ec824e421e015..903b301ab1a96 100644 --- a/docs/reference/mapping/fields/source-field.asciidoc +++ b/docs/reference/mapping/fields/source-field.asciidoc @@ -6,11 +6,11 @@ at index time. The `_source` field itself is not indexed (and thus is not searchable), but it is stored so that it can be returned when executing _fetch_ requests, like <> or <>. -If disk usage is important to you then have a look at -<> which shrinks disk usage at the cost of -only supporting a subset of mappings and slower fetches or (not recommended) -<> which also shrinks disk -usage but disables many features. +If disk usage is important to you, then consider the following options: + +- Using <>, which reconstructs source content at the time of retrieval instead of storing it on disk. This shrinks disk usage, at the cost of slower access to `_source` in <> and <> queries. +- <>. This shrinks disk +usage but disables features that rely on `_source`. include::synthetic-source.asciidoc[] @@ -43,7 +43,7 @@ available then a number of features are not supported: * The <>, <>, and <> APIs. -* In the {kib} link:{kibana-ref}/discover.html[Discover] application, field data will not be displayed. +* In the {kib} link:{kibana-ref}/discover.html[Discover] application, field data will not be displayed. * On the fly <>. 
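+
+To make the two `_source` options described at the top of this page concrete, the following is a minimal sketch of the corresponding mappings.
+The index names `my-index-synthetic` and `my-index-disabled` are illustrative placeholders.
+
+[source,console]
+------------------------------------------------------------
+PUT my-index-synthetic
+{
+  "mappings": {
+    "_source": {
+      "mode": "synthetic" <1>
+    }
+  }
+}
+
+PUT my-index-disabled
+{
+  "mappings": {
+    "_source": {
+      "enabled": false <2>
+    }
+  }
+}
+------------------------------------------------------------
+// TEST[skip:TBD]
+<1> Reconstructs the `_source` content at fetch time instead of storing it on disk.
+<2> Stores no `_source` at all, so the features listed above are unavailable.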
diff --git a/docs/reference/mapping/fields/synthetic-source.asciidoc b/docs/reference/mapping/fields/synthetic-source.asciidoc index a0e7aed177a9c..ccea38cf602da 100644 --- a/docs/reference/mapping/fields/synthetic-source.asciidoc +++ b/docs/reference/mapping/fields/synthetic-source.asciidoc @@ -28,45 +28,22 @@ PUT idx While this on the fly reconstruction is *generally* slower than saving the source documents verbatim and loading them at query time, it saves a lot of storage -space. +space. Additional latency can be avoided by not loading `_source` field in queries when it is not needed. + +[[synthetic-source-fields]] +===== Supported fields +Synthetic `_source` is supported by all field types. Depending on implementation details, field types have different properties when used with synthetic `_source`. + +<> construct synthetic `_source` using existing data, most commonly <> and <>. For these field types, no additional space is needed to store the contents of `_source` field. Due to the storage layout of <>, the generated `_source` field undergoes <> compared to original document. + +For all other field types, the original value of the field is stored as is, in the same way as the `_source` field in non-synthetic mode. In this case there are no modifications and field data in `_source` is the same as in the original document. Similarly, malformed values of fields that use <> or <> need to be stored as is. This approach is less storage efficient since data needed for `_source` reconstruction is stored in addition to other data required to index the field (like `doc_values`). [[synthetic-source-restrictions]] ===== Synthetic `_source` restrictions -There are a couple of restrictions to be aware of: +Synthetic `_source` cannot be used together with field mappings that use <>. -* When you retrieve synthetic `_source` content it undergoes minor -<> compared to the original JSON. -* Synthetic `_source` can be used with indices that contain only these field -types: - -** <> -** {plugins}/mapper-annotated-text-usage.html#annotated-text-synthetic-source[`annotated-text`] -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> -** <> +Some field types have additional restrictions. These restrictions are documented in the **synthetic `_source`** section of the field type's <>. [[synthetic-source-modifications]] ===== Synthetic `_source` modifications @@ -178,4 +155,40 @@ that ordering. [[synthetic-source-modifications-ranges]] ====== Representation of ranges -Range field vales (e.g. `long_range`) are always represented as inclusive on both sides with bounds adjusted accordingly. See <>. +Range field values (e.g. `long_range`) are always represented as inclusive on both sides with bounds adjusted accordingly. See <>. + +[[synthetic-source-precision-loss-for-point-types]] +====== Reduced precision of `geo_point` values +Values of `geo_point` fields are represented in synthetic `_source` with reduced precision. See <>. + + +[[synthetic-source-fields-native-list]] +===== Field types that support synthetic source with no storage overhead +The following field types support synthetic source using data from <> or <>, and require no additional storage space to construct the `_source` field. + +NOTE: If you enable the <> or <> settings, then additional storage is required to store ignored field values for these types. 
+ +** <> +** {plugins}/mapper-annotated-text-usage.html#annotated-text-synthetic-source[`annotated-text`] +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> diff --git a/docs/reference/mapping/types/dense-vector.asciidoc b/docs/reference/mapping/types/dense-vector.asciidoc index 8759059a319da..f2f0b3ae8bb23 100644 --- a/docs/reference/mapping/types/dense-vector.asciidoc +++ b/docs/reference/mapping/types/dense-vector.asciidoc @@ -183,11 +183,23 @@ The following mapping parameters are accepted: `element_type`:: (Optional, string) The data type used to encode vectors. The supported data types are -`float` (default) and `byte`. `float` indexes a 4-byte floating-point -value per dimension. `byte` indexes a 1-byte integer value per dimension. -Using `byte` can result in a substantially smaller index size with the -trade off of lower precision. Vectors using `byte` require dimensions with -integer values between -128 to 127, inclusive for both indexing and searching. +`float` (default), `byte`, and bit. + +.Valid values for `element_type` +[%collapsible%open] +==== +`float`::: +indexes a 4-byte floating-point +value per dimension. This is the default value. + +`byte`::: +indexes a 1-byte integer value per dimension. + +`bit`::: +indexes a single bit per dimension. Useful for very high-dimensional vectors or models that specifically support bit vectors. +NOTE: when using `bit`, the number of dimensions must be a multiple of 8 and must represent the number of bits. + +==== `dims`:: (Optional, integer) @@ -205,7 +217,11 @@ API>>. Defaults to `true`. The vector similarity metric to use in kNN search. Documents are ranked by their vector field's similarity to the query vector. The `_score` of each document will be derived from the similarity, in a way that ensures scores are -positive and that a larger score corresponds to a higher ranking. Defaults to `cosine`. +positive and that a larger score corresponds to a higher ranking. +Defaults to `l2_norm` when `element_type: bit` otherwise defaults to `cosine`. + +NOTE: `bit` vectors only support `l2_norm` as their similarity metric. + + ^*^ This parameter can only be specified when `index` is `true`. + @@ -217,6 +233,9 @@ Computes similarity based on the L^2^ distance (also known as Euclidean distance) between the vectors. The document `_score` is computed as `1 / (1 + l2_norm(query, vector)^2)`. +For `bit` vectors, instead of using `l2_norm`, the `hamming` distance between the vectors is used. The `_score` +transformation is `(numBits - hamming(a, b)) / numBits` + `dot_product`::: Computes the dot product of two unit vectors. This option provides an optimized way to perform cosine similarity. The constraints and computed score are defined @@ -233,10 +252,11 @@ The document `_score` is computed as where `dims` is the number of dimensions per vector. `cosine`::: -Computes the cosine similarity. Note that the most efficient way to perform -cosine similarity is to normalize all vectors to unit length, and instead use -`dot_product`. You should only use `cosine` if you need to preserve the -original vectors and cannot normalize them in advance. The document `_score` +Computes the cosine similarity. During indexing {es} automatically +normalizes vectors with `cosine` similarity to unit length. This allows +to internally use `dot_product` for computing similarity, which is more efficient. +Original un-normalized vectors can be still accessed +through scripts. 
The document `_score` is computed as `(1 + cosine(query, vector)) / 2`. The `cosine` similarity does not allow vectors with zero magnitude, since cosine is not defined in this case. @@ -319,3 +339,112 @@ any issues, but features in technical preview are not subject to the support SLA of official GA features. `dense_vector` fields support <> . + +[[dense-vector-index-bit]] +==== Indexing & Searching bit vectors + +When using `element_type: bit`, all vectors are treated as bit vectors. Bit vectors utilize only a single +bit per dimension and are internally encoded as bytes. This can be useful for very high-dimensional vectors or models. + +When using `bit`, the number of dimensions must be a multiple of 8 and must represent the number of bits. Additionally, +with `bit` vectors, the typical vector similarity values are effectively all scored the same, e.g. with `hamming` distance. + +Let's compare two `byte[]` arrays, each representing 40 individual bits. + +`[-127, 0, 1, 42, 127]` in bits `1000000100000000000000010010101001111111` +`[127, -127, 0, 1, 42]` in bits `0111111110000001000000000000000100101010` + +When comparing these two bit vectors, we first take the {wikipedia}/Hamming_distance[`hamming` distance]. + +`xor` result: +``` +1000000100000000000000010010101001111111 +^ +0111111110000001000000000000000100101010 += +1111111010000001000000010010101101010101 +``` + +Then, we gather the count of `1` bits in the `xor` result: `18`. To scale for scoring, we subtract from the total number +of bits and divide by the total number of bits: `(40 - 18) / 40 = 0.55`. This would be the `_score` between these two +vectors. + +Here is an example of indexing and searching bit vectors: + +[source,console] +-------------------------------------------------- +PUT my-bit-vectors +{ + "mappings": { + "properties": { + "my_vector": { + "type": "dense_vector", + "dims": 40, <1> + "element_type": "bit" + } + } + } +} +-------------------------------------------------- +<1> The number of dimensions that represents the number of bits + +[source,console] +-------------------------------------------------- +POST /my-bit-vectors/_bulk?refresh +{"index": {"_id" : "1"}} +{"my_vector": [127, -127, 0, 1, 42]} <1> +{"index": {"_id" : "2"}} +{"my_vector": "8100012a7f"} <2> +-------------------------------------------------- +// TEST[continued] +<1> 5 bytes representing the 40 bit dimensioned vector +<2> A hexadecimal string representing the 40 bit dimensioned vector + +Then, when searching, you can use the `knn` query to search for similar bit vectors: + +[source,console] +-------------------------------------------------- +POST /my-bit-vectors/_search?filter_path=hits.hits +{ + "query": { + "knn": { + "query_vector": [127, -127, 0, 1, 42], + "field": "my_vector" + } + } +} +-------------------------------------------------- +// TEST[continued] + +[source,console-result] +---- +{ + "hits": { + "hits": [ + { + "_index": "my-bit-vectors", + "_id": "1", + "_score": 1.0, + "_source": { + "my_vector": [ + 127, + -127, + 0, + 1, + 42 + ] + } + }, + { + "_index": "my-bit-vectors", + "_id": "2", + "_score": 0.55, + "_source": { + "my_vector": "8100012a7f" + } + } + ] + } +} +---- + diff --git a/docs/reference/mapping/types/keyword.asciidoc b/docs/reference/mapping/types/keyword.asciidoc index a6f41a38f559c..59d307c4df0ad 100644 --- a/docs/reference/mapping/types/keyword.asciidoc +++ b/docs/reference/mapping/types/keyword.asciidoc @@ -166,7 +166,6 @@ Dimension fields have the following constraints: * Field values cannot be
an <>. // end::dimension[] * Dimension values are used to identify a document’s time series. If dimension values are altered in any way during indexing, the document will be stored as belonging to different from intended time series. As a result there are additional constraints: -** <> mapping parameter isn't supported. ** The field cannot use a <>. -- diff --git a/docs/reference/mapping/types/rank-features.asciidoc b/docs/reference/mapping/types/rank-features.asciidoc index b54e99ede3fae..25d5278ca220d 100644 --- a/docs/reference/mapping/types/rank-features.asciidoc +++ b/docs/reference/mapping/types/rank-features.asciidoc @@ -70,6 +70,15 @@ GET my-index-000001/_search } } } + +GET my-index-000001/_search +{ + "query": { <6> + "term": { + "topics": "economics" + } + } +} -------------------------------------------------- <1> Rank features fields must use the `rank_features` field type @@ -77,6 +86,7 @@ GET my-index-000001/_search <3> Rank features fields must be a hash with string keys and strictly positive numeric values <4> This query ranks documents by how much they are about the "politics" topic. <5> This query ranks documents inversely to the number of "1star" reviews they received. +<6> This query returns documents that store the "economics" feature in the "topics" field. NOTE: `rank_features` fields only support single-valued features and strictly diff --git a/docs/reference/mapping/types/semantic-text.asciidoc b/docs/reference/mapping/types/semantic-text.asciidoc index bbb501c4ccc36..6ee30e6b9f831 100644 --- a/docs/reference/mapping/types/semantic-text.asciidoc +++ b/docs/reference/mapping/types/semantic-text.asciidoc @@ -8,7 +8,7 @@ beta[] The `semantic_text` field type automatically generates embeddings for text -content using an inference endpoint. +content using an inference endpoint. The `semantic_text` field type specifies an inference endpoint identifier that will be used to generate embeddings. You can create the inference endpoint by using the <>. @@ -24,7 +24,7 @@ PUT my-index-000001 { "mappings": { "properties": { - "inference_field": { + "inference_field": { "type": "semantic_text", "inference_id": "my-elser-endpoint" } @@ -40,7 +40,7 @@ PUT my-index-000001 ==== Parameters for `semantic_text` fields `inference_id`:: -(Required, string) +(Required, string) Inference endpoint that will be used to generate the embeddings for the field. Use the <> to create the endpoint. @@ -137,8 +137,42 @@ field to collect the values of other fields for semantic search. Each value has its embeddings calculated separately; each field value is a separate set of chunk(s) in the resulting embeddings. -This imposes a restriction on bulk updates to documents with `semantic_text`. -In bulk requests, all fields that are copied to a `semantic_text` field must have a value to ensure every embedding is calculated correctly. +This imposes a restriction on bulk requests and ingestion pipelines that update documents with `semantic_text` fields. +In these cases, all fields that are copied to a `semantic_text` field, including the `semantic_text` field value, must have a value to ensure every embedding is calculated correctly. 
+ +For example, the following mapping: + +[source,console] +------------------------------------------------------------ +PUT test-index +{ + "mappings": { + "properties": { + "infer_field": { + "type": "semantic_text", + "inference_id": "my-elser-endpoint" + }, + "source_field": { + "type": "text", + "copy_to": "infer_field" + } + } + } +} +------------------------------------------------------------ +// TEST[skip:TBD] + +Will need the following bulk update request to ensure that `infer_field` is updated correctly: + +[source,console] +------------------------------------------------------------ +PUT test-index/_bulk +{"update": {"_id": "1"}} +{"doc": {"infer_field": "updated inference field", "source_field": "updated source field"}} +------------------------------------------------------------ +// TEST[skip:TBD] + +Notice that both the `semantic_text` field and the source field are updated in the bulk request. [discrete] [[limitations]] diff --git a/docs/reference/mapping/types/sparse-vector.asciidoc b/docs/reference/mapping/types/sparse-vector.asciidoc index a382753cb6ed3..d0c2c83b8a8fa 100644 --- a/docs/reference/mapping/types/sparse-vector.asciidoc +++ b/docs/reference/mapping/types/sparse-vector.asciidoc @@ -94,6 +94,6 @@ Negative values will be rejected. NOTE: `sparse_vector` fields do not support querying, sorting or aggregating. They may only be used within specialized queries. The recommended query to use on these fields are <> queries. -They may also be used within <> queries. +They may also be used within legacy <> queries. NOTE: `sparse_vector` fields only preserve 9 significant bits for the precision, which translates to a relative error of about 0.4%. diff --git a/docs/reference/migration/index.asciidoc b/docs/reference/migration/index.asciidoc index 51a2898b5d598..0690f60495c97 100644 --- a/docs/reference/migration/index.asciidoc +++ b/docs/reference/migration/index.asciidoc @@ -1,5 +1,6 @@ include::migration_intro.asciidoc[] +* <> * <> * <> * <> @@ -17,6 +18,7 @@ include::migration_intro.asciidoc[] * <> * <> +include::migrate_8_16.asciidoc[] include::migrate_8_15.asciidoc[] include::migrate_8_14.asciidoc[] include::migrate_8_13.asciidoc[] diff --git a/docs/reference/migration/migrate_8_16.asciidoc b/docs/reference/migration/migrate_8_16.asciidoc new file mode 100644 index 0000000000000..aea6322f292bf --- /dev/null +++ b/docs/reference/migration/migrate_8_16.asciidoc @@ -0,0 +1,20 @@ +[[migrating-8.16]] +== Migrating to 8.16 +++++ +8.16 +++++ + +This section discusses the changes that you need to be aware of when migrating +your application to {es} 8.16. + +See also <> and <>. + +coming::[8.16.0] + + +[discrete] +[[breaking-changes-8.16]] +=== Breaking changes + +There are no breaking changes in {es} 8.16. + diff --git a/docs/reference/modules/cluster/misc.asciidoc b/docs/reference/modules/cluster/misc.asciidoc index 3da5df4f16414..75eaca88c66b1 100644 --- a/docs/reference/modules/cluster/misc.asciidoc +++ b/docs/reference/modules/cluster/misc.asciidoc @@ -11,12 +11,12 @@ An entire cluster may be set to read-only with the following setting: (<>) Make the whole cluster read only (indices do not accept write operations), metadata is not allowed to be modified (create or delete - indices). + indices). Defaults to `false`. `cluster.blocks.read_only_allow_delete`:: (<>) Identical to `cluster.blocks.read_only` but allows to delete indices - to free up resources. + to free up resources. Defaults to `false`. WARNING: Don't rely on this setting to prevent changes to your cluster. 
Any user with access to the <> diff --git a/docs/reference/modules/indices/search-settings.asciidoc b/docs/reference/modules/indices/search-settings.asciidoc index e43ec076578d4..003974815c4bd 100644 --- a/docs/reference/modules/indices/search-settings.asciidoc +++ b/docs/reference/modules/indices/search-settings.asciidoc @@ -33,6 +33,39 @@ a single response. Defaults to 65,536. + Requests that attempt to return more than this limit will return an error. +[[search-settings-only-allowed-scripts]] +`search.aggs.only_allowed_metric_scripts`:: +(<>, boolean) +Configures whether only explicitly allowed scripts can be used in +<>. +Defaults to `false`. ++ +Requests using scripts not contained in either +<> +or +<> +will return an error. + +[[search-settings-allowed-inline-scripts]] +`search.aggs.allowed_inline_metric_scripts`:: +(<>, list of strings) +List of inline scripts that can be used in scripted metrics aggregations when +<> +is set to `true`. +Defaults to an empty list. ++ +Requests using other inline scripts will return an error. + +[[search-settings-allowed-stored-scripts]] +`search.aggs.allowed_stored_metric_scripts`:: +(<>, list of strings) +List of ids of stored scripts that can be used in scripted metrics aggregations when +<> +is set to `true`. +Defaults to an empty list. ++ +Requests using other stored scripts will return an error. + [[indices-query-bool-max-nested-depth]] `indices.query.bool.max_nested_depth`:: (<>, integer) Maximum nested depth of queries. Defaults to `30`. diff --git a/docs/reference/modules/network.asciidoc b/docs/reference/modules/network.asciidoc index 55c236ce43574..593aa79ded4d9 100644 --- a/docs/reference/modules/network.asciidoc +++ b/docs/reference/modules/network.asciidoc @@ -153,6 +153,8 @@ The only requirements are that each node must be: cluster, and by any remote clusters that will discover it using <>. +Each node must have its own distinct publish address. + If you specify the transport publish address using a hostname then {es} will resolve this hostname to an IP address once during startup, and other nodes will use the resulting IP address instead of resolving the name again diff --git a/docs/reference/query-dsl.asciidoc b/docs/reference/query-dsl.asciidoc index 4d5504e5fe7ae..2f8f07f21f648 100644 --- a/docs/reference/query-dsl.asciidoc +++ b/docs/reference/query-dsl.asciidoc @@ -72,14 +72,12 @@ include::query-dsl/match-all-query.asciidoc[] include::query-dsl/span-queries.asciidoc[] +include::query-dsl/vector-queries.asciidoc[] + include::query-dsl/special-queries.asciidoc[] include::query-dsl/term-level-queries.asciidoc[] -include::query-dsl/text-expansion-query.asciidoc[] - -include::query-dsl/sparse-vector-query.asciidoc[] - include::query-dsl/minimum-should-match.asciidoc[] include::query-dsl/multi-term-rewrite.asciidoc[] diff --git a/docs/reference/query-dsl/knn-query.asciidoc b/docs/reference/query-dsl/knn-query.asciidoc index b7ded6929ed21..05a00b9949912 100644 --- a/docs/reference/query-dsl/knn-query.asciidoc +++ b/docs/reference/query-dsl/knn-query.asciidoc @@ -50,7 +50,8 @@ POST my-image-index/_bulk?refresh=true ---- //TEST[continued] -. Run the search using the `knn` query, asking for the top 3 nearest vectors. +. Run the search using the `knn` query, asking for the top 10 nearest vectors +from each shard, and then combine shard results to get the top 3 global results. 
+ [source,console] ---- @@ -61,18 +62,13 @@ POST my-image-index/_search "knn": { "field": "image-vector", "query_vector": [-5, 9, -12], - "num_candidates": 10 + "k": 10 } } } ---- //TEST[continued] -NOTE: `knn` query doesn't have a separate `k` parameter. `k` is defined by -`size` parameter of a search request similar to other queries. `knn` query -collects `num_candidates` results from each shard, then merges them to get -the top `size` results. - [[knn-query-top-level-parameters]] ==== Top-level parameters for `knn` @@ -99,14 +95,21 @@ Either this or `query_vector_builder` must be provided. include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=knn-query-vector-builder] -- +`k`:: ++ +-- +(Optional, integer) The number of nearest neighbors to return from each shard. +{es} collects `k` results from each shard, then merges them to find the global top results. +This value must be less than or equal to `num_candidates`. Defaults to `num_candidates`. +-- `num_candidates`:: + -- -(Optional, integer) The number of nearest neighbor candidates to consider per shard. -Cannot exceed 10,000. {es} collects `num_candidates` results from each shard, then -merges them to find the top results. Increasing `num_candidates` tends to improve the -accuracy of the final results. Defaults to `Math.min(1.5 * size, 10_000)`. +(Optional, integer) The number of nearest neighbor candidates to consider per shard +while doing knn search. Cannot exceed 10,000. Increasing `num_candidates` tends to +improve the accuracy of the final results. +Defaults to `1.5 * k` if `k` is set, or `1.5 * size` if `k` is not set. -- `filter`:: @@ -160,7 +163,7 @@ Also filters from <> are applied as pre-filters. All other filters found in the Query DSL tree are applied as post-filters. For example, `knn` query finds the top 3 documents with the nearest vectors -(num_candidates=3), which are combined with `term` filter, that is +(k=3), which are combined with `term` filter, that is post-filtered. The final set of documents will contain only a single document that passes the post-filter. @@ -176,7 +179,7 @@ POST my-image-index/_search "knn": { "field": "image-vector", "query_vector": [-5, 9, -12], - "num_candidates": 3 + "k": 3 } }, "filter" : { @@ -217,7 +220,7 @@ POST my-image-index/_search "knn": { "field": "image-vector", "query_vector": [-5, 9, -12], - "num_candidates": 10, + "k": 10, "boost": 2 } } @@ -267,8 +270,8 @@ A sample query can look like below: [[knn-query-aggregations]] ==== Knn query with aggregations -`knn` query calculates aggregations on `num_candidates` from each shard. +`knn` query calculates aggregations on top `k` documents from each shard. Thus, the final results from aggregations contain -`num_candidates * number_of_shards` documents. This is different from +`k * number_of_shards` documents. This is different from the <> where aggregations are -calculated on the global top k nearest documents. +calculated on the global top `k` nearest documents. diff --git a/docs/reference/query-dsl/rule-query.asciidoc b/docs/reference/query-dsl/rule-query.asciidoc index 0958b041af7d3..cc5616c01eecd 100644 --- a/docs/reference/query-dsl/rule-query.asciidoc +++ b/docs/reference/query-dsl/rule-query.asciidoc @@ -6,8 +6,6 @@ Rule ++++ -preview::[] - [WARNING] ==== `rule_query` was renamed to `rule` in 8.15.0. 
diff --git a/docs/reference/query-dsl/semantic-query.asciidoc b/docs/reference/query-dsl/semantic-query.asciidoc index 23bcb4a52ef38..d0eb2da95ebc6 100644 --- a/docs/reference/query-dsl/semantic-query.asciidoc +++ b/docs/reference/query-dsl/semantic-query.asciidoc @@ -32,11 +32,11 @@ GET my-index-000001/_search [[semantic-query-params]] ==== Top-level parameters for `semantic` -field:: +`field`:: (Required, string) The `semantic_text` field to perform the query on. -query:: +`query`:: (Required, string) The query text to be searched for on the field. diff --git a/docs/reference/query-dsl/sparse-vector-query.asciidoc b/docs/reference/query-dsl/sparse-vector-query.asciidoc index 9a269ad9712a8..08dd7ab7f4470 100644 --- a/docs/reference/query-dsl/sparse-vector-query.asciidoc +++ b/docs/reference/query-dsl/sparse-vector-query.asciidoc @@ -1,5 +1,5 @@ [[query-dsl-sparse-vector-query]] -== Sparse vector query +=== Sparse vector query ++++ Sparse vector @@ -19,7 +19,7 @@ For example, a stored vector `{"feature_0": 0.12, "feature_1": 1.2, "feature_2": [discrete] [[sparse-vector-query-ex-request]] -=== Example request using an {nlp} model +==== Example request using an {nlp} model [source,console] ---- @@ -37,7 +37,7 @@ GET _search // TEST[skip: Requires inference] [discrete] -=== Example request using precomputed vectors +==== Example request using precomputed vectors [source,console] ---- @@ -55,29 +55,35 @@ GET _search [discrete] [[sparse-vector-field-params]] -=== Top level parameters for `sparse_vector` +==== Top level parameters for `sparse_vector` -``::: -(Required, object) The name of the field that contains the token-weight pairs to be searched against. +`field`:: +(Required, string) The name of the field that contains the token-weight pairs to be searched against. -`inference_id`:::: +`inference_id`:: (Optional, string) The <> to use to convert the query text into token-weight pairs. It must be the same inference ID that was used to create the tokens from the input text. Only one of `inference_id` and `query_vector` is allowed. If `inference_id` is specified, `query` must also be specified. -`query`:::: +`query`:: (Optional, string) The query text you want to use for search. If `inference_id` is specified, `query` must also be specified. +If `query_vector` is specified, `query` must not be specified. -`prune` :::: +`query_vector`:: +(Optional, dictionary) A dictionary of token-weight pairs representing the precomputed query vector to search. +Searching using this query vector will bypass additional inference. +Only one of `inference_id` and `query_vector` is allowed. + +`prune` :: (Optional, boolean) preview:[] Whether to perform pruning, omitting the non-significant tokens from the query to improve query performance. If `prune` is true but the `pruning_config` is not specified, pruning will occur but default values will be used. Default: false. -`pruning_config` :::: +`pruning_config` :: (Optional, object) preview:[] Optional pruning configuration. @@ -86,7 +92,7 @@ This is only used if `prune` is set to `true`. If `prune` is set to `true` but `pruning_config` is not specified, default values will be used. 
+ -- -Parameters for `` are: +Parameters for `pruning_config` are: `tokens_freq_ratio_threshold`:: (Optional, integer) @@ -114,7 +120,7 @@ NOTE: The default values for `tokens_freq_ratio_threshold` and `tokens_weight_th [discrete] [[sparse-vector-query-example]] -=== Example ELSER query +==== Example ELSER query The following is an example of the `sparse_vector` query that references the ELSER model to perform semantic search. For a more detailed description of how to perform semantic search by using ELSER and the `sparse_vector` query, refer to <>. @@ -235,7 +241,7 @@ GET my-index/_search [discrete] [[sparse-vector-query-with-pruning-config-and-rescore-example]] -=== Example ELSER query with pruning configuration and rescore +==== Example ELSER query with pruning configuration and rescore The following is an extension to the above example that adds a preview:[] pruning configuration to the `sparse_vector` query. The pruning configuration identifies non-significant tokens to prune from the query in order to improve query performance. @@ -285,3 +291,5 @@ GET my-index/_search //TEST[skip: Requires inference] NOTE: When performing <>, inference is performed on the local cluster. + + diff --git a/docs/reference/query-dsl/special-queries.asciidoc b/docs/reference/query-dsl/special-queries.asciidoc index 90cd9a696a6d9..a6d35d4f9b707 100644 --- a/docs/reference/query-dsl/special-queries.asciidoc +++ b/docs/reference/query-dsl/special-queries.asciidoc @@ -17,10 +17,6 @@ or collection of documents. This query finds queries that are stored as documents that match with the specified document. -<>:: -A query that finds the _k_ nearest vectors to a query -vector, as measured by a similarity metric. - <>:: A query that computes scores based on the values of numeric features and is able to efficiently skip non-competitive hits. @@ -32,9 +28,6 @@ This query allows a script to act as a filter. Also see the <>:: A query that allows to modify the score of a sub-query with a script. -<>:: -A query that allows you to perform semantic search. - <>:: A query that accepts other queries as json or yaml string. @@ -50,20 +43,14 @@ include::mlt-query.asciidoc[] include::percolate-query.asciidoc[] -include::knn-query.asciidoc[] - include::rank-feature-query.asciidoc[] include::script-query.asciidoc[] include::script-score-query.asciidoc[] -include::semantic-query.asciidoc[] - include::wrapper-query.asciidoc[] include::pinned-query.asciidoc[] include::rule-query.asciidoc[] - -include::weighted-tokens-query.asciidoc[] diff --git a/docs/reference/query-dsl/text-expansion-query.asciidoc b/docs/reference/query-dsl/text-expansion-query.asciidoc index 5b3f98b5e1ea8..8faecad1dbdb9 100644 --- a/docs/reference/query-dsl/text-expansion-query.asciidoc +++ b/docs/reference/query-dsl/text-expansion-query.asciidoc @@ -1,17 +1,18 @@ [[query-dsl-text-expansion-query]] -== Text expansion query +=== Text expansion query + ++++ Text expansion ++++ -The text expansion query uses a {nlp} model to convert the query text into a -list of token-weight pairs which are then used in a query against a +deprecated[8.15.0, This query has been replaced by <>.] + +The text expansion query uses a {nlp} model to convert the query text into a list of token-weight pairs which are then used in a query against a <> or <> field. 
[discrete] [[text-expansion-query-ex-request]] -=== Example request - +==== Example request [source,console] ---- @@ -31,31 +32,27 @@ GET _search [discrete] [[text-expansion-query-params]] -=== Top level parameters for `text_expansion` +==== Top level parameters for `text_expansion` ``::: -(Required, object) -The name of the field that contains the token-weight pairs the NLP model created -based on the input text. +(Required, object) The name of the field that contains the token-weight pairs the NLP model created based on the input text. [discrete] [[text-expansion-rank-feature-field-params]] -=== Top level parameters for `` +==== Top level parameters for `` `model_id`:::: -(Required, string) -The ID of the model to use to convert the query text into token-weight pairs. It -must be the same model ID that was used to create the tokens from the input -text. +(Required, string) The ID of the model to use to convert the query text into token-weight pairs. +It must be the same model ID that was used to create the tokens from the input text. `model_text`:::: -(Required, string) -The query text you want to use for search. +(Required, string) The query text you want to use for search. `pruning_config` :::: (Optional, object) preview:[] -Optional pruning configuration. If enabled, this will omit non-significant tokens from the query in order to improve query performance. +Optional pruning configuration. +If enabled, this will omit non-significant tokens from the query in order to improve query performance. Default: Disabled. + -- @@ -87,12 +84,10 @@ NOTE: The default values for `tokens_freq_ratio_threshold` and `tokens_weight_th [discrete] [[text-expansion-query-example]] -=== Example ELSER query +==== Example ELSER query -The following is an example of the `text_expansion` query that references the -ELSER model to perform semantic search. For a more detailed description of how -to perform semantic search by using ELSER and the `text_expansion` query, refer -to <>. +The following is an example of the `text_expansion` query that references the ELSER model to perform semantic search. +For a more detailed description of how to perform semantic search by using ELSER and the `text_expansion` query, refer to <>. [source,console] ---- @@ -155,8 +150,7 @@ GET my-index/_search ---- // TEST[skip: TBD] -This can also be achieved using <>, -through an <> with multiple +This can also be achieved using <>, through an <> with multiple <>. [source,console] @@ -214,7 +208,7 @@ GET my-index/_search [discrete] [[text-expansion-query-with-pruning-config-and-rescore-example]] -=== Example ELSER query with pruning configuration and rescore +==== Example ELSER query with pruning configuration and rescore The following is an extension to the above example that adds a preview:[] pruning configuration to the `text_expansion` query. The pruning configuration identifies non-significant tokens to prune from the query in order to improve query performance. diff --git a/docs/reference/query-dsl/vector-queries.asciidoc b/docs/reference/query-dsl/vector-queries.asciidoc new file mode 100644 index 0000000000000..fe9f380eeb621 --- /dev/null +++ b/docs/reference/query-dsl/vector-queries.asciidoc @@ -0,0 +1,37 @@ +[[vector-queries]] +== Vector queries + +Vector queries are specialized queries that work on vector fields to efficiently perform <>. + +<>:: +A query that finds the _k_ nearest vectors to a query vector for <> fields, as measured by a similarity metric. + +<>:: +A query used to search <> field types. 
+ +<>:: +A query that allows you to perform semantic search on <> fields. + +[discrete] +=== Deprecated vector queries + +The following queries have been deprecated and will be removed in the near future. +Use the <> query instead. + +<>:: +A query that allows you to perform sparse vector search on <> or <> fields. + +<>:: +Allows to perform text expansion queries optimizing for performance. + +include::knn-query.asciidoc[] + +include::sparse-vector-query.asciidoc[] + +include::semantic-query.asciidoc[] + +include::text-expansion-query.asciidoc[] + +include::weighted-tokens-query.asciidoc[] + + diff --git a/docs/reference/query-dsl/weighted-tokens-query.asciidoc b/docs/reference/query-dsl/weighted-tokens-query.asciidoc index cbd88eb3290dc..d4318665a9778 100644 --- a/docs/reference/query-dsl/weighted-tokens-query.asciidoc +++ b/docs/reference/query-dsl/weighted-tokens-query.asciidoc @@ -1,9 +1,11 @@ [[query-dsl-weighted-tokens-query]] === Weighted tokens query + ++++ Weighted tokens ++++ +deprecated[8.15.0, This query has been replaced by the <> and will be removed in an upcoming release.] preview::[] The weighted tokens query requires a list of token-weight pairs that are sent in with a query rather than calculated using a {nlp} model. @@ -40,32 +42,28 @@ POST _search === Top level parameters for `weighted_token` ``::: -(Required, dictionary) -A dictionary of token-weight pairs. +(Required, dictionary) A dictionary of token-weight pairs. `pruning_config` :::: -(Optional, object) -Optional pruning configuration. If enabled, this will omit non-significant tokens from the query in order to improve query performance. +(Optional, object) Optional pruning configuration. +If enabled, this will omit non-significant tokens from the query in order to improve query performance. Default: Disabled. + -- Parameters for `` are: `tokens_freq_ratio_threshold`:: -(Optional, integer) -Tokens whose frequency is more than `tokens_freq_ratio_threshold` times the average frequency of all tokens in the specified field are considered outliers and pruned. +(Optional, integer) Tokens whose frequency is more than `tokens_freq_ratio_threshold` times the average frequency of all tokens in the specified field are considered outliers and pruned. This value must between 1 and 100. Default: `5`. `tokens_weight_threshold`:: -(Optional, float) -Tokens whose weight is less than `tokens_weight_threshold` are considered nonsignificant and pruned. +(Optional, float) Tokens whose weight is less than `tokens_weight_threshold` are considered nonsignificant and pruned. This value must be between 0 and 1. Default: `0.4`. `only_score_pruned_tokens`:: -(Optional, boolean) -If `true` we only input pruned tokens into scoring, and discard non-pruned tokens. +(Optional, boolean) If `true` we only input pruned tokens into scoring, and discard non-pruned tokens. It is strongly recommended to set this to `false` for the main query, but this can be set to `true` for a rescore query to get more relevant results. Default: `false`. diff --git a/docs/reference/query-rules/apis/delete-query-rule.asciidoc b/docs/reference/query-rules/apis/delete-query-rule.asciidoc new file mode 100644 index 0000000000000..01b73033aa361 --- /dev/null +++ b/docs/reference/query-rules/apis/delete-query-rule.asciidoc @@ -0,0 +1,74 @@ +[role="xpack"] +[[delete-query-rule]] +=== Delete query rule + +++++ +Delete query rule +++++ + +Removes an individual query rule within an existing query ruleset. 
+This is a destructive action that is only recoverable by re-adding the same rule via the <> API. + +[[delete-query-rule-request]] +==== {api-request-title} + +`DELETE _query_rules//_rule/` + +[[delete-query-rule-prereq]] +==== {api-prereq-title} + +Requires the `manage_search_query_rules` privilege. + +[[delete-query_rule-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) + +``:: +(Required, string) + +[[delete-query-rule-response-codes]] +==== {api-response-codes-title} + +`400`:: +Missing `ruleset_id`, `rule_id`, or both. + +`404` (Missing resources):: +No query ruleset matching `ruleset_id` could be found, or else no rule matching `rule_id` was found in that ruleset. + +[[delete-query-rule-example]] +==== {api-examples-title} + +The following example deletes the query rule with ID `my-rule1` from the query ruleset named `my-ruleset`: + +//// +[source,console] +---- +PUT _query_rules/my-ruleset +{ + "rules": [ + { + "rule_id": "my-rule1", + "type": "pinned", + "criteria": [ + { + "type": "exact", + "metadata": "query_string", + "values": [ "marvel" ] + } + ], + "actions": { + "ids": ["id1"] + } + } + ] +} +---- +// TESTSETUP +//// + +[source,console] +---- +DELETE _query_rules/my-ruleset/_rule/my-rule1 +---- diff --git a/docs/reference/query-rules/apis/delete-query-ruleset.asciidoc b/docs/reference/query-rules/apis/delete-query-ruleset.asciidoc index dc18e2b486eb1..31507dce3d12d 100644 --- a/docs/reference/query-rules/apis/delete-query-ruleset.asciidoc +++ b/docs/reference/query-rules/apis/delete-query-ruleset.asciidoc @@ -2,8 +2,6 @@ [[delete-query-ruleset]] === Delete query ruleset -preview::[] - ++++ Delete query ruleset ++++ diff --git a/docs/reference/query-rules/apis/get-query-rule.asciidoc b/docs/reference/query-rules/apis/get-query-rule.asciidoc new file mode 100644 index 0000000000000..56713965d7bdc --- /dev/null +++ b/docs/reference/query-rules/apis/get-query-rule.asciidoc @@ -0,0 +1,130 @@ +[role="xpack"] +[[get-query-rule]] +=== Get query rule + +++++ +Get query rule +++++ + +Retrieves information about an individual query rule within a query ruleset. + +[[get-query-rule-request]] +==== {api-request-title} + +`GET _query_rules//_rule/` + +[[get-query-rule-prereq]] +==== {api-prereq-title} + +Requires the `manage_search_query_rules` privilege. + +[[get-query-rule-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) + +``:: +(Required, string) + +[[get-query-rule-response-codes]] +==== {api-response-codes-title} + +`400`:: +Missing `ruleset_id` or `rule_id`, or both. + +`404` (Missing resources):: +Either no query ruleset matching `ruleset_id` could be found, or no rule matching `rule_id` could be found within that ruleset. 
+ +[[get-query-rule-example]] +==== {api-examples-title} + +The following example gets the query rule with ID `my-rule1` from the ruleset named `my-ruleset`: + +//// + +[source,console] +-------------------------------------------------- +PUT _query_rules/my-ruleset +{ + "rules": [ + { + "rule_id": "my-rule1", + "type": "pinned", + "criteria": [ + { + "type": "contains", + "metadata": "query_string", + "values": [ "pugs", "puggles" ] + } + ], + "actions": { + "ids": [ + "id1", + "id2" + ] + } + }, + { + "rule_id": "my-rule2", + "type": "pinned", + "criteria": [ + { + "type": "fuzzy", + "metadata": "query_string", + "values": [ "rescue dogs" ] + } + ], + "actions": { + "docs": [ + { + "_index": "index1", + "_id": "id3" + }, + { + "_index": "index2", + "_id": "id4" + } + ] + } + } + ] +} +-------------------------------------------------- +// TESTSETUP + +[source,console] +-------------------------------------------------- +DELETE _query_rules/my-ruleset +-------------------------------------------------- +// TEARDOWN + +//// + +[source,console] +---- +GET _query_rules/my-ruleset/_rule/my-rule1 +---- + +A sample response: + +[source,console-result] +---- +{ + "rule_id": "my-rule1", + "type": "pinned", + "criteria": [ + { + "type": "contains", + "metadata": "query_string", + "values": [ "pugs", "puggles" ] + } + ], + "actions": { + "ids": [ + "id1", + "id2" + ] + } +} +---- diff --git a/docs/reference/query-rules/apis/get-query-ruleset.asciidoc b/docs/reference/query-rules/apis/get-query-ruleset.asciidoc index 303ad4da3ec7b..6bbcd157ea9e1 100644 --- a/docs/reference/query-rules/apis/get-query-ruleset.asciidoc +++ b/docs/reference/query-rules/apis/get-query-ruleset.asciidoc @@ -2,8 +2,6 @@ [[get-query-ruleset]] === Get query ruleset -preview::[] - ++++ Get query ruleset ++++ diff --git a/docs/reference/query-rules/apis/index.asciidoc b/docs/reference/query-rules/apis/index.asciidoc index 2ab5be069a6d5..f7303647f8515 100644 --- a/docs/reference/query-rules/apis/index.asciidoc +++ b/docs/reference/query-rules/apis/index.asciidoc @@ -22,9 +22,15 @@ Use the following APIs to manage query rulesets: * <> * <> * <> +* <> +* <> +* <> include::put-query-ruleset.asciidoc[] include::get-query-ruleset.asciidoc[] include::list-query-rulesets.asciidoc[] include::delete-query-ruleset.asciidoc[] +include::put-query-rule.asciidoc[] +include::get-query-rule.asciidoc[] +include::delete-query-rule.asciidoc[] diff --git a/docs/reference/query-rules/apis/list-query-rulesets.asciidoc b/docs/reference/query-rules/apis/list-query-rulesets.asciidoc index 4eba65dceeea4..6832934f6985c 100644 --- a/docs/reference/query-rules/apis/list-query-rulesets.asciidoc +++ b/docs/reference/query-rules/apis/list-query-rulesets.asciidoc @@ -2,8 +2,6 @@ [[list-query-rulesets]] === List query rulesets -preview::[] - ++++ List query rulesets ++++ diff --git a/docs/reference/query-rules/apis/put-query-rule.asciidoc b/docs/reference/query-rules/apis/put-query-rule.asciidoc new file mode 100644 index 0000000000000..2b9a6ba892b84 --- /dev/null +++ b/docs/reference/query-rules/apis/put-query-rule.asciidoc @@ -0,0 +1,144 @@ +[role="xpack"] +[[put-query-rule]] +=== Create or update query rule + +++++ +Create or update query rule +++++ + +Creates or updates an individual query rule within a query ruleset. + +[[put-query-rule-request]] +==== {api-request-title} + +`PUT _query_rules//_rule/` + +[[put-query-rule-prereqs]] +==== {api-prereq-title} + +Requires the `manage_search_query_rules` privilege. 
+ +[role="child_attributes"] +[[put-query-rule-request-body]] +(Required, object) Contains parameters for a query rule: + +==== {api-request-body-title} + +`type`:: +(Required, string) The type of rule. +At this time only `pinned` query rule types are allowed. + +`criteria`:: +(Required, array of objects) The criteria that must be met for the rule to be applied. +If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. + +Criteria must have the following information: + +- `type` (Required, string) The type of criteria. +The following criteria types are supported: ++ +-- +- `exact` +Only exact matches meet the criteria defined by the rule. +Applicable for string or numerical values. +- `fuzzy` +Exact matches or matches within the allowed {wikipedia}/Levenshtein_distance[Levenshtein Edit Distance] meet the criteria defined by the rule. +Only applicable for string values. +- `prefix` +Matches that start with this value meet the criteria defined by the rule. +Only applicable for string values. +- `suffix` +Matches that end with this value meet the criteria defined by the rule. +Only applicable for string values. +- `contains` +Matches that contain this value anywhere in the field meet the criteria defined by the rule. +Only applicable for string values. +- `lt` +Matches with a value less than this value meet the criteria defined by the rule. +Only applicable for numerical values. +- `lte` +Matches with a value less than or equal to this value meet the criteria defined by the rule. +Only applicable for numerical values. +- `gt` +Matches with a value greater than this value meet the criteria defined by the rule. +Only applicable for numerical values. +- `gte` +Matches with a value greater than or equal to this value meet the criteria defined by the rule. +Only applicable for numerical values. +- `always` +Matches all queries, regardless of input. +-- +- `metadata` (Optional, string) The metadata field to match against. +This metadata will be used to match against `match_criteria` sent in the <>. +Required for all criteria types except `global`. +- `values` (Optional, array of strings) The values to match against the metadata field. +Only one value must match for the criteria to be met. +Required for all criteria types except `global`. + +`actions`:: +(Required, object) The actions to take when the rule is matched. +The format of this action depends on the rule type. + +Actions depend on the rule type. +For `pinned` rules, actions follow the format specified by the <>. +The following actions are allowed: + +- `ids` (Optional, array of strings) The unique <> of the documents to pin. +Only one of `ids` or `docs` may be specified, and at least one must be specified. +- `docs` (Optional, array of objects) The documents to pin. +Only one of `ids` or `docs` may be specified, and at least one must be specified. +You can specify the following attributes for each document: ++ +-- +- `_index` (Required, string) The index of the document to pin. +- `_id` (Required, string) The unique <>. +-- + +IMPORTANT: Due to limitations within <>, you can only pin documents using `ids` or `docs`, but cannot use both in single rule. +It is advised to use one or the other in query rulesets, to avoid errors. +Additionally, pinned queries have a maximum limit of 100 pinned hits. +If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. 
+ +[[put-query-rule-example]] +==== {api-examples-title} + +The following example creates a new query rule with the ID `my-rule1` in a query ruleset called `my-ruleset`. + +`my-rule1` will pin documents with IDs `id1` and `id2` when `user_query` contains `pugs` _or_ `puggles` **and** `user_country` exactly matches `us`. + +[source,console] +---- +PUT _query_rules/my-ruleset/_rule/my-rule1 +{ + "type": "pinned", + "criteria": [ + { + "type": "contains", + "metadata": "user_query", + "values": [ "pugs", "puggles" ] + }, + { + "type": "exact", + "metadata": "user_country", + "values": [ "us" ] + } + ], + "actions": { + "ids": [ + "id1", + "id2" + ] + } +} +---- +// TESTSETUP + +////////////////////////// + +[source,console] +-------------------------------------------------- +DELETE _query_rules/my-ruleset +-------------------------------------------------- +// TEARDOWN + +////////////////////////// diff --git a/docs/reference/query-rules/apis/put-query-ruleset.asciidoc b/docs/reference/query-rules/apis/put-query-ruleset.asciidoc index 0d41496a505da..012060e1004ae 100644 --- a/docs/reference/query-rules/apis/put-query-ruleset.asciidoc +++ b/docs/reference/query-rules/apis/put-query-ruleset.asciidoc @@ -2,8 +2,6 @@ [[put-query-ruleset]] === Create or update query ruleset -preview::[] - ++++ Create or update query ruleset ++++ diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index 2e043834c9969..6a03ed03f2610 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -6,7 +6,10 @@ This section summarizes the changes in each release. +* <> * <> +* <> +* <> * <> * <> * <> @@ -68,7 +71,10 @@ This section summarizes the changes in each release. -- +include::release-notes/8.16.0.asciidoc[] include::release-notes/8.15.0.asciidoc[] +include::release-notes/8.14.3.asciidoc[] +include::release-notes/8.14.2.asciidoc[] include::release-notes/8.14.1.asciidoc[] include::release-notes/8.14.0.asciidoc[] include::release-notes/8.13.4.asciidoc[] diff --git a/docs/reference/release-notes/8.13.0.asciidoc b/docs/reference/release-notes/8.13.0.asciidoc index dba4fdbe5f67e..197a417e0eff4 100644 --- a/docs/reference/release-notes/8.13.0.asciidoc +++ b/docs/reference/release-notes/8.13.0.asciidoc @@ -21,6 +21,19 @@ This affects clusters running version 8.10 or later, with an active downsampling https://www.elastic.co/guide/en/elasticsearch/reference/current/downsampling-ilm.html[configuration] or a configuration that was activated at some point since upgrading to version 8.10 or later. +* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +information about the new functionality of these upgraded nodes may not be registered properly with the master node. +This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. +To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes +are upgraded. + +* The `pytorch_inference` process used to run Machine Learning models can consume large amounts of memory. +In environments where the available memory is limited, the OS Out of Memory Killer will kill the `pytorch_inference` +process to reclaim memory. This can cause inference requests to fail. 
+Elasticsearch will automatically restart the `pytorch_inference` process +after it is killed up to four times in 24 hours. (issue: {es-issue}110530[#110530]) + [[breaking-8.13.0]] [float] === Breaking changes @@ -457,5 +470,3 @@ Search:: * Upgrade to Lucene 9.9.0 {es-pull}102782[#102782] * Upgrade to Lucene 9.9.1 {es-pull}103387[#103387] * Upgrade to Lucene 9.9.2 {es-pull}104753[#104753] - - diff --git a/docs/reference/release-notes/8.13.1.asciidoc b/docs/reference/release-notes/8.13.1.asciidoc index 7b3dbff74cc6e..f176c124e5e3b 100644 --- a/docs/reference/release-notes/8.13.1.asciidoc +++ b/docs/reference/release-notes/8.13.1.asciidoc @@ -3,6 +3,22 @@ Also see <>. +[[known-issues-8.13.1]] +[float] +=== Known issues +* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +information about the new functionality of these upgraded nodes may not be registered properly with the master node. +This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. +To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes +are upgraded. + +* The `pytorch_inference` process used to run Machine Learning models can consume large amounts of memory. +In environments where the available memory is limited, the OS Out of Memory Killer will kill the `pytorch_inference` +process to reclaim memory. This can cause inference requests to fail. +Elasticsearch will automatically restart the `pytorch_inference` process +after it is killed up to four times in 24 hours. (issue: {es-issue}110530[#110530]) + [[bug-8.13.1]] [float] @@ -35,5 +51,3 @@ Transform:: Transform:: * Raise loglevel of events related to transform lifecycle from DEBUG to INFO {es-pull}106602[#106602] - - diff --git a/docs/reference/release-notes/8.13.2.asciidoc b/docs/reference/release-notes/8.13.2.asciidoc index 514118f5ea575..c4340a200e0c5 100644 --- a/docs/reference/release-notes/8.13.2.asciidoc +++ b/docs/reference/release-notes/8.13.2.asciidoc @@ -3,6 +3,22 @@ Also see <>. +[[known-issues-8.13.2]] +[float] +=== Known issues +* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +information about the new functionality of these upgraded nodes may not be registered properly with the master node. +This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. +To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes +are upgraded. + +* The `pytorch_inference` process used to run Machine Learning models can consume large amounts of memory. +In environments where the available memory is limited, the OS Out of Memory Killer will kill the `pytorch_inference` +process to reclaim memory. This can cause inference requests to fail. +Elasticsearch will automatically restart the `pytorch_inference` process +after it is killed up to four times in 24 hours. 
(issue: {es-issue}110530[#110530]) + [[bug-8.13.2]] [float] @@ -36,5 +52,3 @@ Packaging:: Security:: * Query API Key Information API support for the `typed_keys` request parameter {es-pull}106873[#106873] (issue: {es-issue}106817[#106817]) * Query API Keys support for both `aggs` and `aggregations` keywords {es-pull}107054[#107054] (issue: {es-issue}106839[#106839]) - - diff --git a/docs/reference/release-notes/8.13.3.asciidoc b/docs/reference/release-notes/8.13.3.asciidoc index 9aee0dd815f6d..759b879e16685 100644 --- a/docs/reference/release-notes/8.13.3.asciidoc +++ b/docs/reference/release-notes/8.13.3.asciidoc @@ -10,6 +10,22 @@ Also see <>. SQL:: * Limit how much space some string functions can use {es-pull}107333[#107333] +[[known-issues-8.13.3]] +[float] +=== Known issues +* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +information about the new functionality of these upgraded nodes may not be registered properly with the master node. +This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. +To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes +are upgraded. + +* The `pytorch_inference` process used to run Machine Learning models can consume large amounts of memory. +In environments where the available memory is limited, the OS Out of Memory Killer will kill the `pytorch_inference` +process to reclaim memory. This can cause inference requests to fail. +Elasticsearch will automatically restart the `pytorch_inference` process +after it is killed up to four times in 24 hours. (issue: {es-issue}110530[#110530]) + [[bug-8.13.3]] [float] === Bug fixes @@ -42,5 +58,3 @@ Search:: ES|QL:: * ESQL: Introduce language versioning to REST API {es-pull}106824[#106824] - - diff --git a/docs/reference/release-notes/8.13.4.asciidoc b/docs/reference/release-notes/8.13.4.asciidoc index bf3f2f497d8fc..d8d0b632c734a 100644 --- a/docs/reference/release-notes/8.13.4.asciidoc +++ b/docs/reference/release-notes/8.13.4.asciidoc @@ -3,6 +3,22 @@ Also see <>. +[[known-issues-8.13.4]] +[float] +=== Known issues +* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +information about the new functionality of these upgraded nodes may not be registered properly with the master node. +This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. +To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes +are upgraded. + +* The `pytorch_inference` process used to run Machine Learning models can consume large amounts of memory. +In environments where the available memory is limited, the OS Out of Memory Killer will kill the `pytorch_inference` +process to reclaim memory. This can cause inference requests to fail. +Elasticsearch will automatically restart the `pytorch_inference` process +after it is killed up to four times in 24 hours. 
(issue: {es-issue}110530[#110530]) + [[bug-8.13.4]] [float] === Bug fixes @@ -18,5 +34,3 @@ Snapshot/Restore:: TSDB:: * Fix tsdb codec when doc-values spread in two blocks {es-pull}108276[#108276] - - diff --git a/docs/reference/release-notes/8.14.0.asciidoc b/docs/reference/release-notes/8.14.0.asciidoc index 42f2f86a123ed..87b931fd05906 100644 --- a/docs/reference/release-notes/8.14.0.asciidoc +++ b/docs/reference/release-notes/8.14.0.asciidoc @@ -12,6 +12,22 @@ Security:: * Apply stricter Document Level Security (DLS) rules for the validate query API with the rewrite parameter {es-pull}105709[#105709] * Apply stricter Document Level Security (DLS) rules for terms aggregations when min_doc_count is set to 0 {es-pull}105714[#105714] +[[known-issues-8.14.0]] +[float] +=== Known issues +* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +information about the new functionality of these upgraded nodes may not be registered properly with the master node. +This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. +To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes +are upgraded. + +* The `pytorch_inference` process used to run Machine Learning models can consume large amounts of memory. +In environments where the available memory is limited, the OS Out of Memory Killer will kill the `pytorch_inference` +process to reclaim memory. This can cause inference requests to fail. +Elasticsearch will automatically restart the `pytorch_inference` process +after it is killed up to four times in 24 hours. (issue: {es-issue}110530[#110530]) + [[bug-8.14.0]] [float] === Bug fixes @@ -346,5 +362,3 @@ Network:: Packaging:: * Update bundled JDK to Java 22 (again) {es-pull}108654[#108654] - - diff --git a/docs/reference/release-notes/8.14.1.asciidoc b/docs/reference/release-notes/8.14.1.asciidoc index f161c7d08099c..b35c1e651c767 100644 --- a/docs/reference/release-notes/8.14.1.asciidoc +++ b/docs/reference/release-notes/8.14.1.asciidoc @@ -4,6 +4,22 @@ Also see <>. +[[known-issues-8.14.1]] +[float] +=== Known issues +* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +information about the new functionality of these upgraded nodes may not be registered properly with the master node. +This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. +To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes +are upgraded. + +* The `pytorch_inference` process used to run Machine Learning models can consume large amounts of memory. +In environments where the available memory is limited, the OS Out of Memory Killer will kill the `pytorch_inference` +process to reclaim memory. This can cause inference requests to fail. +Elasticsearch will automatically restart the `pytorch_inference` process +after it is killed up to four times in 24 hours. 
(issue: {es-issue}110530[#110530]) + [[bug-8.14.1]] [float] === Bug fixes @@ -32,5 +48,3 @@ Vector Search:: Infra/Settings:: * Add remove index setting command {es-pull}109276[#109276] - - diff --git a/docs/reference/release-notes/8.14.2.asciidoc b/docs/reference/release-notes/8.14.2.asciidoc new file mode 100644 index 0000000000000..9c21cf6de466c --- /dev/null +++ b/docs/reference/release-notes/8.14.2.asciidoc @@ -0,0 +1,52 @@ +[[release-notes-8.14.2]] +== {es} version 8.14.2 + +Also see <>. + +[[known-issues-8.14.2]] +[float] +=== Known issues +* When upgrading clusters from version 8.12.2 or earlier, if your cluster contains non-master-eligible nodes, +information about the new functionality of these upgraded nodes may not be registered properly with the master node. +This can lead to some new functionality added since 8.13.0 not being accessible on the upgraded cluster. +If your cluster is running on ECK 2.12.1 and above, this may cause problems with finalizing the upgrade. +To resolve this issue, perform a rolling restart on the non-master-eligible nodes once all Elasticsearch nodes +are upgraded. + +* The `pytorch_inference` process used to run Machine Learning models can consume large amounts of memory. +In environments where the available memory is limited, the OS Out of Memory Killer will kill the `pytorch_inference` +process to reclaim memory. This can cause inference requests to fail. +Elasticsearch will automatically restart the `pytorch_inference` process +after it is killed up to four times in 24 hours. (issue: {es-issue}110530[#110530]) + +[[bug-8.14.2]] +[float] +=== Bug fixes + +Data streams:: +* Ensure a lazy rollover request will rollover the target data stream once. {es-pull}109636[#109636] +* [Data streams] Fix the description of the lazy rollover task {es-pull}109629[#109629] + +ES|QL:: +* Fix ESQL cancellation for exchange requests {es-pull}109695[#109695] +* Fix equals and hashcode for `SingleValueQuery.LuceneQuery` {es-pull}110035[#110035] +* Force execute inactive sink reaper {es-pull}109632[#109632] + +Infra/Scripting:: +* Check array size before returning array item in script doc values {es-pull}109824[#109824] (issue: {es-issue}104998[#104998]) + +Infra/Settings:: +* Guard file settings readiness on file settings support {es-pull}109500[#109500] + +Machine Learning:: +* Fix IndexOutOfBoundsException during inference {es-pull}109533[#109533] + +Mapping:: +* Re-define `index.mapper.dynamic` setting in 8.x for a better 7.x to 8.x upgrade if this setting is used. {es-pull}109341[#109341] + +Ranking:: +* Fix for from parameter when using `sub_searches` and rank {es-pull}106253[#106253] (issue: {es-issue}99011[#99011]) + +Search:: +* Add hexstring support byte painless scorers {es-pull}109492[#109492] +* Fix automatic tracking of collapse with `docvalue_fields` {es-pull}110103[#110103] diff --git a/docs/reference/release-notes/8.14.3.asciidoc b/docs/reference/release-notes/8.14.3.asciidoc new file mode 100644 index 0000000000000..0d7d2d9d599c1 --- /dev/null +++ b/docs/reference/release-notes/8.14.3.asciidoc @@ -0,0 +1,22 @@ +[[release-notes-8.14.3]] +== {es} version 8.14.3 + +Also see <>. 
+ +[[bug-8.14.3]] +[float] +=== Bug fixes + +Cluster Coordination:: +* Ensure tasks preserve versions in `MasterService` {es-pull}109850[#109850] + +ES|QL:: +* Introduce compute listener {es-pull}110400[#110400] + +Mapping:: +* Automatically adjust `ignore_malformed` only for the @timestamp {es-pull}109948[#109948] + +TSDB:: +* Disallow index.time_series.end_time setting from being set or updated in normal indices {es-pull}110268[#110268] (issue: {es-issue}110265[#110265]) + + diff --git a/docs/reference/release-notes/8.15.0.asciidoc b/docs/reference/release-notes/8.15.0.asciidoc index 97f4a51a1142f..c13c1c95c09ff 100644 --- a/docs/reference/release-notes/8.15.0.asciidoc +++ b/docs/reference/release-notes/8.15.0.asciidoc @@ -5,4 +5,12 @@ coming[8.15.0] Also see <>. +[[known-issues-8.15.0]] +[float] +=== Known issues +* The `pytorch_inference` process used to run Machine Learning models can consume large amounts of memory. +In environments where the available memory is limited, the OS Out of Memory Killer will kill the `pytorch_inference` +process to reclaim memory. This can cause inference requests to fail. +Elasticsearch will automatically restart the `pytorch_inference` process +after it is killed up to four times in 24 hours. (issue: {es-issue}110530[#110530]) diff --git a/docs/reference/release-notes/8.16.0.asciidoc b/docs/reference/release-notes/8.16.0.asciidoc new file mode 100644 index 0000000000000..7b2e7459be968 --- /dev/null +++ b/docs/reference/release-notes/8.16.0.asciidoc @@ -0,0 +1,8 @@ +[[release-notes-8.16.0]] +== {es} version 8.16.0 + +coming[8.16.0] + +Also see <>. + + diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc index ead1596c64fdd..0ed01ff422700 100644 --- a/docs/reference/release-notes/highlights.asciidoc +++ b/docs/reference/release-notes/highlights.asciidoc @@ -11,7 +11,8 @@ For detailed information about this release, see the <> and // Add previous release to the list Other versions: -{ref-bare}/8.14/release-highlights.html[8.14] +{ref-bare}/8.15/release-highlights.html[8.15] +| {ref-bare}/8.14/release-highlights.html[8.14] | {ref-bare}/8.13/release-highlights.html[8.13] | {ref-bare}/8.12/release-highlights.html[8.12] | {ref-bare}/8.11/release-highlights.html[8.11] @@ -44,6 +45,73 @@ faster indexing and similar retrieval latencies. {es-pull}103374[#103374] + +[discrete] +[[stricter_failure_handling_in_multi_repo_get_snapshots_request_handling]] +=== Stricter failure handling in multi-repo get-snapshots request handling +If a multi-repo get-snapshots request encounters a failure in one of the +targeted repositories, then earlier versions of Elasticsearch would proceed +as if the faulty repository did not exist, except for a per-repository +failure report in a separate section of the response body. This makes it +impossible to paginate the results properly in the presence of failures. In +versions 8.15.0 and later this API's failure handling behaviour has been +made stricter, reporting an overall failure if any targeted repository's +contents cannot be listed. + +{es-pull}107191[#107191] + +[discrete] +[[add_new_int4_quantization_to_dense_vector]] +=== Add new int4 quantization to dense_vector +New int4 (half-byte) scalar quantization support via two new index types: `int4_hnsw` and `int4_flat`. +This gives an 8x reduction from `float32` with some accuracy loss. In addition to less memory required, this +improves query and merge speed significantly when compared to raw vectors.
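As a rough, back-of-the-envelope illustration of that memory claim (ours, not part of the release notes): raw `float32` storage uses 32 bits per dimension while int4 uses 4, so the raw vector bytes for a given corpus shrink by about 8x. The sketch below ignores HNSW graph overhead and quantization correction data, and the mapping fragment in the trailing comment is an assumed shape rather than a verbatim excerpt from the `dense_vector` docs.

[source,python]
----
# Back-of-the-envelope estimate of raw vector storage for float32 vs. int4.
# Ignores HNSW graph structures and quantization metadata, so treat the 8x
# figure as applying to raw vector storage only.

def raw_vector_bytes(num_vectors: int, dims: int, bits_per_dim: int) -> int:
    return num_vectors * dims * bits_per_dim // 8

num_vectors, dims = 1_000_000, 768
float32_bytes = raw_vector_bytes(num_vectors, dims, 32)   # 3_072_000_000 (~3.07 GB)
int4_bytes = raw_vector_bytes(num_vectors, dims, 4)       # 384_000_000 (~0.38 GB)
print(float32_bytes / int4_bytes)                          # 8.0

# Hypothetical mapping fragment enabling int4 quantization (assumed shape):
# "vector": {"type": "dense_vector", "dims": 768,
#            "index_options": {"type": "int4_hnsw"}}
----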
+ +{es-pull}109317[#109317] + +[discrete] +[[mark_query_rules_as_ga]] +=== Mark Query Rules as GA +Query rules are now Generally Available. All query rules APIs are no longer +in tech preview. + +{es-pull}110004[#110004] + +[discrete] +[[adds_new_bit_element_type_for_dense_vectors]] +=== Adds new `bit` `element_type` for `dense_vectors` +This adds `bit` vector support by adding `element_type: bit` for +vectors. This new element type works for indexed and non-indexed +vectors. Additionally, it works with `hnsw` and `flat` index types. No +quantization based codec works with this element type; this is +consistent with `byte` vectors. + +`bit` vectors accept up to `32768` dimensions in size and expect vectors +that are being indexed to be encoded either as a hexadecimal string or a +`byte[]` array where each element of the `byte` array represents `8` +bits of the vector. + +`bit` vectors support script usage and regular query usage. When +indexed, all comparisons done are `xor` and `popcount` summations (aka, +hamming distance), and the scores are transformed and normalized given +the vector dimensions. + +For scripts, `l1norm` is the same as `hamming` distance and `l2norm` is +`sqrt(l1norm)`. `dotProduct` and `cosineSimilarity` are not supported. + +Note, the dimensions expected by this element_type must always be +divisible by `8`, and the `byte[]` vectors provided for indexing must +have size `dim/8`, where each byte element represents `8` bits of +the vectors. + +{es-pull}110059[#110059] + +[discrete] +[[redact_processor_generally_available]] +=== The Redact processor is Generally Available +The Redact processor uses the Grok rules engine to obscure text in the input document matching the given Grok patterns. The Redact processor was initially released as Technical Preview in `8.7.0`, and is now released as Generally Available. + +{es-pull}110395[#110395] + // end::notable-highlights[] @@ -61,6 +129,17 @@ set the JVM property `es.datetime.java_time_parsers=true` on all ES nodes. {es-pull}106486[#106486] + +[discrete] +[[new_custom_parser_for_more_iso_8601_date_formats]] +=== New custom parser for more ISO-8601 date formats +Following on from #106486, this extends the custom ISO-8601 datetime parser to cover the `strict_year`, +`strict_year_month`, `strict_date_time`, `strict_date_time_no_millis`, `strict_date_hour_minute_second`, +`strict_date_hour_minute_second_millis`, and `strict_date_hour_minute_second_fraction` date formats. +As before, the parser will use the existing java.time parser if there are parsing issues, and the +`es.datetime.java_time_parsers=true` JVM property will force the use of the old parsers regardless. + +{es-pull}108606[#108606] + [discrete] [[preview_support_for_connection_type_domain_isp_databases_in_geoip_processor]] === Preview: Support for the 'Connection Type, 'Domain', and 'ISP' databases in the geoip processor @@ -86,3 +165,23 @@ But, here are some particular highlights: {es-pull}109219[#109219] + +[discrete] +[[synthetic_source_improvements]] +=== Synthetic `_source` improvements +There are multiple improvements to synthetic `_source` functionality: + +* Synthetic `_source` is now supported for all field types including `nested` and `object`. `object` fields are supported with `enabled` set to `false`. + +* Synthetic `_source` can be enabled together with `ignore_malformed` and `ignore_above` parameters for all field types that support them.
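To make the last bullet concrete, here is a minimal, illustrative index body expressed as a Python dict. The field names are invented and the `_source.mode: synthetic` setting is assumed from the existing synthetic `_source` documentation; check that page for the authoritative syntax.

[source,python]
----
# Illustrative create-index body only; field names are made up and the exact
# request shape should be verified against the synthetic _source documentation.
create_index_body = {
    "mappings": {
        "_source": {"mode": "synthetic"},          # enable synthetic _source
        "properties": {
            "status_code": {"type": "long", "ignore_malformed": True},
            "message": {"type": "keyword", "ignore_above": 1024},
        },
    }
}
# e.g. send as PUT /my-logs with this body via any HTTP client.
----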
+ +{es-pull}109501[#109501] + +[discrete] +[[index_sorting_on_indexes_with_nested_fields]] +=== Index sorting on indexes with nested fields +Index sorting is now supported for indexes with mappings containing nested objects. +The index sort spec (as specified by `index.sort.field`) can't contain any nested +fields, still. + +{es-pull}110251[#110251] + diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index e537fc959965a..7c2e42a26b923 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -594,7 +594,7 @@ end::knn-filter[] tag::knn-k[] Number of nearest neighbors to return as top hits. This value must be less than -`num_candidates`. Defaults to `size`. +or equal to `num_candidates`. Defaults to `size`. end::knn-k[] tag::knn-num-candidates[] diff --git a/docs/reference/rest-api/security.asciidoc b/docs/reference/rest-api/security.asciidoc index 4571d963179a6..82cf38e52bd80 100644 --- a/docs/reference/rest-api/security.asciidoc +++ b/docs/reference/rest-api/security.asciidoc @@ -45,9 +45,12 @@ Use the following APIs to add, remove, update, and retrieve role mappings: Use the following APIs to add, remove, update, and retrieve roles in the native realm: * <> +* <> * <> * <> +* <> * <> +* <> [discrete] [[security-token-apis]] @@ -171,6 +174,8 @@ include::security/create-api-keys.asciidoc[] include::security/put-app-privileges.asciidoc[] include::security/create-role-mappings.asciidoc[] include::security/create-roles.asciidoc[] +include::security/bulk-create-roles.asciidoc[] +include::security/bulk-delete-roles.asciidoc[] include::security/create-users.asciidoc[] include::security/create-service-token.asciidoc[] include::security/delegate-pki-authentication.asciidoc[] @@ -188,6 +193,7 @@ include::security/get-app-privileges.asciidoc[] include::security/get-builtin-privileges.asciidoc[] include::security/get-role-mappings.asciidoc[] include::security/get-roles.asciidoc[] +include::security/query-role.asciidoc[] include::security/get-service-accounts.asciidoc[] include::security/get-service-credentials.asciidoc[] include::security/get-settings.asciidoc[] diff --git a/docs/reference/rest-api/security/bulk-create-roles.asciidoc b/docs/reference/rest-api/security/bulk-create-roles.asciidoc new file mode 100644 index 0000000000000..a8072b7ba549a --- /dev/null +++ b/docs/reference/rest-api/security/bulk-create-roles.asciidoc @@ -0,0 +1,328 @@ +[role="xpack"] +[[security-api-bulk-put-role]] +=== Bulk create or update roles API +preview::[] +++++ +Bulk create or update roles API +++++ + +Bulk adds and updates roles in the native realm. + +[[security-api-bulk-put-role-request]] +==== {api-request-title} + +`POST /_security/role/` + + +[[security-api-bulk-put-role-prereqs]] +==== {api-prereq-title} + +* To use this API, you must have at least the `manage_security` cluster +privilege. + +[[security-api-bulk-put-role-desc]] +==== {api-description-title} + +The role management APIs are generally the preferred way to manage roles, rather than using +<>. The bulk create +or update roles API cannot update roles that are defined in roles files. + +[[security-api-bulk-put-role-path-params]] +==== {api-path-parms-title} + +`refresh`:: +Optional setting of the {ref}/docs-refresh.html[refresh policy] for the write request. Defaults to Immediate. 
+ +[[security-api-bulk-put-role-request-body]] +==== {api-request-body-title} + +The following parameters can be specified in the body of a POST request +and pertain to adding a set of roles: + +`roles`:: +(object) The roles to add as a role name to role map. + +==== +`` (required):: (string) The role name. +`applications`:: (list) A list of application privilege entries. +`application` (required)::: (string) The name of the application to which this entry applies. +`privileges`::: (list) A list of strings, where each element is the name of an application +privilege or action. +`resources`::: (list) A list resources to which the privileges are applied. + +`cluster`:: (list) A list of cluster privileges. These privileges define the +cluster level actions that users with this role are able to execute. + +`global`:: (object) An object defining global privileges. A global privilege is +a form of cluster privilege that is request-aware. Support for global privileges +is currently limited to the management of application privileges. + +`indices`:: (list) A list of indices permissions entries. +`field_security`::: (object) The document fields that the owners of the role have +read access to. For more information, see +<>. +`names` (required)::: (list) A list of indices (or index name patterns) to which the +permissions in this entry apply. +`privileges`(required)::: (list) The index level privileges that the owners of the role +have on the specified indices. +`query`::: A search query that defines the documents the owners of the role have +read access to. A document within the specified indices must match this query in +order for it to be accessible by the owners of the role. + +`metadata`:: (object) Optional meta-data. Within the `metadata` object, keys +that begin with `_` are reserved for system usage. + +`run_as`:: (list) A list of users that the owners of this role can impersonate. +For more information, see +<>. + +`remote_indices`:: beta:[] (list) A list of remote indices permissions entries. ++ +-- +NOTE: Remote indices are effective for <>. +They have no effect for remote clusters configured with the <>. +-- +`clusters` (required)::: (list) A list of cluster aliases to which the permissions +in this entry apply. +`field_security`::: (object) The document fields that the owners of the role have +read access to. For more information, see +<>. +`names` (required)::: (list) A list of indices (or index name patterns) on the remote clusters +(specified with `clusters`) to which the permissions in this entry apply. +`privileges`(required)::: (list) The index level privileges that the owners of the role +have on the specified indices. +`query`::: A search query that defines the documents the owners of the role have +read access to. A document within the specified indices must match this query in +order for it to be accessible by the owners of the role. + +For more information, see <>. 
+==== + +[[security-bulk-api-put-role-example]] +==== {api-examples-title} + +The following example adds the roles called `my_admin_role` and `my_user_role`: + +[source,console] +-------------------------------------------------- +POST /_security/role +{ + "roles": { + "my_admin_role": { + "cluster": [ + "all" + ], + "indices": [ + { + "names": [ + "index1", + "index2" + ], + "privileges": [ + "all" + ], + "field_security": { + "grant": [ + "title", + "body" + ] + }, + "query": "{\"match\": {\"title\": \"foo\"}}" + } + ], + "applications": [ + { + "application": "myapp", + "privileges": [ + "admin", + "read" + ], + "resources": [ + "*" + ] + } + ], + "run_as": [ + "other_user" + ], + "metadata": { + "version": 1 + } + }, + "my_user_role": { + "cluster": [ + "all" + ], + "indices": [ + { + "names": [ + "index1" + ], + "privileges": [ + "read" + ], + "field_security": { + "grant": [ + "title", + "body" + ] + }, + "query": "{\"match\": {\"title\": \"foo\"}}" + } + ], + "applications": [ + { + "application": "myapp", + "privileges": [ + "admin", + "read" + ], + "resources": [ + "*" + ] + } + ], + "run_as": [ + "other_user" + ], + "metadata": { + "version": 1 + } + } + } +} +-------------------------------------------------- + +A successful call returns a JSON structure that shows whether the role has been +created, updated, or had no changes made. + +[source,console-result] +-------------------------------------------------- +{ + "created": [ <1> + "my_admin_role", <2> + "my_user_role" + ] +} +-------------------------------------------------- + +<1> Result type, one of `created`, `updated`, `noop`, `errors`. +<2> A list of the roles that were created. + +Because errors are handled individually for each role create or update, the API allows partial success. + +The following query would throw an error for `my_admin_role` because the privilege `bad_cluster_privilege` +doesn't exist, but would be successful for the `my_user_role`. + +[source,console] +-------------------------------------------------- +POST /_security/role +{ + "roles": { + "my_admin_role": { + "cluster": [ + "bad_cluster_privilege" + ], + "indices": [ + { + "names": [ + "index1", + "index2" + ], + "privileges": ["all"], + "field_security": { + "grant": [ + "title", + "body" + ] + }, + "query": "{\"match\": {\"title\": \"foo\"}}" + } + ], + "applications": [ + { + "application": "myapp", + "privileges": [ + "admin", + "read" + ], + "resources": [ + "*" + ] + } + ], + "run_as": [ + "other_user" + ], + "metadata": { + "version": 1 + } + }, + "my_user_role": { + "cluster": [ + "all" + ], + "indices": [ + { + "names": [ + "index1" + ], + "privileges": [ + "read" + ], + "field_security": { + "grant": [ + "title", + "body" + ] + }, + "query": "{\"match\": {\"title\": \"foo\"}}" + } + ], + "applications": [ + { + "application": "myapp", + "privileges": [ + "admin", + "read" + ], + "resources": [ + "*" + ] + } + ], + "run_as": [ + "other_user" + ], + "metadata": { + "version": 1 + } + } + } +} +-------------------------------------------------- + +The result would then have the `errors` field set to `true` and hold the error for the `my_admin_role` update. + + +[source,console-result] +-------------------------------------------------- +{ + "created": [ + "my_user_role" <1> + ], + "errors": { <2> + "count": 1, <3> + "details": { + "my_admin_role": { <4> + "type": "action_request_validation_exception", + "reason": "Validation Failed: 1: unknown cluster privilege [bad_cluster_privilege]. 
a privilege must be either one of the predefined cluster privilege names [manage_own_api_key,none,cancel_task,cross_cluster_replication,cross_cluster_search,delegate_pki,grant_api_key,manage_autoscaling,manage_index_templates,manage_logstash_pipelines,manage_oidc,manage_saml,manage_search_application,manage_search_query_rules,manage_search_synonyms,manage_service_account,manage_token,manage_user_profile,monitor_connector,monitor_data_stream_global_retention,monitor_enrich,monitor_inference,monitor_ml,monitor_rollup,monitor_snapshot,monitor_text_structure,monitor_watcher,post_behavioral_analytics_event,read_ccr,read_connector_secrets,read_fleet_secrets,read_ilm,read_pipeline,read_security,read_slm,transport_client,write_connector_secrets,write_fleet_secrets,create_snapshot,manage_behavioral_analytics,manage_ccr,manage_connector,manage_data_stream_global_retention,manage_enrich,manage_ilm,manage_inference,manage_ml,manage_rollup,manage_slm,manage_watcher,monitor_data_frame_transforms,monitor_transform,manage_api_key,manage_ingest_pipelines,manage_pipeline,manage_data_frame_transforms,manage_transform,manage_security,monitor,manage,all] or a pattern over one of the available cluster actions;" + } + } + } +} +-------------------------------------------------- + +<1> The successfully created role. +<2> The errors encountered. +<3> The number of put role requests that resulted in an error. +<4> The error keyed by role name. diff --git a/docs/reference/rest-api/security/bulk-delete-roles.asciidoc b/docs/reference/rest-api/security/bulk-delete-roles.asciidoc new file mode 100644 index 0000000000000..a782b5e37fcb9 --- /dev/null +++ b/docs/reference/rest-api/security/bulk-delete-roles.asciidoc @@ -0,0 +1,120 @@ +[role="xpack"] +[[security-api-bulk-delete-role]] +=== Bulk delete roles API +preview::[] +++++ +Bulk delete roles API +++++ + +Bulk deletes roles in the native realm. + +[[security-api-bulk-delete-role-request]] +==== {api-request-title} + +`DELETE /_security/role/` + +[[security-api-bulk-delete-role-prereqs]] +==== {api-prereq-title} + +* To use this API, you must have at least the `manage_security` cluster +privilege. + +[[security-api-bulk-delete-role-desc]] +==== {api-description-title} + +The role management APIs are generally the preferred way to manage roles, rather than using +<>. The bulk delete roles API cannot delete +roles that are defined in roles files. + +[[security-api-bulk-delete-role-path-params]] +==== {api-path-parms-title} + +`refresh`:: +Optional setting of the {ref}/docs-refresh.html[refresh policy] for the write request. Defaults to Immediate. + +[[security-api-bulk-delete-role-request-body]] +==== {api-request-body-title} + +The following parameters can be specified in the body of a DELETE request +and pertain to deleting a set of roles: + +`names`:: +(list) A list of role names to delete. 
+ +[[security-bulk-api-delete-role-example]] +==== {api-examples-title} +The following example deletes a `my_admin_role` and `my_user_role` roles: + +[source,console] +-------------------------------------------------- +DELETE /_security/role +{ + "names": ["my_admin_role", "my_user_role"] +} +-------------------------------------------------- +// TEST[setup:admin_role,user_role] + +If the roles are successfully deleted, the request returns: + +[source,console-result] +-------------------------------------------------- +{ + "deleted": [ + "my_admin_role", + "my_user_role" + ] +} +-------------------------------------------------- + +If a role cannot be found, the not found roles are grouped under `not_found`: + +[source,console] +-------------------------------------------------- +DELETE /_security/role +{ + "names": ["my_admin_role", "not_an_existing_role"] +} +-------------------------------------------------- +// TEST[setup:admin_role] + +[source,console-result] +-------------------------------------------------- +{ + "deleted": [ + "my_admin_role" + ], + "not_found": [ + "not_an_existing_role" + ] +} +-------------------------------------------------- + +If a request fails or is invalid, the errors are grouped under `errors`: + +[source,console] +-------------------------------------------------- +DELETE /_security/role +{ + "names": ["my_admin_role", "superuser"] +} +-------------------------------------------------- +// TEST[setup:admin_role] + + +[source,console-result] +-------------------------------------------------- +{ + "deleted": [ + "my_admin_role" + ], + "errors": { + "count": 1, + "details": { + "superuser": { + "type": "illegal_argument_exception", + "reason": "role [superuser] is reserved and cannot be deleted" + } + } + } +} +-------------------------------------------------- diff --git a/docs/reference/rest-api/security/get-builtin-privileges.asciidoc b/docs/reference/rest-api/security/get-builtin-privileges.asciidoc index bbd0ca03c0473..8435f5539ab9d 100644 --- a/docs/reference/rest-api/security/get-builtin-privileges.asciidoc +++ b/docs/reference/rest-api/security/get-builtin-privileges.asciidoc @@ -77,6 +77,7 @@ A successful call returns an object with "cluster", "index", and "remote_cluster "manage_autoscaling", "manage_behavioral_analytics", "manage_ccr", + "manage_connector", "manage_data_frame_transforms", "manage_data_stream_global_retention", "manage_enrich", @@ -102,6 +103,7 @@ A successful call returns an object with "cluster", "index", and "remote_cluster "manage_user_profile", "manage_watcher", "monitor", + "monitor_connector", "monitor_data_frame_transforms", "monitor_data_stream_global_retention", "monitor_enrich", diff --git a/docs/reference/rest-api/security/get-roles.asciidoc b/docs/reference/rest-api/security/get-roles.asciidoc index 3eb5a735194c6..3cc2f95c6ea7e 100644 --- a/docs/reference/rest-api/security/get-roles.asciidoc +++ b/docs/reference/rest-api/security/get-roles.asciidoc @@ -38,7 +38,10 @@ API cannot retrieve roles that are defined in roles files. ==== {api-response-body-title} A successful call returns an array of roles with the JSON representation of the -role. +role. The returned role format is a simple extension of the <> format, +only adding an extra field `transient_metadata.enabled`. +This field is `false` in case the role is automatically disabled, for example when the license +level does not allow some permissions that the role grants. 
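As a small usage sketch (ours, not part of the reference above), the `transient_metadata.enabled` flag can be checked client-side after calling the get roles endpoint. The URL and credentials below are placeholders for a throwaway local test cluster.

[source,python]
----
# List native roles and flag any that are automatically disabled.
# ES_URL and AUTH are placeholders, not values from this reference.
import requests

ES_URL = "https://localhost:9200"
AUTH = ("elastic", "<password>")

resp = requests.get(f"{ES_URL}/_security/role", auth=AUTH, verify=False)
resp.raise_for_status()

for name, role in resp.json().items():
    enabled = role.get("transient_metadata", {}).get("enabled", True)
    if not enabled:
        # e.g. the current license does not allow a permission this role grants
        print(f"role {name} is automatically disabled")
----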
[[security-api-get-role-response-codes]] ==== {api-response-codes-title} diff --git a/docs/reference/rest-api/security/query-role.asciidoc b/docs/reference/rest-api/security/query-role.asciidoc new file mode 100644 index 0000000000000..937bd263140fc --- /dev/null +++ b/docs/reference/rest-api/security/query-role.asciidoc @@ -0,0 +1,283 @@ +[role="xpack"] +[[security-api-query-role]] +=== Query Role API + +++++ +Query Role +++++ + +Retrieves roles with <> in a <> fashion. + +[[security-api-query-role-request]] +==== {api-request-title} + +`GET /_security/_query/role` + +`POST /_security/_query/role` + +[[security-api-query-role-prereqs]] +==== {api-prereq-title} + +* To use this API, you must have at least the `read_security` cluster privilege. + +[[security-api-query-role-desc]] +==== {api-description-title} + +The role management APIs are generally the preferred way to manage roles, rather than using +<>. +The query roles API does not retrieve roles that are defined in roles files, nor <> ones. +You can optionally filter the results with a query. Also, the results can be paginated and sorted. + +[[security-api-query-role-request-body]] +==== {api-request-body-title} + +You can specify the following parameters in the request body: + +`query`:: +(Optional, string) A <> to filter which roles to return. +The query supports a subset of query types, including +<>, <>, +<>, <>, +<>, <>, +<>, <>, +<>, <>, +and <>. ++ +You can query the following values associated with a role. ++ +.Valid values for `query` +[%collapsible%open] +==== +`name`:: +(keyword) The <> of the role. + +`description`:: +(text) The <> of the role. + +`metadata`:: +(flattened) Metadata field associated with the <>, such as `metadata.app_tag`. +Note that metadata is internally indexed as a <> field type. +This means that all sub-fields act like `keyword` fields when querying and sorting. +It also implies that it is not possible to refer to a subset of metadata fields using wildcard patterns, +e.g. `metadata.field*`, even for query types that support field name patterns. +Lastly, all the metadata fields can be searched together when simply mentioning the +`metadata` field (i.e. not followed by any dot and sub-field name). + +`applications`:: +The list of <> that the role grants. + +`application`::: +(keyword) The name of the application associated to the privileges and resources. + +`privileges`::: +(keyword) The names of the privileges that the role grants. + +`resources`::: +(keyword) The resources to which the privileges apply. + +==== + +include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=from] ++ +By default, you cannot page through more than 10,000 hits using the `from` and +`size` parameters. To page through more hits, use the +<> parameter. + +`size`:: +(Optional, integer) The number of hits to return. Must not be negative and defaults to `10`. ++ +By default, you cannot page through more than 10,000 hits using the `from` and +`size` parameters. To page through more hits, use the +<> parameter. + +`sort`:: +(Optional, object) <>. You can sort on `username`, `roles` or `enabled`. +In addition, sort can also be applied to the `_doc` field to sort by index order. + +`search_after`:: +(Optional, array) <> definition. + + +[[security-api-query-role-response-body]] +==== {api-response-body-title} + +This API returns the following top level fields: + +`total`:: +The total number of roles found. + +`count`:: +The number of roles returned in the response. + +`roles`:: +A list of roles that match the query. 
+The returned role format is an extension of the <> format. +It adds the `transient_metadata.enabled` and the `_sort` fields. +`transient_metadata.enabled` is set to `false` in case the role is automatically disabled, +for example when the role grants privileges that are not allowed by the installed license. +`_sort` is present when the search query sorts on some field. +It contains the array of values that have been used for sorting. + +[[security-api-query-role-example]] +==== {api-examples-title} + +The following request lists all roles, sorted by the role name: + +[source,console] +---- +POST /_security/_query/role +{ + "sort": ["name"] +} +---- +// TEST[setup:admin_role,user_role] + +A successful call returns a JSON structure that contains the information +retrieved for one or more roles: + +[source,console-result] +---- +{ + "total": 2, + "count": 2, + "roles": [ <1> + { + "name" : "my_admin_role", + "cluster" : [ + "all" + ], + "indices" : [ + { + "names" : [ + "index1", + "index2" + ], + "privileges" : [ + "all" + ], + "field_security" : { + "grant" : [ + "title", + "body" + ] + }, + "allow_restricted_indices" : false + } + ], + "applications" : [ ], + "run_as" : [ + "other_user" + ], + "metadata" : { + "version" : 1 + }, + "transient_metadata" : { + "enabled" : true + }, + "description" : "Grants full access to all management features within the cluster.", + "_sort" : [ + "my_admin_role" + ] + }, + { + "name" : "my_user_role", + "cluster" : [ ], + "indices" : [ + { + "names" : [ + "index1", + "index2" + ], + "privileges" : [ + "all" + ], + "field_security" : { + "grant" : [ + "title", + "body" + ] + }, + "allow_restricted_indices" : false + } + ], + "applications" : [ ], + "run_as" : [ ], + "metadata" : { + "version" : 1 + }, + "transient_metadata" : { + "enabled" : true + }, + "description" : "Grants user access to some indicies.", + "_sort" : [ + "my_user_role" + ] + } + ] +} +---- +// TEST[continued] + +<1> The list of roles that were retrieved for this request + +Similarly, the following request can be used to query only the user access role, +given its description: + +[source,console] +---- +POST /_security/_query/role +{ + "query": { + "match": { + "description": { + "query": "user access" + } + } + }, + "size": 1 <1> +} +---- +// TEST[continued] + +<1> Return only the best matching role + +[source,console-result] +---- +{ + "total": 2, + "count": 1, + "roles": [ + { + "name" : "my_user_role", + "cluster" : [ ], + "indices" : [ + { + "names" : [ + "index1", + "index2" + ], + "privileges" : [ + "all" + ], + "field_security" : { + "grant" : [ + "title", + "body" + ] + }, + "allow_restricted_indices" : false + } + ], + "applications" : [ ], + "run_as" : [ ], + "metadata" : { + "version" : 1 + }, + "transient_metadata" : { + "enabled" : true + }, + "description" : "Grants user access to some indicies." + } + ] +} +---- diff --git a/docs/reference/rest-api/security/query-user.asciidoc b/docs/reference/rest-api/security/query-user.asciidoc index 952e0f40f2a3a..23852f0f2eed7 100644 --- a/docs/reference/rest-api/security/query-user.asciidoc +++ b/docs/reference/rest-api/security/query-user.asciidoc @@ -66,13 +66,6 @@ The email of the user. Specifies whether the user is enabled. ==== -[[security-api-query-user-query-params]] -==== {api-query-parms-title} - -`with_profile_uid`:: -(Optional, boolean) Determines whether to retrieve the <> `uid`, -if exists, for the users. Defaults to `false`. 
- include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=from] + By default, you cannot page through more than 10,000 hits using the `from` and @@ -93,6 +86,12 @@ In addition, sort can also be applied to the `_doc` field to sort by index order `search_after`:: (Optional, array) <> definition. +[[security-api-query-user-query-params]] +==== {api-query-parms-title} + +`with_profile_uid`:: +(Optional, boolean) Determines whether to retrieve the <> `uid`, +if exists, for the users. Defaults to `false`. [[security-api-query-user-response-body]] ==== {api-response-body-title} @@ -191,7 +190,7 @@ Use the user information retrieve the user with a query: [source,console] ---- -GET /_security/_query/user +POST /_security/_query/user { "query": { "prefix": { @@ -231,7 +230,7 @@ To retrieve the user `profile_uid` as part of the response: [source,console] -------------------------------------------------- -GET /_security/_query/user?with_profile_uid=true +POST /_security/_query/user?with_profile_uid=true { "query": { "prefix": { @@ -272,7 +271,7 @@ Use a `bool` query to issue complex logical conditions and use [source,js] ---- -GET /_security/_query/user +POST /_security/_query/user { "query": { "bool": { diff --git a/docs/reference/rest-api/security/role-mapping-resources.asciidoc b/docs/reference/rest-api/security/role-mapping-resources.asciidoc index 4c9ed582b674a..25703dc073e00 100644 --- a/docs/reference/rest-api/security/role-mapping-resources.asciidoc +++ b/docs/reference/rest-api/security/role-mapping-resources.asciidoc @@ -70,7 +70,7 @@ example, `"username": "jsmith"`. `groups`:: (array of strings) The groups to which the user belongs. For example, `"groups" : [ "cn=admin,ou=groups,dc=example,dc=com","cn=esusers,ou=groups,dc=example,dc=com ]`. `metadata`:: -(object) Additional metadata for the user. For example, `"metadata": { "cn": "John Smith" }`. +(object) Additional metadata for the user. This can include a variety of key-value pairs. When referencing metadata fields in role mapping rules, use the dot notation to specify the key within the metadata object. If the key contains special characters such as parentheses, dots, or spaces, you must escape these characters using backslashes (`\`). For example, `"metadata": { "cn": "John Smith" }`. `realm`:: (object) The realm that authenticated the user. The only field in this object is the realm name. For example, `"realm": { "name": "ldap1" }`. diff --git a/docs/reference/rest-api/security/update-settings.asciidoc b/docs/reference/rest-api/security/update-settings.asciidoc index 652b722b0af48..b227bb70b31d7 100644 --- a/docs/reference/rest-api/security/update-settings.asciidoc +++ b/docs/reference/rest-api/security/update-settings.asciidoc @@ -18,27 +18,28 @@ Updates the settings of the security internal indices. ==== {api-request-body-title} `security`:: -(Optional, object) Settings to be used for the index used for most security +(Optional, object) Settings to be used for the index used for most security configuration, including Native realm users and roles configured via the API. `security-tokens`:: -(Optional, object) Settings to be used for the index used to store +(Optional, object) Settings to be used for the index used to store <>. -`security`:: -(Optional, object) Settings to be used for the index used to store +`security-profile`:: +(Optional, object) Settings to be used for the index used to store <> information. 
[[security-api-update-settings-desc]] ==== {api-description-title} -This API allows a user to modify the settings for the Security internal indices -(`.security` and associated indices). Only a subset of settings are allowed to +This API allows a user to modify the settings for the Security internal indices +(`.security` and associated indices). Only a subset of settings are allowed to be modified. This includes: - `index.auto_expand_replicas` - `index.number_of_replicas` +NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will be ignored during updates. [[security-api-update-settings-example]] ==== {api-examples-title} @@ -62,8 +63,8 @@ PUT /_security/settings ----------------------------------------------------------- // TEST[skip:making sure all the indices have been created reliably is difficult] -The configured settings can be retrieved using the -<> API. If a given index -is not in use on the system, but settings are provided for it, the request will -be rejected - this API does not yet support configuring the settings for these +The configured settings can be retrieved using the +<> API. If a given index +is not in use on the system, but settings are provided for it, the request will +be rejected - this API does not yet support configuring the settings for these indices before they are in use. diff --git a/docs/reference/scripting/security.asciidoc b/docs/reference/scripting/security.asciidoc index 0f322d08726b9..249a705e92817 100644 --- a/docs/reference/scripting/security.asciidoc +++ b/docs/reference/scripting/security.asciidoc @@ -9,8 +9,8 @@ security in a defense in depth strategy for scripting. The second layer of security is the https://www.oracle.com/java/technologies/javase/seccodeguide.html[Java Security Manager]. As part of its startup sequence, {es} enables the Java Security Manager to limit the actions that -portions of the code can take. <> uses -the Java Security Manager as an additional layer of defense to prevent scripts +portions of the code can take. <> uses +the Java Security Manager as an additional layer of defense to prevent scripts from doing things like writing files and listening to sockets. {es} uses @@ -18,22 +18,28 @@ from doing things like writing files and listening to sockets. https://www.chromium.org/developers/design-documents/sandbox/osx-sandboxing-design[Seatbelt] in macOS, and https://msdn.microsoft.com/en-us/library/windows/desktop/ms684147[ActiveProcessLimit] -on Windows as additional security layers to prevent {es} from forking or +on Windows as additional security layers to prevent {es} from forking or running other processes. +Finally, scripts used in +<> +can be restricted to a defined list of scripts, or forbidden altogether. +This can prevent users from running particularly slow or resource intensive aggregation +queries. + You can modify the following script settings to restrict the type of scripts -that are allowed to run, and control the available +that are allowed to run, and control the available {painless}/painless-contexts.html[contexts] that scripts can run in. To -implement additional layers in your defense in depth strategy, follow the +implement additional layers in your defense in depth strategy, follow the <>. [[allowed-script-types-setting]] [discrete] === Allowed script types setting -{es} supports two script types: `inline` and `stored`. By default, {es} is -configured to run both types of scripts. To limit what type of scripts are run, -set `script.allowed_types` to `inline` or `stored`. 
To prevent any scripts from +{es} supports two script types: `inline` and `stored`. By default, {es} is +configured to run both types of scripts. To limit what type of scripts are run, +set `script.allowed_types` to `inline` or `stored`. To prevent any scripts from running, set `script.allowed_types` to `none`. IMPORTANT: If you use {kib}, set `script.allowed_types` to both or just `inline`. @@ -61,3 +67,48 @@ For example, to allow scripts to run only in `scoring` and `update` contexts: ---- script.allowed_contexts: score, update ---- + +[[allowed-script-in-aggs-settings]] +[discrete] +=== Allowed scripts in scripted metrics aggregations + +By default, all scripts are permitted in +<>. +To restrict the set of allowed scripts, set +<> +to `true` and provide the allowed scripts using +<> +and/or +<>. + +To disallow certain script types, omit the corresponding script list +(`search.aggs.allowed_inline_metric_scripts` or +`search.aggs.allowed_stored_metric_scripts`) or set it to an empty array. +When both script lists are not empty, the given stored scripts and the given inline scripts +will be allowed. + +The following example permits only 4 specific stored scripts to be used, and no inline scripts: + +[source,yaml] +---- +search.aggs.only_allowed_metric_scripts: true +search.aggs.allowed_inline_metric_scripts: [] +search.aggs.allowed_stored_metric_scripts: + - script_id_1 + - script_id_2 + - script_id_3 + - script_id_4 +---- + +Conversely, the next example allows specific inline scripts but no stored scripts: + +[source,yaml] +---- +search.aggs.only_allowed_metric_scripts: true +search.aggs.allowed_inline_metric_scripts: + - 'state.transactions = []' + - 'state.transactions.add(doc.some_field.value)' + - 'long sum = 0; for (t in state.transactions) { sum += t } return sum' + - 'long sum = 0; for (a in states) { sum += a } return sum' +search.aggs.allowed_stored_metric_scripts: [] +---- diff --git a/docs/reference/search/multi-search-template-api.asciidoc b/docs/reference/search/multi-search-template-api.asciidoc index c8eea52a6fd9b..b1c9518b1f2bc 100644 --- a/docs/reference/search/multi-search-template-api.asciidoc +++ b/docs/reference/search/multi-search-template-api.asciidoc @@ -22,9 +22,6 @@ PUT _scripts/my-search-template }, "from": "{{from}}", "size": "{{size}}" - }, - "params": { - "query_string": "My query string" } } } diff --git a/docs/reference/search/render-search-template-api.asciidoc b/docs/reference/search/render-search-template-api.asciidoc index 1f259dddf6879..0c782f26068e6 100644 --- a/docs/reference/search/render-search-template-api.asciidoc +++ b/docs/reference/search/render-search-template-api.asciidoc @@ -22,9 +22,6 @@ PUT _scripts/my-search-template }, "from": "{{from}}", "size": "{{size}}" - }, - "params": { - "query_string": "My query string" } } } diff --git a/docs/reference/search/rrf.asciidoc b/docs/reference/search/rrf.asciidoc index ba0f6c018b0eb..fb474fe6bf4e6 100644 --- a/docs/reference/search/rrf.asciidoc +++ b/docs/reference/search/rrf.asciidoc @@ -1,13 +1,13 @@ [[rrf]] === Reciprocal rank fusion -preview::["This functionality is in technical preview and may be changed or removed in a future release. The syntax will likely change before GA. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] +preview::["This functionality is in technical preview and may be changed or removed in a future release. +The syntax will likely change before GA. 
+Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf[Reciprocal rank fusion (RRF)] -is a method for combining multiple result sets with different relevance -indicators into a single result set. RRF requires no tuning, and the different -relevance indicators do not have to be related to each other to achieve high-quality -results. +is a method for combining multiple result sets with different relevance indicators into a single result set. +RRF requires no tuning, and the different relevance indicators do not have to be related to each other to achieve high-quality results. RRF uses the following formula to determine the score for ranking each document: @@ -31,15 +31,13 @@ return score [[rrf-api]] ==== Reciprocal rank fusion API -You can use RRF as part of a <> to combine and rank -documents using separate sets of top documents (result sets) from a -combination of <> using an -<>. A minimum of *two* child retrievers is -required for ranking. +You can use RRF as part of a <> to combine and rank documents using separate sets of top documents (result sets) from a combination of <> using an +<>. +A minimum of *two* child retrievers is required for ranking. An RRF retriever is an optional object defined as part of a search request's -<>. The RRF retriever object contains -the following parameters: +<>. +The RRF retriever object contains the following parameters: include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-retrievers] @@ -82,21 +80,15 @@ GET example-index/_search ---- // TEST[skip:example fragment] -In the above example, we execute the `knn` and `standard` retrievers -independently of each other. Then we use the `rrf` retriever to combine -the results. +In the above example, we execute the `knn` and `standard` retrievers independently of each other. +Then we use the `rrf` retriever to combine the results. -<1> First, we execute the kNN search specified by the `knn` retriever to -get its global top 50 results. -<2> Second, we execute the query specified by the `standard` retriever to get -its global top 50 results. -<3> Then, on a coordinating node, we combine the kNN search top documents with -the query top documents and rank them based on the RRF formula using parameters from -the `rrf` retriever to get the combined top documents using the default `size` of `10`. +<1> First, we execute the kNN search specified by the `knn` retriever to get its global top 50 results. +<2> Second, we execute the query specified by the `standard` retriever to get its global top 50 results. +<3> Then, on a coordinating node, we combine the kNN search top documents with the query top documents and rank them based on the RRF formula using parameters from the `rrf` retriever to get the combined top documents using the default `size` of `10`. -Note that if `k` from a knn search is larger than `rank_window_size`, the results are -truncated to `rank_window_size`. If `k` is smaller than `rank_window_size`, the results are -`k` size. +Note that if `k` from a knn search is larger than `rank_window_size`, the results are truncated to `rank_window_size`. +If `k` is smaller than `rank_window_size`, the results are `k` size. [[rrf-supported-features]] ==== Reciprocal rank fusion supported features @@ -117,16 +109,13 @@ The `rrf` retriever does not currently support: * <> * <> -Using unsupported features as part of a search with an `rrf` retriever results -in an exception. 
+Using unsupported features as part of a search with an `rrf` retriever results in an exception. [[rrf-using-multiple-standard-retrievers]] ==== Reciprocal rank fusion using multiple standard retrievers -The `rrf` retriever provides a way to combine and rank multiple -`standard` retrievers. A primary use case is combining top documents -from a traditional BM25 query and an <> -query to achieve improved relevance. +The `rrf` retriever provides a way to combine and rank multiple `standard` retrievers. +A primary use case is combining top documents from a traditional BM25 query and an <> query to achieve improved relevance. An example request using RRF with multiple standard retrievers: @@ -149,11 +138,10 @@ GET example-index/_search { "standard": { <2> "query": { - "text_expansion":{ - "ml.tokens":{ - "model_id":"my_elser_model", - "model_text":"What blue shoes are on sale?" - } + "sparse_vector":{ + "field": "ml.tokens", + "inference_id": "my_elser_model", + "query": "What blue shoes are on sale?" } } } @@ -167,38 +155,28 @@ GET example-index/_search ---- // TEST[skip:example fragment] -In the above example, we execute each of the two `standard` retrievers -independently of each other. Then we use the `rrf` retriever to combine -the results. +In the above example, we execute each of the two `standard` retrievers independently of each other. +Then we use the `rrf` retriever to combine the results. -<1> First we run the `standard` retriever -specifying a term query for `blue shoes sales` using the standard BM25 -scoring algorithm. -<2> Next we run the `standard` retriever specifying a -text expansion query for `What blue shoes are on sale?` +<1> First we run the `standard` retriever specifying a term query for `blue shoes sales` using the standard BM25 scoring algorithm. +<2> Next we run the `standard` retriever specifying a sparse_vector query for `What blue shoes are on sale?` using our <> scoring algorithm. -<3> The `rrf` retriever allows us to combine the two top documents sets -generated by completely independent scoring algorithms with equal weighting. +<3> The `rrf` retriever allows us to combine the two top documents sets generated by completely independent scoring algorithms with equal weighting. -Not only does this remove the need to figure out what the appropriate -weighting is using linear combination, but RRF is also shown to give improved -relevance over either query individually. +Not only does this remove the need to figure out what the appropriate weighting is using linear combination, but RRF is also shown to give improved relevance over either query individually. [[rrf-using-sub-searches]] ==== Reciprocal rank fusion using sub searches -RRF using sub searches is no longer supported. Use the -<> instead. See -<> -for an example. +RRF using sub searches is no longer supported. +Use the <> instead. +See <> for an example. [[rrf-full-example]] ==== Reciprocal rank fusion full example -We begin by creating a mapping for an index with a text field, a vector field, -and an integer field along with indexing several documents. For this example we -are going to use a vector with only a single dimension to make the ranking easier -to explain. +We begin by creating a mapping for an index with a text field, a vector field, and an integer field along with indexing several documents. +For this example we are going to use a vector with only a single dimension to make the ranking easier to explain. 
[source,console] ---- @@ -262,9 +240,7 @@ POST example-index/_refresh ---- // TEST -We now execute a search using an `rrf` retriever with a `standard` retriever -specifying a BM25 query, a `knn` retriever specifying a kNN search, and -a terms aggregation. +We now execute a search using an `rrf` retriever with a `standard` retriever specifying a BM25 query, a `knn` retriever specifying a kNN search, and a terms aggregation. [source,console] ---- @@ -307,9 +283,8 @@ GET example-index/_search ---- // TEST[continued] -And we receive the response with ranked `hits` and the terms -aggregation result. We have both the ranker's `score` -and the `_rank` option to show our top-ranked documents. +And we receive the response with ranked `hits` and the terms aggregation result. +We have both the ranker's `score` and the `_rank` option to show our top-ranked documents. [source,console-response] ---- @@ -387,10 +362,8 @@ and the `_rank` option to show our top-ranked documents. ---- // TESTRESPONSE[s/: \.\.\./: $body.$_path/] -Let's break down how these hits were ranked. We -start by running the `standard` retriever specifying a query -and the `knn` retriever specifying a kNN search separately to -collect what their individual hits are. +Let's break down how these hits were ranked. +We start by running the `standard` retriever specifying a query and the `knn` retriever specifying a kNN search separately to collect what their individual hits are. First, we look at the hits for the query from the `standard` retriever. @@ -444,8 +417,8 @@ First, we look at the hits for the query from the `standard` retriever. <3> rank 3, `_id` 2 <4> rank 4, `_id` 1 -Note that our first hit doesn't have a value for the `vector` field. Now, -we look at the results for the kNN search from the `knn` retriever. +Note that our first hit doesn't have a value for the `vector` field. +Now, we look at the results for the kNN search from the `knn` retriever. [source,console-result] ---- @@ -497,9 +470,7 @@ we look at the results for the kNN search from the `knn` retriever. <3> rank 3, `_id` 1 <4> rank 4, `_id` 5 -We can now take the two individually ranked result sets and apply the -RRF formula to them using parameters from the `rrf` retriever to get -our final ranking. +We can now take the two individually ranked result sets and apply the RRF formula to them using parameters from the `rrf` retriever to get our final ranking. [source,python] ---- @@ -512,17 +483,14 @@ _id: 5 = 1.0/(1+4) = 0.2000 ---- // NOTCONSOLE -We rank the documents based on the RRF formula with a `rank_window_size` of `5` -truncating the bottom `2` docs in our RRF result set with a `size` of `3`. -We end with `_id: 3` as `_rank: 1`, `_id: 2` as `_rank: 2`, and -`_id: 4` as `_rank: 3`. This ranking matches the result set from the -original RRF search as expected. +We rank the documents based on the RRF formula with a `rank_window_size` of `5` truncating the bottom `2` docs in our RRF result set with a `size` of `3`. +We end with `_id: 3` as `_rank: 1`, `_id: 2` as `_rank: 2`, and `_id: 4` as `_rank: 3`. +This ranking matches the result set from the original RRF search as expected. ==== Explain in RRF -In addition to individual query scoring details, we can make use of the `explain=true` parameter to get information on how -the RRF scores for each document were computed. 
Working with the example above, and by adding -`explain=true` to the search request, we'd now have a response that looks like the following: +In addition to individual query scoring details, we can make use of the `explain=true` parameter to get information on how the RRF scores for each document were computed. +Working with the example above, and by adding `explain=true` to the search request, we'd now have a response that looks like the following: [source,js] ---- @@ -585,8 +553,8 @@ the RRF scores for each document were computed. Working with the example above, <6> the `value` heres specifies the `rank` of this document for the second (`knn`) query In addition to the above, explain in RRF also supports <> using the `_name` parameter. -Using named queries allows for easier and more intuitive understanding of the RRF score computation, -especially when dealing with multiple queries. So, we would now have: +Using named queries allows for easier and more intuitive understanding of the RRF score computation, especially when dealing with multiple queries. +So, we would now have: [source,js] ---- @@ -632,6 +600,7 @@ GET example-index/_search <1> Here we specify a `_name` for the `knn` retriever The response would now include the named query in the explanation: + [source,js] ---- { @@ -677,25 +646,19 @@ The response would now include the named query in the explanation: ==== Pagination in RRF When using `rrf` you can paginate through the results using the `from` parameter. -As the final ranking is solely dependent on the original query ranks, to ensure -consistency when paginating, we have to make sure that while `from` changes, the order -of what we have already seen remains intact. To that end, we're using a fixed `rank_window_size` -as the whole available result set upon which we can paginate. +As the final ranking is solely dependent on the original query ranks, to ensure consistency when paginating, we have to make sure that while `from` changes, the order of what we have already seen remains intact. +To that end, we're using a fixed `rank_window_size` as the whole available result set upon which we can paginate. This essentially means that if: -* `from + size` ≤ `rank_window_size` : we could get `results[from: from+size]` documents back from -the final `rrf` ranked result set +* `from + size` ≤ `rank_window_size` : we could get `results[from: from+size]` documents back from the final `rrf` ranked result set + +* `from + size` > `rank_window_size` : we would get 0 results back, as the request would fall outside the available `rank_window_size`-sized result set. -* `from + size` > `rank_window_size` : we would get 0 results back, as the request would fall outside the -available `rank_window_size`-sized result set. +An important thing to note here is that since `rank_window_size` is all the results that we'll get to see from the individual query components, pagination guarantees consistency, i.e. no documents are skipped or duplicated in multiple pages, iff `rank_window_size` remains the same. +If `rank_window_size` changes, then the order of the results might change as well, even for the same ranks. -An important thing to note here is that since `rank_window_size` is all the results that we'll get to see -from the individual query components, pagination guarantees consistency, i.e. no documents are skipped -or duplicated in multiple pages, iff `rank_window_size` remains the same. If `rank_window_size` changes, then the order -of the results might change as well, even for the same ranks. 
+To illustrate all of the above, let's consider the following simplified example where we have two queries, `queryA` and `queryB` and their ranked documents: -To illustrate all of the above, let's consider the following simplified example where we have -two queries, `queryA` and `queryB` and their ranked documents: [source,python] ---- | queryA | queryB | @@ -709,6 +672,7 @@ _id: | | 2 | For `rank_window_size=5` we would get to see all documents from both `queryA` and `queryB`. Assuming a `rank_constant=1`, the `rrf` scores would be: + [source,python] ---- # doc | queryA | queryB | score @@ -720,17 +684,17 @@ _id: 5 = 0 + 1.0/(1+1) = 0.5 ---- // NOTCONSOLE -So the final ranked result set would be [`1`, `4`, `2`, `3`, `5`] and we would paginate over that, since -`rank_window_size == len(results)`. In this scenario, we would have: +So the final ranked result set would be [`1`, `4`, `2`, `3`, `5`] and we would paginate over that, since `rank_window_size == len(results)`. +In this scenario, we would have: * `from=0, size=2` would return documents [`1`, `4`] with ranks `[1, 2]` * `from=2, size=2` would return documents [`2`, `3`] with ranks `[3, 4]` * `from=4, size=2` would return document [`5`] with rank `[5]` * `from=6, size=2` would return an empty result set as it there are no more results to iterate over -Now, if we had a `rank_window_size=2`, we would only get to see `[1, 2]` and `[5, 4]` documents -for queries `queryA` and `queryB` respectively. Working out the math, we would see that the results would now -be slightly different, because we would have no knowledge of the documents in positions `[3: end]` for either query. +Now, if we had a `rank_window_size=2`, we would only get to see `[1, 2]` and `[5, 4]` documents for queries `queryA` and `queryB` respectively. +Working out the math, we would see that the results would now be slightly different, because we would have no knowledge of the documents in positions `[3: end]` for either query. + [source,python] ---- # doc | queryA | queryB | score @@ -741,8 +705,8 @@ _id: 5 = 0 + 1.0/(1+1) = 0.5 ---- // NOTCONSOLE -The final ranked result set would be [`1`, `5`, `2`, `4`], and we would be able to paginate -on the top `rank_window_size` results, i.e. [`1`, `5`]. So for the same params as above, we would now have: +The final ranked result set would be [`1`, `5`, `2`, `4`], and we would be able to paginate on the top `rank_window_size` results, i.e. [`1`, `5`]. +So for the same params as above, we would now have: * `from=0, size=2` would return [`1`, `5`] with ranks `[1, 2]` * `from=2, size=2` would return an empty result set as it would fall outside the available `rank_window_size` results. 
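For reference, the fusion arithmetic above fits in a few lines of Python. The sketch below is not the {es} implementation (in particular, tie-breaking is simplified), but it reproduces the scores used in these examples:

[source,python]
----
def rrf(rankings, rank_constant=1, rank_window_size=5):
    # Combine per-query rankings with reciprocal rank fusion.
    scores = {}
    for ranked_ids in rankings.values():
        # Only the top `rank_window_size` hits of each ranking contribute.
        for rank, doc_id in enumerate(ranked_ids[:rank_window_size], start=1):
            scores[doc_id] = scores.get(doc_id, 0.0) + 1.0 / (rank_constant + rank)
    # Highest RRF score first; ties keep insertion order here, which may
    # differ from how Elasticsearch breaks ties.
    return sorted(scores.items(), key=lambda item: item[1], reverse=True)

# The rankings from the `queryA`/`queryB` example above.
print(rrf({"queryA": [1, 2, 3, 4], "queryB": [5, 4, 3, 1, 2]}))
print(rrf({"queryA": [1, 2, 3, 4], "queryB": [5, 4, 3, 1, 2]}, rank_window_size=2))
----
// NOTCONSOLE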
diff --git a/docs/reference/search/search-template-api.asciidoc b/docs/reference/search/search-template-api.asciidoc index 038396e558607..c60b5281c05e5 100644 --- a/docs/reference/search/search-template-api.asciidoc +++ b/docs/reference/search/search-template-api.asciidoc @@ -21,9 +21,6 @@ PUT _scripts/my-search-template }, "from": "{{from}}", "size": "{{size}}" - }, - "params": { - "query_string": "My query string" } } } diff --git a/docs/reference/search/search-your-data/behavioral-analytics/behavioral-analytics-start.asciidoc b/docs/reference/search/search-your-data/behavioral-analytics/behavioral-analytics-start.asciidoc index fe1515302082f..f29a6f3a37fdf 100644 --- a/docs/reference/search/search-your-data/behavioral-analytics/behavioral-analytics-start.asciidoc +++ b/docs/reference/search/search-your-data/behavioral-analytics/behavioral-analytics-start.asciidoc @@ -182,7 +182,7 @@ createTracker({ [[behavioral-analytics-start-ui-integration-search-ui]] ==== Search UI integration -https://docs.elastic.co/search-ui/getting-started[Search UI^] is a JavaScript library for building search experiences. +https://docs.elastic.co/search-ui[Search UI^] is a JavaScript library for building search experiences. Use the https://www.npmjs.com/package/@elastic/search-ui-analytics-plugin[Search UI analytics plugin^] available on NPM to integrate behavioral analytics with Search UI. This integration enables you to dispatch events from Search UI to the behavioral analytics client. diff --git a/docs/reference/search/search-your-data/cohere-es.asciidoc b/docs/reference/search/search-your-data/cohere-es.asciidoc index f12f23ad2c5dc..3029cfd9f098c 100644 --- a/docs/reference/search/search-your-data/cohere-es.asciidoc +++ b/docs/reference/search/search-your-data/cohere-es.asciidoc @@ -25,14 +25,15 @@ set. Refer to https://docs.cohere.com/docs/elasticsearch-and-cohere[Cohere's tutorial] for an example using a different data set. +You can also review the https://colab.research.google.com/github/elastic/elasticsearch-labs/blob/main/notebooks/integrations/cohere/cohere-elasticsearch.ipynb[Colab notebook version of this tutorial]. + [discrete] [[cohere-es-req]] ==== Requirements -* A https://cohere.com/[Cohere account], -* an https://www.elastic.co/guide/en/cloud/current/ec-getting-started.html[Elastic Cloud] -account, +* A paid https://cohere.com/[Cohere account] is required to use the {infer-cap} API with the Cohere service as the Cohere free trial API usage is limited, +* an https://www.elastic.co/guide/en/cloud/current/ec-getting-started.html[Elastic Cloud] account, * Python 3.7 or higher. @@ -329,17 +330,12 @@ they were sent to the {infer} endpoint. [[cohere-es-rag]] ==== Retrieval Augmented Generation (RAG) with Cohere and {es} -RAG is a method for generating text using additional information fetched from an -external data source. With the ranked results, you can build a RAG system on the -top of what you previously created by using -https://docs.cohere.com/docs/chat-api[Cohere's Chat API]. +https://docs.cohere.com/docs/retrieval-augmented-generation-rag[RAG] is a method for generating text using additional information fetched from an external data source. +With the ranked results, you can build a RAG system on the top of what you previously created by using https://docs.cohere.com/docs/chat-api[Cohere's Chat API]. -Pass in the retrieved documents and the query to receive a grounded response -using Cohere's newest generative model -https://docs.cohere.com/docs/command-r-plus[Command R+]. 
+Pass in the retrieved documents and the query to receive a grounded response using Cohere's newest generative model https://docs.cohere.com/docs/command-r-plus[Command R+]. -Then pass in the query and the documents to the Chat API, and print out the -response. +Then pass in the query and the documents to the Chat API, and print out the response. [source,py] -------------------------------------------------- diff --git a/docs/reference/search/search-your-data/knn-search.asciidoc b/docs/reference/search/search-your-data/knn-search.asciidoc index 0e61b44eda413..70cf9eec121d7 100644 --- a/docs/reference/search/search-your-data/knn-search.asciidoc +++ b/docs/reference/search/search-your-data/knn-search.asciidoc @@ -410,6 +410,24 @@ post-filtering approach, where the filter is applied **after** the approximate kNN search completes. Post-filtering has the downside that it sometimes returns fewer than k results, even when there are enough matching documents. +[discrete] +[[approximate-knn-search-and-filtering]] +==== Approximate kNN search and filtering + +Unlike conventional query filtering, where more restrictive filters typically lead to faster queries, +applying filters in an approximate kNN search with an HNSW index can decrease performance. +This is because searching the HNSW graph requires additional exploration to obtain the `num_candidates` +that meet the filter criteria. + +To avoid significant performance drawbacks, Lucene implements the following strategies per segment: + +* If the filtered document count is less than or equal to num_candidates, the search bypasses the HNSW graph and +uses a brute force search on the filtered documents. + +* While exploring the HNSW graph, if the number of nodes explored exceeds the number of documents that satisfy the filter, +the search will stop exploring the graph and switch to a brute force search over the filtered documents. + + [discrete] ==== Combine approximate kNN with other features diff --git a/docs/reference/search/search-your-data/retrievers-reranking/retrievers-overview.asciidoc b/docs/reference/search/search-your-data/retrievers-reranking/retrievers-overview.asciidoc index 89bf3da3e11e0..24579dfdf3adf 100644 --- a/docs/reference/search/search-your-data/retrievers-reranking/retrievers-overview.asciidoc +++ b/docs/reference/search/search-your-data/retrievers-reranking/retrievers-overview.asciidoc @@ -1,25 +1,22 @@ [[retrievers-overview]] === Retrievers -preview::[] +preview::[] A retriever is an abstraction that was added to the Search API in *8.14.0*. -This abstraction enables the configuration of multi-stage retrieval -pipelines within a single `_search` call. This simplifies your search -application logic, because you no longer need to configure complex searches via -multiple {es} calls or implement additional client-side logic to -combine results from different queries. +This abstraction enables the configuration of multi-stage retrieval pipelines within a single `_search` call. +This simplifies your search application logic, because you no longer need to configure complex searches via multiple {es} calls or implement additional client-side logic to combine results from different queries. -This document provides a general overview of the retriever abstraction. -For implementation details, including notable restrictions, check out the -<> in the `_search` API docs. +This document provides a general overview of the retriever abstraction. 
+For implementation details, including notable restrictions, check out the +<> in the `_search` API docs. [discrete] [[retrievers-overview-types]] ==== Retriever types Retrievers come in various types, each tailored for different search operations. -The following retrievers are currently available: +The following retrievers are currently available: * <>. Returns top documents from a traditional https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl.html[query]. @@ -34,69 +31,48 @@ the reciprocal rank fusion (RRF) algorithm. Allows you to combine multiple resul with different relevance indicators into a single result set. An RRF retriever is a *compound retriever*, where its `filter` element is propagated to its sub retrievers. ++ +Sub retrievers may not use elements that are restricted by having a compound retriever as part of the retriever tree. +See the <> for detailed examples and information on how to use the RRF retriever. * *`text_similarity_reranker` Retriever*. Used for <>. Requires first creating a `rerank` task using the <>. -+ -Sub retrievers may not use elements that -are restricted by having a compound retriever as part of the retriever tree. -See the <> for detailed -examples and information on how to use the RRF retriever. - -[NOTE] -==== -Stay tuned for more retriever types in future releases! -==== - [discrete] ==== What makes retrievers useful? -Here's an overview of what makes retrievers useful and how they differ from -regular queries. - -. *Simplified user experience*. Retrievers simplify the user experience by -allowing entire retrieval pipelines to be configured in a single API call. This -maintains backward compatibility with traditional query elements by -automatically translating them to the appropriate retriever. -. *Structured retrieval*. Retrievers provide a more structured way to define search -operations. They allow searches to be described using a "retriever tree", a -hierarchical structure that clarifies the sequence and logic of operations, -making complex searches more understandable and manageable. -. *Composability and flexibility*. Retrievers enable flexible composability, -allowing you to build pipelines and seamlessly integrate different retrieval -strategies into these pipelines. Retrievers make it easy to test out different -retrieval strategy combinations. -. *Compound operations*. A retriever can have sub retrievers. This -allows complex nested searches where the results of one retriever feed into -another, supporting sophisticated querying strategies that might involve -multiple stages or criteria. -. *Retrieval as a first-class concept*. Unlike -traditional queries, where the query is a part of a larger search API call, -retrievers are designed as standalone entities that can be combined or used in -isolation. This enables a more modular and flexible approach to constructing -searches. -. *Enhanced control over document scoring and ranking*. Retrievers -allow for more explicit control over how documents are scored and filtered. For -instance, you can specify minimum score thresholds, apply complex filters -without affecting scoring, and use parameters like `terminate_after` for -performance optimizations. -. *Integration with existing {es} functionalities*. Even though -retrievers can be used instead of existing `_search` API syntax (like the -`query` and `knn`), they are designed to integrate seamlessly with things like -pagination (`search_after`) and sorting. 
They also maintain compatibility with -aggregation operations by treating the combination of all leaf retrievers as +Here's an overview of what makes retrievers useful and how they differ from regular queries. + +. *Simplified user experience*. +Retrievers simplify the user experience by allowing entire retrieval pipelines to be configured in a single API call. +This maintains backward compatibility with traditional query elements by automatically translating them to the appropriate retriever. +. *Structured retrieval*. +Retrievers provide a more structured way to define search operations. +They allow searches to be described using a "retriever tree", a hierarchical structure that clarifies the sequence and logic of operations, making complex searches more understandable and manageable. +. *Composability and flexibility*. +Retrievers enable flexible composability, allowing you to build pipelines and seamlessly integrate different retrieval strategies into these pipelines. +Retrievers make it easy to test out different retrieval strategy combinations. +. *Compound operations*. +A retriever can have sub retrievers. +This allows complex nested searches where the results of one retriever feed into another, supporting sophisticated querying strategies that might involve multiple stages or criteria. +. *Retrieval as a first-class concept*. +Unlike traditional queries, where the query is a part of a larger search API call, retrievers are designed as standalone entities that can be combined or used in isolation. +This enables a more modular and flexible approach to constructing searches. +. *Enhanced control over document scoring and ranking*. +Retrievers allow for more explicit control over how documents are scored and filtered. +For instance, you can specify minimum score thresholds, apply complex filters without affecting scoring, and use parameters like `terminate_after` for performance optimizations. +. *Integration with existing {es} functionalities*. +Even though retrievers can be used instead of existing `_search` API syntax (like the +`query` and `knn`), they are designed to integrate seamlessly with things like pagination (`search_after`) and sorting. +They also maintain compatibility with aggregation operations by treating the combination of all leaf retrievers as `should` clauses in a boolean query. -. *Cleaner separation of concerns*. When using compound retrievers, only the -query element is allowed, which enforces a cleaner separation of concerns -and prevents the complexity that might arise from overly nested or -interdependent configurations. +. *Cleaner separation of concerns*. +When using compound retrievers, only the query element is allowed, which enforces a cleaner separation of concerns and prevents the complexity that might arise from overly nested or interdependent configurations. [discrete] [[retrievers-overview-example]] ==== Example -The following example demonstrates how using retrievers -simplify the composability of queries for RRF ranking. +The following example demonstrates how using retrievers simplify the composability of queries for RRF ranking. [source,js] ---- @@ -108,11 +84,10 @@ GET example-index/_search { "standard": { "query": { - "text_expansion": { - "vector.tokens": { - "model_id": ".elser_model_2", - "model_text": "What blue shoes are on sale?" - } + "sparse_vector": { + "field": "vector.tokens", + "inference_id": "my-elser-endpoint", + "query": "What blue shoes are on sale?" 
} } } @@ -133,8 +108,7 @@ GET example-index/_search ---- //NOTCONSOLE -This example demonstrates how you can combine different -retrieval strategies into a single `retriever` pipeline. +This example demonstrates how you can combine different retrieval strategies into a single `retriever` pipeline. Compare to `RRF` with `sub_searches` approach: @@ -156,14 +130,13 @@ GET example-index/_search }, { "query":{ - "text_expansion":{ - "vector.tokens":{ - "model_id":".elser_model_2", - "model_text":"What blue shoes are on sale?" + "sparse_vector": { + "field": "vector.tokens", + "inference_id": "my-elser-endoint", + "query": "What blue shoes are on sale?" } } } - } ], "rank":{ "rrf":{ @@ -180,7 +153,7 @@ GET example-index/_search [[retrievers-overview-glossary]] ==== Glossary -Here are some important terms: +Here are some important terms: * *Retrieval Pipeline*. Defines the entire retrieval and ranking logic to produce top hits. diff --git a/docs/reference/search/search-your-data/search-application-api.asciidoc b/docs/reference/search/search-your-data/search-application-api.asciidoc index 6312751d37bca..2fe28faed546f 100644 --- a/docs/reference/search/search-your-data/search-application-api.asciidoc +++ b/docs/reference/search/search-your-data/search-application-api.asciidoc @@ -295,6 +295,12 @@ This may be helpful when experimenting with specific search queries that you wan If your search application's name is `my_search_application`, your alias will be `my_search_application`. You can search this using the <>. +[discrete] +[[search-application-cross-cluster-search]] +===== Cross cluster search + +Search applications do not currently support {ccs} because it is not possible to add a remote cluster's index or index pattern to an index alias. + [NOTE] ==== You should use the Search Applications management APIs to update your application and _not_ directly use {es} APIs such as the alias API. 
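To illustrate the search path mentioned above, here is a minimal sketch that queries the application through the Search Application Search API rather than through the alias. The host, the credentials, and the `query_string` template parameter are assumptions for illustration only:

[source,python]
----
import requests

# Assumptions for illustration only: a local cluster, basic authentication,
# and a search application named "my_search_application" whose template
# accepts a "query_string" parameter.
ES_URL = "https://localhost:9200"

response = requests.post(
    f"{ES_URL}/_application/search_application/my_search_application/_search",
    json={"params": {"query_string": "blue shoes"}},
    auth=("elastic", "<password>"),
    verify=False,  # only acceptable for a local, self-signed test cluster
)
print(response.json())
----
// NOTCONSOLE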
@@ -411,11 +417,10 @@ PUT _application/search_application/my-search-app { "standard": { "query": { - "text_expansion": { - "ml.inference.{{.}}_expanded.predicted_value": { - "model_text": "{{query_string}}", - "model_id": "" - } + "sparse_vector": { + "field": "ml.inference.{{.}}_expanded.predicted_value", + "inference_id": "", + "query": "{{query_string}}" } } } @@ -503,12 +508,10 @@ PUT _application/search_application/my_search_application {{#elser}} {{#elser_fields}} { - "text_expansion": { - "ml.inference.{{name}}_expanded.predicted_value": { - "model_text": "{{query_string}}", - "model_id": ".elser_model_1", - "boost": "{{boost}}" - } + "sparse_vector": { + "field": "ml.inference.{{.}}_expanded.predicted_value", + "inference_id": "", + "query": "{{query_string}}" } }, {{/elser_fields}} @@ -667,12 +670,11 @@ PUT _application/search_application/my_search_application "should": [ {{#elser_fields}} { - "text_expansion": { - "ml.inference.{{name}}_expanded.predicted_value": { - "model_text": "{{query_string}}", - "model_id": "" + "sparse_vector": { + "field": "ml.inference.{{.}}_expanded.predicted_value", + "inference_id": "", + "query": "{{query_string}}" } - } }, {{/elser_fields}} ] diff --git a/docs/reference/search/search-your-data/search-template.asciidoc b/docs/reference/search/search-your-data/search-template.asciidoc index 7a7f09f4a37a7..489a03c0a6a2a 100644 --- a/docs/reference/search/search-your-data/search-template.asciidoc +++ b/docs/reference/search/search-your-data/search-template.asciidoc @@ -42,9 +42,6 @@ PUT _scripts/my-search-template }, "from": "{{from}}", "size": "{{size}}" - }, - "params": { - "query_string": "My query string" } } } diff --git a/docs/reference/search/search-your-data/search-using-query-rules.asciidoc b/docs/reference/search/search-your-data/search-using-query-rules.asciidoc index 6641d8205a461..594c22fb65981 100644 --- a/docs/reference/search/search-your-data/search-using-query-rules.asciidoc +++ b/docs/reference/search/search-your-data/search-using-query-rules.asciidoc @@ -6,8 +6,6 @@ ++++ [[query-rules]] -preview::[] - _Query rules_ allow customization of search results for queries that match specified criteria metadata. This allows for more control over results, for example ensuring that promoted documents that match defined criteria are returned at the top of the result list. Metadata is defined in the query rule, and is matched against the query criteria. diff --git a/docs/reference/search/search-your-data/semantic-search-elser.asciidoc b/docs/reference/search/search-your-data/semantic-search-elser.asciidoc index e7f503a4a6c70..11aec59a00b30 100644 --- a/docs/reference/search/search-your-data/semantic-search-elser.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search-elser.asciidoc @@ -1,61 +1,44 @@ [[semantic-search-elser]] === Tutorial: semantic search with ELSER + ++++ Semantic search with ELSER ++++ -Elastic Learned Sparse EncodeR - or ELSER - is an NLP model trained by Elastic -that enables you to perform semantic search by using sparse vector -representation. Instead of literal matching on search terms, semantic search -retrieves results based on the intent and the contextual meaning of a search -query. +Elastic Learned Sparse EncodeR - or ELSER - is an NLP model trained by Elastic that enables you to perform semantic search by using sparse vector representation. +Instead of literal matching on search terms, semantic search retrieves results based on the intent and the contextual meaning of a search query. 
-The instructions in this tutorial shows you how to use ELSER to perform semantic -search on your data. +The instructions in this tutorial shows you how to use ELSER to perform semantic search on your data. IMPORTANT: For the easiest way to perform semantic search in the {stack}, refer to the <> end-to-end tutorial. -NOTE: Only the first 512 extracted tokens per field are considered during -semantic search with ELSER. Refer to -{ml-docs}/ml-nlp-limitations.html#ml-nlp-elser-v1-limit-512[this page] for more -information. - +NOTE: Only the first 512 extracted tokens per field are considered during semantic search with ELSER. +Refer to {ml-docs}/ml-nlp-limitations.html#ml-nlp-elser-v1-limit-512[this page] for more information. [discrete] [[requirements]] ==== Requirements -To perform semantic search by using ELSER, you must have the NLP model deployed -in your cluster. Refer to the -{ml-docs}/ml-nlp-elser.html[ELSER documentation] to learn how to download and -deploy the model. - -NOTE: The minimum dedicated ML node size for deploying and using the ELSER model -is 4 GB in Elasticsearch Service if -{cloud}/ec-autoscaling.html[deployment autoscaling] is turned off. Turning on -autoscaling is recommended because it allows your deployment to dynamically -adjust resources based on demand. Better performance can be achieved by using -more allocations or more threads per allocation, which requires bigger ML nodes. -Autoscaling provides bigger nodes when required. If autoscaling is turned off, -you must provide suitably sized nodes yourself. +To perform semantic search by using ELSER, you must have the NLP model deployed in your cluster. +Refer to the {ml-docs}/ml-nlp-elser.html[ELSER documentation] to learn how to download and deploy the model. +NOTE: The minimum dedicated ML node size for deploying and using the ELSER model is 4 GB in Elasticsearch Service if +{cloud}/ec-autoscaling.html[deployment autoscaling] is turned off. +Turning on autoscaling is recommended because it allows your deployment to dynamically adjust resources based on demand. +Better performance can be achieved by using more allocations or more threads per allocation, which requires bigger ML nodes. +Autoscaling provides bigger nodes when required. +If autoscaling is turned off, you must provide suitably sized nodes yourself. [discrete] [[elser-mappings]] ==== Create the index mapping -First, the mapping of the destination index - the index that contains the tokens -that the model created based on your text - must be created. The destination -index must have a field with the -<> or <> field -type to index the ELSER output. +First, the mapping of the destination index - the index that contains the tokens that the model created based on your text - must be created. +The destination index must have a field with the <> or <> field type to index the ELSER output. -NOTE: ELSER output must be ingested into a field with the `sparse_vector` or -`rank_features` field type. Otherwise, {es} interprets the token-weight pairs as -a massive amount of fields in a document. If you get an error similar to this -`"Limit of total fields [1000] has been exceeded while adding new fields"` then -the ELSER output field is not mapped properly and it has a field type different -than `sparse_vector` or `rank_features`. +NOTE: ELSER output must be ingested into a field with the `sparse_vector` or `rank_features` field type. +Otherwise, {es} interprets the token-weight pairs as a massive amount of fields in a document. 
+If you get an error similar to this: `"Limit of total fields [1000] has been exceeded while adding new fields"` then the ELSER output field is not mapped properly and it has a field type different than `sparse_vector` or `rank_features`. [source,console] ---- @@ -74,24 +57,22 @@ PUT my-index } ---- // TEST[skip:TBD] -<1> The name of the field to contain the generated tokens. It must be refrenced -in the {infer} pipeline configuration in the next step. +<1> The name of the field to contain the generated tokens. +It must be referenced in the {infer} pipeline configuration in the next step. <2> The field to contain the tokens is a `sparse_vector` field. <3> The name of the field from which to create the sparse vector representation. -In this example, the name of the field is `content`. It must be referenced in the -{infer} pipeline configuration in the next step. +In this example, the name of the field is `content`. +It must be referenced in the {infer} pipeline configuration in the next step. <4> The field type which is text in this example. To learn how to optimize space, refer to the <> section. - [discrete] [[inference-ingest-pipeline]] ==== Create an ingest pipeline with an inference processor Create an <> with an -<> to use ELSER to infer against the data -that is being ingested in the pipeline. +<> to use ELSER to infer against the data that is being ingested in the pipeline. [source,console] ---- @@ -112,8 +93,8 @@ PUT _ingest/pipeline/elser-v2-test ] } ---- -<1> Configuration object that defines the `input_field` for the {infer} process -and the `output_field` that will contain the {infer} results. + +<1> Configuration object that defines the `input_field` for the {infer} process and the `output_field` that will contain the {infer} results. //// [source,console] @@ -128,26 +109,23 @@ DELETE _ingest/pipeline/elser-v2-test [[load-data]] ==== Load data -In this step, you load the data that you later use in the {infer} ingest -pipeline to extract tokens from it. +In this step, you load the data that you later use in the {infer} ingest pipeline to extract tokens from it. -Use the `msmarco-passagetest2019-top1000` data set, which is a subset of the MS -MARCO Passage Ranking data set. It consists of 200 queries, each accompanied by -a list of relevant text passages. All unique passages, along with their IDs, -have been extracted from that data set and compiled into a +Use the `msmarco-passagetest2019-top1000` data set, which is a subset of the MS MARCO Passage Ranking data set. +It consists of 200 queries, each accompanied by a list of relevant text passages. +All unique passages, along with their IDs, have been extracted from that data set and compiled into a https://github.com/elastic/stack-docs/blob/main/docs/en/stack/ml/nlp/data/msmarco-passagetest2019-unique.tsv[tsv file]. -IMOPRTANT: The `msmarco-passagetest2019-top1000` dataset was not utilized to -train the model. It is only used in this tutorial as a sample dataset that is -easily accessible for demonstration purposes. You can use a different data set -to test the workflow and become familiar with it. +IMPORTANT: The `msmarco-passagetest2019-top1000` dataset was not utilized to train the model. +It is only used in this tutorial as a sample dataset that is easily accessible for demonstration purposes. +You can use a different data set to test the workflow and become familiar with it. 
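If you would rather script this step than use the Data Visualizer described below, a minimal sketch with the official Python client might look like the following. The file location, the cluster address, and the credentials are assumptions for illustration only:

[source,python]
----
import csv

from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk

# Assumptions for illustration only: a local cluster and the tsv file in the
# current working directory.
client = Elasticsearch(
    "https://localhost:9200",
    basic_auth=("elastic", "<password>"),
    verify_certs=False,  # only acceptable for a local test cluster
)

def docs():
    with open("msmarco-passagetest2019-unique.tsv", newline="") as f:
        for passage_id, content in csv.reader(f, delimiter="\t"):
            # The same column names the Data Visualizer step assigns.
            yield {"_index": "test-data", "id": passage_id, "content": content}

bulk(client, docs())
----
// NOTCONSOLE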
Download the file and upload it to your cluster using the {kibana-ref}/connect-to-elasticsearch.html#upload-data-kibana[Data Visualizer] -in the {ml-app} UI. Assign the name `id` to the first column and `content` to -the second column. The index name is `test-data`. Once the upload is complete, -you can see an index named `test-data` with 182469 documents. - +in the {ml-app} UI. +Assign the name `id` to the first column and `content` to the second column. +The index name is `test-data`. +Once the upload is complete, you can see an index named `test-data` with 182469 documents. [discrete] [[reindexing-data-elser]] @@ -171,9 +149,7 @@ POST _reindex?wait_for_completion=false } ---- // TEST[skip:TBD] -<1> The default batch size for reindexing is 1000. Reducing `size` to a smaller -number makes the update of the reindexing process quicker which enables you to -follow the progress closely and detect errors early. +<1> The default batch size for reindexing is 1000. Reducing `size` to a smaller number makes the update of the reindexing process quicker which enables you to follow the progress closely and detect errors early. The call returns a task ID to monitor the progress: @@ -183,42 +159,35 @@ GET _tasks/ ---- // TEST[skip:TBD] -You can also open the Trained Models UI, select the Pipelines tab under ELSER to -follow the progress. - +You can also open the Trained Models UI, select the Pipelines tab under ELSER to follow the progress. [discrete] [[text-expansion-query]] -==== Semantic search by using the `text_expansion` query +==== Semantic search by using the `sparse_vector` query -To perform semantic search, use the <>, -and provide the query text and the ELSER model ID. The example below uses the -query text "How to avoid muscle soreness after running?", the `content_embedding` -field contains the generated ELSER output: +To perform semantic search, use the <>, and provide the query text and the inference ID associated with your ELSER model. +The example below uses the query text "How to avoid muscle soreness after running?", the `content_embedding` field contains the generated ELSER output: [source,console] ---- GET my-index/_search { "query":{ - "text_expansion":{ - "content_embedding":{ - "model_id":".elser_model_2", - "model_text":"How to avoid muscle soreness after running?" - } + "sparse_vector":{ + "field": "content_embedding", + "inference_id": "my-elser-endpoint", + "query": "How to avoid muscle soreness after running?" } } } ---- // TEST[skip:TBD] -The result is the top 10 documents that are closest in meaning to your query -text from the `my-index` index sorted by their relevancy. The result also -contains the extracted tokens for each of the relevant search results with their -weights. Tokens are learned associations capturing relevance, they are not -synonyms. To learn more about what tokens are, refer to -{ml-docs}/ml-nlp-elser.html#elser-tokens[this page]. It is possible to exclude -tokens from source, refer to <> to learn more. +The result is the top 10 documents that are closest in meaning to your query text from the `my-index` index sorted by their relevancy. +The result also contains the extracted tokens for each of the relevant search results with their weights. +Tokens are learned associations capturing relevance, they are not synonyms. +To learn more about what tokens are, refer to {ml-docs}/ml-nlp-elser.html#elser-tokens[this page]. +It is possible to exclude tokens from source, refer to <> to learn more. 
[source,consol-result] ---- @@ -265,17 +234,15 @@ tokens from source, refer to <> to learn more. [[text-expansion-compound-query]] ==== Combining semantic search with other queries -You can combine <> with other -queries in a <>. For example, use a filter clause -in a <> or a full text query with the same (or different) -query text as the `text_expansion` query. This enables you to combine the search -results from both queries. +You can combine <> with other queries in a <>. +For example, use a filter clause in a <> or a full text query with the same (or different) query text as the `sparse_vector` query. +This enables you to combine the search results from both queries. -The search hits from the `text_expansion` query tend to score higher than other -{es} queries. Those scores can be regularized by increasing or decreasing the -relevance scores of each query by using the `boost` parameter. Recall on the -`text_expansion` query can be high where there is a long tail of less relevant -results. Use the `min_score` parameter to prune those less relevant documents. +The search hits from the `sparse_vector` query tend to score higher than other +{es} queries. +Those scores can be regularized by increasing or decreasing the relevance scores of each query by using the `boost` parameter. +Recall on the `sparse_vector` query can be high where there is a long tail of less relevant results. +Use the `min_score` parameter to prune those less relevant documents. [source,console] ---- @@ -285,12 +252,11 @@ GET my-index/_search "bool": { <1> "should": [ { - "text_expansion": { - "content_embedding": { - "model_text": "How to avoid muscle soreness after running?", - "model_id": ".elser_model_2", - "boost": 1 <2> - } + "sparse_vector": { + "field": "content_embedding", + "inference_id": "my-elser-endpoint", + "query": "How to avoid muscle soreness after running?", + "boost": 1 <2> } }, { @@ -306,17 +272,13 @@ GET my-index/_search } ---- // TEST[skip:TBD] -<1> Both the `text_expansion` and the `query_string` queries are in a `should` -clause of a `bool` query. -<2> The `boost` value is `1` for the `text_expansion` query which is the default -value. This means that the relevance score of the results of this query are not -boosted. -<3> The `boost` value is `4` for the `query_string` query. The relevance score -of the results of this query is increased causing them to rank higher in the -search results. +<1> Both the `sparse_vector` and the `query_string` queries are in a `should` clause of a `bool` query. +<2> The `boost` value is `1` for the `sparse_vector` query which is the default value. +This means that the relevance score of the results of this query are not boosted. +<3> The `boost` value is `4` for the `query_string` query. +The relevance score of the results of this query is increased causing them to rank higher in the search results. <4> Only the results with a score equal to or higher than `10` are displayed. - [discrete] [[optimization]] === Optimizing performance @@ -325,24 +287,18 @@ search results. [[save-space]] ==== Saving disk space by excluding the ELSER tokens from document source -The tokens generated by ELSER must be indexed for use in the -<>. However, it is not -necessary to retain those terms in the document source. You can save disk space -by using the <> mapping to remove the ELSER -terms from the document source. +The tokens generated by ELSER must be indexed for use in the <>. +However, it is not necessary to retain those terms in the document source. 
+You can save disk space by using the <> mapping to remove the ELSER terms from the document source. WARNING: Reindex uses the document source to populate the destination index. -**Once the ELSER terms have been excluded from the source, they cannot be** -**recovered through reindexing.** Excluding the tokens from the source is a -space-saving optimsation that should only be applied if you are certain that -reindexing will not be required in the future! It's important to carefully -consider this trade-off and make sure that excluding the ELSER terms from the -source aligns with your specific requirements and use case. Review the -<> and <> sections carefully to learn -more about the possible consequences of excluding the tokens from the `_source`. - -The mapping that excludes `content_embedding` from the `_source` field can be -created by the following API call: +**Once the ELSER terms have been excluded from the source, they cannot be recovered through reindexing.** +Excluding the tokens from the source is a space-saving optimization that should only be applied if you are certain that reindexing will not be required in the future! +It's important to carefully consider this trade-off and make sure that excluding the ELSER terms from the source aligns with your specific requirements and use case. +Review the +<> and <> sections carefully to learn more about the possible consequences of excluding the tokens from the `_source`. + +The mapping that excludes `content_embedding` from the `_source` field can be created by the following API call: [source,console] ---- @@ -369,7 +325,7 @@ PUT my-index [NOTE] ==== -Depending on your data, the text expansion query may be faster with `track_total_hits: false`. +Depending on your data, the `sparse_vector` query may be faster with `track_total_hits: false`. ==== [discrete] diff --git a/docs/reference/search/search-your-data/semantic-search.asciidoc b/docs/reference/search/search-your-data/semantic-search.asciidoc index 2d776077e13c5..fa84c3848b78c 100644 --- a/docs/reference/search/search-your-data/semantic-search.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search.asciidoc @@ -1,13 +1,12 @@ [[semantic-search]] == Semantic search -Semantic search is a search method that helps you find data based on the intent and contextual meaning of a search query, instead of a match on query terms -(lexical search). +Semantic search is a search method that helps you find data based on the intent and contextual meaning of a search query, instead of a match on query terms (lexical search). {es} provides various semantic search capabilities using {ml-docs}/ml-nlp.html[natural language processing (NLP)] and vector search. Using an NLP model enables you to extract text embeddings out of text. Embeddings are vectors that provide a numeric representation of a text. -Pieces of content with similar meaning have similar representations. +Pieces of content with similar meaning have similar representations. NLP models can be used in the {stack} various ways, you can: * deploy models in {es} @@ -29,44 +28,32 @@ IMPORTANT: For the easiest way to perform semantic search in the {stack}, refer [[semantic-search-select-nlp-model]] === Select an NLP model -{es} offers the usage of a -{ml-docs}/ml-nlp-model-ref.html#ml-nlp-model-ref-text-embedding[wide range of NLP models], -including both dense and sparse vector models. Your choice of the language model -is critical for implementing semantic search successfully. 
- -While it is possible to bring your own text embedding model, achieving good -search results through model tuning is challenging. Selecting an appropriate -model from our third-party model list is the first step. Training the model on -your own data is essential to ensure better search results than using only BM25. -However, the model training process requires a team of data scientists and ML -experts, making it expensive and time-consuming. - -To address this issue, Elastic provides a pre-trained representational model -called {ml-docs}/ml-nlp-elser.html[Elastic Learned Sparse EncodeR (ELSER)]. -ELSER, currently available only for English, is an out-of-domain sparse vector -model that does not require fine-tuning. This adaptability makes it suitable for -various NLP use cases out of the box. Unless you have a team of ML specialists, -it is highly recommended to use the ELSER model. - -In the case of sparse vector representation, the vectors mostly consist of zero -values, with only a small subset containing non-zero values. This representation -is commonly used for textual data. In the case of ELSER, each document in an -index and the query text itself are represented by high-dimensional sparse -vectors. Each non-zero element of the vector corresponds to a term in the model -vocabulary. The ELSER vocabulary contains around 30000 terms, so the sparse -vectors created by ELSER contain about 30000 values, the majority of which are -zero. Effectively the ELSER model is replacing the terms in the original query -with other terms that have been learnt to exist in the documents that best match -the original search terms in a training dataset, and weights to control how -important each is. +{es} offers the usage of a +{ml-docs}/ml-nlp-model-ref.html#ml-nlp-model-ref-text-embedding[wide range of NLP models], including both dense and sparse vector models. +Your choice of the language model is critical for implementing semantic search successfully. +While it is possible to bring your own text embedding model, achieving good search results through model tuning is challenging. +Selecting an appropriate model from our third-party model list is the first step. +Training the model on your own data is essential to ensure better search results than using only BM25. +However, the model training process requires a team of data scientists and ML experts, making it expensive and time-consuming. + +To address this issue, Elastic provides a pre-trained representational model called {ml-docs}/ml-nlp-elser.html[Elastic Learned Sparse EncodeR (ELSER)]. +ELSER, currently available only for English, is an out-of-domain sparse vector model that does not require fine-tuning. +This adaptability makes it suitable for various NLP use cases out of the box. +Unless you have a team of ML specialists, it is highly recommended to use the ELSER model. + +In the case of sparse vector representation, the vectors mostly consist of zero values, with only a small subset containing non-zero values. +This representation is commonly used for textual data. +In the case of ELSER, each document in an index and the query text itself are represented by high-dimensional sparse vectors. +Each non-zero element of the vector corresponds to a term in the model vocabulary. +The ELSER vocabulary contains around 30000 terms, so the sparse vectors created by ELSER contain about 30000 values, the majority of which are zero. 
+Effectively the ELSER model is replacing the terms in the original query with other terms that have been learnt to exist in the documents that best match the original search terms in a training dataset, and weights to control how important each is. [discrete] [[semantic-search-deploy-nlp-model]] === Deploy the model -After you decide which model you want to use for implementing semantic search, -you need to deploy the model in {es}. +After you decide which model you want to use for implementing semantic search, you need to deploy the model in {es}. include::{es-ref-dir}/tab-widgets/semantic-search/deploy-nlp-model-widget.asciidoc[] @@ -74,9 +61,8 @@ include::{es-ref-dir}/tab-widgets/semantic-search/deploy-nlp-model-widget.asciid [[semantic-search-field-mappings]] === Map a field for the text embeddings -Before you start using the deployed model to generate embeddings based on your -input text, you need to prepare your index mapping first. The mapping of the -index depends on the type of model. +Before you start using the deployed model to generate embeddings based on your input text, you need to prepare your index mapping first. +The mapping of the index depends on the type of model. include::{es-ref-dir}/tab-widgets/semantic-search/field-mappings-widget.asciidoc[] @@ -84,14 +70,12 @@ include::{es-ref-dir}/tab-widgets/semantic-search/field-mappings-widget.asciidoc [[semantic-search-generate-embeddings]] === Generate text embeddings -Once you have created the mappings for the index, you can generate text -embeddings from your input text. This can be done by using an -<> with an <>. -The ingest pipeline processes the input data and indexes it into the destination -index. At index time, the inference ingest processor uses the trained model to -infer against the data ingested through the pipeline. After you created the -ingest pipeline with the inference processor, you can ingest your data through -it to generate the model output. +Once you have created the mappings for the index, you can generate text embeddings from your input text. +This can be done by using an +<> with an <>. +The ingest pipeline processes the input data and indexes it into the destination index. +At index time, the inference ingest processor uses the trained model to infer against the data ingested through the pipeline. +After you created the ingest pipeline with the inference processor, you can ingest your data through it to generate the model output. include::{es-ref-dir}/tab-widgets/semantic-search/generate-embeddings-widget.asciidoc[] @@ -101,8 +85,7 @@ Now it is time to perform semantic search! [[semantic-search-search]] === Search the data -Depending on the type of model you have deployed, you can query rank features -with a text expansion query, or dense vectors with a kNN search. +Depending on the type of model you have deployed, you can query rank features with a <> query, or dense vectors with a kNN search. include::{es-ref-dir}/tab-widgets/semantic-search/search-widget.asciidoc[] @@ -110,13 +93,12 @@ include::{es-ref-dir}/tab-widgets/semantic-search/search-widget.asciidoc[] [[semantic-search-hybrid-search]] === Beyond semantic search with hybrid search -In some situations, lexical search may perform better than semantic search. For -example, when searching for single words or IDs, like product numbers. +In some situations, lexical search may perform better than semantic search. +For example, when searching for single words or IDs, like product numbers. 
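One simple way to get both behaviours in a single request is a `bool` query that pairs a lexical clause with a `sparse_vector` clause, as in this request-body sketch (`my_text`, `my_tokens`, and `my-elser-endpoint` are placeholder names). Hybrid search with reciprocal rank fusion, described next, usually ranks such mixed result sets better:

[source,python]
----
# A request body sketch only: a lexical `match` clause and an ELSER
# `sparse_vector` clause combined in one `bool` query. The field names and
# the inference endpoint are placeholders.
hybrid_query = {
    "query": {
        "bool": {
            "should": [
                {"match": {"my_text": {"query": "blue running shoes", "boost": 1}}},
                {
                    "sparse_vector": {
                        "field": "my_tokens",
                        "inference_id": "my-elser-endpoint",
                        "query": "blue running shoes",
                        "boost": 2,
                    }
                },
            ]
        }
    }
}
----
// NOTCONSOLE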
Combining semantic and lexical search into one hybrid search request using -<> provides the best of both worlds. Not only that, -but hybrid search using reciprocal rank fusion {blog-ref}improving-information-retrieval-elastic-stack-hybrid[has been shown to perform better -in general]. +<> provides the best of both worlds. +Not only that, but hybrid search using reciprocal rank fusion {blog-ref}improving-information-retrieval-elastic-stack-hybrid[has been shown to perform better in general]. include::{es-ref-dir}/tab-widgets/semantic-search/hybrid-search-widget.asciidoc[] diff --git a/docs/reference/security/authorization/managing-roles.asciidoc b/docs/reference/security/authorization/managing-roles.asciidoc index 253aa33822234..535d70cbc5e9c 100644 --- a/docs/reference/security/authorization/managing-roles.asciidoc +++ b/docs/reference/security/authorization/managing-roles.asciidoc @@ -13,7 +13,9 @@ A role is defined by the following JSON structure: "indices": [ ... ], <4> "applications": [ ... ], <5> "remote_indices": [ ... ], <6> - "remote_cluster": [ ... ] <7> + "remote_cluster": [ ... ], <7> + "metadata": { ... }, <8> + "description": "..." <9> } ----- // NOTCONSOLE @@ -40,6 +42,16 @@ A role is defined by the following JSON structure: <>. This field is optional (missing `remote_cluster` privileges effectively means no additional cluster permissions for any API key based remote clusters). +<8> Metadata field associated with the role, such as `metadata.app_tag`. + Metadata is internally indexed as a <> field type. + This means that all sub-fields act like `keyword` fields when querying and sorting. + Metadata values can be simple values, but also lists and maps. + This field is optional. +<9> A string value with the description text of the role. + The maximum length of it is `1000` chars. + The field is internally indexed as a <> field type + (with default values for all parameters). + This field is optional. [[valid-role-name]] NOTE: Role names must be at least 1 and no more than 507 characters. They can diff --git a/docs/reference/security/authorization/privileges.asciidoc b/docs/reference/security/authorization/privileges.asciidoc index be30db4d100bd..44897baa8cb4a 100644 --- a/docs/reference/security/authorization/privileges.asciidoc +++ b/docs/reference/security/authorization/privileges.asciidoc @@ -1,6 +1,9 @@ -[role="xpack"] [[security-privileges]] === Security privileges +:frontmatter-description: A list of privileges that can be assigned to user roles. +:frontmatter-tags-products: [elasticsearch] +:frontmatter-tags-content-type: [reference] +:frontmatter-tags-user-goals: [secure] This section lists the privileges that you can assign to a role. @@ -19,16 +22,19 @@ See <> API for more informations. `create_snapshot`:: Privileges to create snapshots for existing repositories. Can also list and view details on existing repositories and snapshots. ++ +This privilege is not available in {serverless-full}. `cross_cluster_replication`:: Privileges to connect to <> for cross-cluster replication. + -- +This privilege is not available in {serverless-full}. + NOTE: This privilege should _not_ be directly granted. It is used internally by <> and <> to manage cross-cluster API keys. - -- `cross_cluster_search`:: @@ -36,14 +42,17 @@ Privileges to connect to <> and <> to manage cross-cluster API keys. - -- `grant_api_key`:: Privileges to create {es} API keys on behalf of other users. ++ +This privilege is not available in {serverless-full}. 
`manage`:: Builds on `monitor` and adds cluster operations that change values in the cluster. @@ -73,14 +82,37 @@ owned by other users. -- +`manage_autoscaling`:: +All operations related to managing autoscaling policies. ++ +This privilege is not available in {serverless-full}. + `manage_ccr`:: All {ccr} operations related to managing follower indices and auto-follow patterns. It also includes the authority to grant the privileges necessary to manage follower indices and auto-follow patterns. This privilege is necessary only on clusters that contain follower indices. ++ +This privilege is not available in {serverless-full}. + +`manage_data_frame_transforms`:: +All operations related to managing {transforms}. +deprecated[7.5] Use `manage_transform` instead. ++ +This privilege is not available in {serverless-full}. + +`manage_data_stream_global_retention`:: +All operations related to managing the data stream global retention settings. ++ +This privilege is not available in {serverless-full}. + +`manage_enrich`:: +All operations related to managing and executing enrich policies. `manage_ilm`:: -All {Ilm} operations related to managing policies. +All {ilm} operations related to managing policies. ++ +This privilege is not available in {serverless-full}. `manage_index_templates`:: All operations on index templates. @@ -112,6 +144,8 @@ Enables the use of {es} APIs <>, and <>) to initiate and manage OpenID Connect authentication on behalf of other users. ++ +This privilege is not available in {serverless-full}. `manage_own_api_key`:: All security-related operations on {es} API keys that are owned by the current @@ -129,10 +163,14 @@ All operations on ingest pipelines. `manage_rollup`:: All rollup operations, including creating, starting, stopping and deleting rollup jobs. ++ +This privilege is not available in {serverless-full}. `manage_saml`:: Enables the use of internal {es} APIs to initiate and manage SAML authentication on behalf of other users. ++ +This privilege is not available in {serverless-full}. `manage_search_application`:: All CRUD operations on <>. @@ -152,46 +190,49 @@ All security-related operations on {es} service accounts including <>, <>, <>, and <>. ++ +This privilege is not available in {serverless-full}. `manage_slm`:: All {slm} ({slm-init}) actions, including creating and updating policies and starting and stopping {slm-init}. ++ +This privilege is not available in {serverless-full}. ++ +deprecated:[8.15] Also grants the permission to start and stop {Ilm}, using +the {ref}/ilm-start.html[ILM start] and {ref}/ilm-stop.html[ILM stop] APIs. +In a future major release, this privilege will not grant any {Ilm} permissions. `manage_token`:: All security-related operations on tokens that are generated by the {es} Token Service. ++ +This privilege is not available in {serverless-full}. `manage_transform`:: All operations related to managing {transforms}. -`manage_autoscaling`:: -All operations related to managing autoscaling policies. - -`manage_data_frame_transforms`:: -All operations related to managing {transforms}. -deprecated[7.5] Use `manage_transform` instead. - -`manage_enrich`:: -All operations related to managing and executing enrich policies. - -`manage_data_stream_global_retention`:: -All operations related to managing the data stream global retention settings. - `manage_watcher`:: All watcher operations, such as putting watches, executing, activate or acknowledging. + -- +This privilege is not available in {serverless-full}. 
+ NOTE: Watches that were created prior to version 6.1 or created when the {security-features} were disabled run as a system user with elevated privileges, including permission to read and write all indices. Newer watches run with the security roles of the user who created or updated them. - -- `monitor`:: All cluster read-only operations, like cluster health and state, hot threads, node info, node and cluster stats, and pending cluster tasks. +`monitor_data_stream_global_retention`:: +Allows the retrieval of the data stream global retention settings. ++ +This privilege is not available in {serverless-full}. + `monitor_enrich`:: All read-only operations related to managing and executing enrich policies. @@ -205,31 +246,40 @@ model snapshots, or results. `monitor_rollup`:: All read-only rollup operations, such as viewing the list of historical and currently running rollup jobs and their capabilities. ++ +This privilege is not available in {serverless-full}. `monitor_snapshot`:: Privileges to list and view details on existing repositories and snapshots. ++ +This privilege is not available in {serverless-full}. `monitor_text_structure`:: All read-only operations related to the <>. ++ +This privilege is not available in {serverless-full}. `monitor_transform`:: All read-only operations related to {transforms}. -`monitor_data_stream_global_retention`:: -Allows the retrieval of the data stream global retention settings. - `monitor_watcher`:: All read-only watcher operations, such as getting a watch and watcher stats. ++ +This privilege is not available in {serverless-full}. `read_ccr`:: All read-only {ccr} operations, such as getting information about indices and metadata for leader indices in the cluster. It also includes the authority to check whether users have the appropriate privileges to follow leader indices. This privilege is necessary only on clusters that contain leader indices. ++ +This privilege is not available in {serverless-full}. `read_ilm`:: All read-only {Ilm} operations, such as getting policies and checking the status of {Ilm} ++ +This privilege is not available in {serverless-full}. `read_pipeline`:: Read-only access to ingest pipline (get, simulate). @@ -237,6 +287,12 @@ Read-only access to ingest pipline (get, simulate). `read_slm`:: All read-only {slm-init} actions, such as getting policies and checking the {slm-init} status. ++ +This privilege is not available in {serverless-full}. ++ +deprecated:[8.15] Also grants the permission to get the {Ilm} status, using +the {ref}/ilm-get-status.html[ILM get status API]. In a future major release, +this privilege will not grant any {Ilm} permissions. `read_security`:: All read-only security-related operations, such as getting users, user profiles, @@ -247,6 +303,8 @@ on all {es} API keys. `transport_client`:: All privileges necessary for a transport client to connect. Required by the remote cluster to enable <>. ++ +This privilege is not available in {serverless-full}. [[privileges-list-indices]] ==== Indices privileges @@ -320,16 +378,19 @@ Privileges to perform cross-cluster replication for indices located on <>. This privilege should only be used for the `privileges` field of <>. ++ +This privilege is not available in {serverless-full}. `cross_cluster_replication_internal`:: Privileges to perform supporting actions for cross-cluster replication from <>. + -- +This privilege is not available in {serverless-full}. + NOTE: This privilege should _not_ be directly granted. 
It is used internally by <> and <> to manage cross-cluster API keys. - -- `delete`:: @@ -356,24 +417,30 @@ All `monitor` privileges plus index and data stream administration (aliases, analyze, cache clear, close, delete, exists, flush, mapping, open, field capabilities, force merge, refresh, settings, search shards, validate query). +`manage_data_stream_lifecycle`:: +All <> operations relating to reading and managing the built-in lifecycle of a data stream. +This includes operations such as adding and removing a lifecycle from a data stream. + `manage_follow_index`:: All actions that are required to manage the lifecycle of a follower index, which includes creating a follower index, closing it, and converting it to a regular index. This privilege is necessary only on clusters that contain follower indices. ++ +This privilege is not available in {serverless-full}. `manage_ilm`:: All {Ilm} operations relating to managing the execution of policies of an index or data stream. This includes operations such as retrying policies and removing a policy from an index or data stream. - -`manage_data_stream_lifecycle`:: -All <> operations relating to reading and managing the built-in lifecycle of a data stream. -This includes operations such as adding and removing a lifecycle from a data stream. ++ +This privilege is not available in {serverless-full}. `manage_leader_index`:: All actions that are required to manage the lifecycle of a leader index, which includes <>. This privilege is necessary only on clusters that contain leader indices. ++ +This privilege is not available in {serverless-full}. `monitor`:: All actions that are required for monitoring (recovery, segments info, index @@ -386,6 +453,8 @@ clear_scroll, search, suggest, tv). `read_cross_cluster`:: Read-only access to the search action from a <>. ++ +This privilege is not available in {serverless-full}. `view_index_metadata`:: Read-only access to index and data stream metadata (aliases, exists, @@ -411,6 +480,8 @@ of user names. (You can also specify users as an array of strings or a YAML sequence.) For more information, see <>. +This privilege is not available in {serverless-full}. + [[application-privileges]] ==== Application privileges diff --git a/docs/reference/security/operator-privileges/operator-only-functionality.asciidoc b/docs/reference/security/operator-privileges/operator-only-functionality.asciidoc index 9c1f5c9332706..5fc6add4d6396 100644 --- a/docs/reference/security/operator-privileges/operator-only-functionality.asciidoc +++ b/docs/reference/security/operator-privileges/operator-only-functionality.asciidoc @@ -21,7 +21,6 @@ given {es} version. * <> * <> * <> -* <> * <> * <> * <> diff --git a/docs/reference/settings/inference-settings.asciidoc b/docs/reference/settings/inference-settings.asciidoc index fa0905cf0ef73..3476058a17b21 100644 --- a/docs/reference/settings/inference-settings.asciidoc +++ b/docs/reference/settings/inference-settings.asciidoc @@ -34,7 +34,7 @@ message can be logged again. Defaults to one hour (`1h`). `xpack.inference.http.max_response_size`:: (<>) Specifies the maximum size in bytes an HTTP response is allowed to have, -defaults to `10mb`, the maximum configurable value is `50mb`. +defaults to `50mb`, the maximum configurable value is `100mb`. 
`xpack.inference.http.max_total_connections`:: (<>) Specifies the maximum number of connections the internal connection pool can diff --git a/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc b/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc index 2b2090405af60..f18ef1ee6e826 100644 --- a/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc @@ -39,9 +39,6 @@ POST /_snapshot/my_repository/_analyze?blob_count=10&max_blob_size=1mb&timeout=1 <> to use this API. For more information, see <>. -* If the <> is enabled, only operator -users can use this API. - [[repo-analysis-api-desc]] ==== {api-description-title} diff --git a/docs/reference/synonyms/apis/put-synonyms-set.asciidoc b/docs/reference/synonyms/apis/put-synonyms-set.asciidoc index 101413ece38cb..ca75885921456 100644 --- a/docs/reference/synonyms/apis/put-synonyms-set.asciidoc +++ b/docs/reference/synonyms/apis/put-synonyms-set.asciidoc @@ -8,7 +8,6 @@ Creates or updates a synonyms set. NOTE: Synonyms sets are limited to a maximum of 10,000 synonym rules per set. -Synonym sets with more than 10,000 synonym rules will provide inconsistent search results. If you need to manage more synonym rules, you can create multiple synonyms sets. [[put-synonyms-set-request]] diff --git a/docs/reference/synonyms/apis/synonyms-apis.asciidoc b/docs/reference/synonyms/apis/synonyms-apis.asciidoc index 2275219e66445..c9de52939b2fe 100644 --- a/docs/reference/synonyms/apis/synonyms-apis.asciidoc +++ b/docs/reference/synonyms/apis/synonyms-apis.asciidoc @@ -19,7 +19,6 @@ Synonyms sets can be used to configure <> process by the <>. NOTE: Synonyms sets are limited to a maximum of 10,000 synonym rules per set. -Synonym sets with more than 10,000 synonym rules will provide inconsistent search results. If you need to manage more synonym rules, you can create multiple synonyms sets. [discrete] diff --git a/docs/reference/tab-widgets/semantic-search/hybrid-search.asciidoc b/docs/reference/tab-widgets/semantic-search/hybrid-search.asciidoc index 93edc0918614d..c7844c520e074 100644 --- a/docs/reference/tab-widgets/semantic-search/hybrid-search.asciidoc +++ b/docs/reference/tab-widgets/semantic-search/hybrid-search.asciidoc @@ -2,7 +2,7 @@ Hybrid search between a semantic and lexical query can be achieved by using an <> as part of your search request. Provide a -`text_expansion` query and a full-text query as +`sparse_vector` query and a full-text query as <> for the `rrf` retriever. The `rrf` retriever uses <> to rank the top documents. @@ -25,11 +25,10 @@ GET my-index/_search { "standard": { "query": { - "text_expansion": { - "my_tokens": { - "model_id": ".elser_model_2", - "model_text": "the query string" - } + "sparse_vector": { + "field": "my_tokens", + "inference_id": "my-elser-endpoint", + "query": "the query string" } } } diff --git a/docs/reference/tab-widgets/semantic-search/search.asciidoc b/docs/reference/tab-widgets/semantic-search/search.asciidoc index 315328add07f0..eb83efff53f06 100644 --- a/docs/reference/tab-widgets/semantic-search/search.asciidoc +++ b/docs/reference/tab-widgets/semantic-search/search.asciidoc @@ -1,35 +1,33 @@ // tag::elser[] -ELSER text embeddings can be queried using a -<>. The text expansion -query enables you to query a rank features field or a sparse vector field, by -providing the model ID of the NLP model, and the query text: +ELSER text embeddings can be queried using a +<>. 
The sparse vector +query enables you to query a <> field, by +providing the inference ID associated with the NLP model you want to use, and the query text: [source,console] ---- GET my-index/_search { "query":{ - "text_expansion":{ - "my_tokens":{ <1> - "model_id":".elser_model_2", - "model_text":"the query string" - } + "sparse_vector": { + "field": "my_tokens", + "inference_id": "my-elser-endpoint", + "query": "the query string" } } } ---- // TEST[skip:TBD] -<1> The field of type `sparse_vector`. // end::elser[] // tag::dense-vector[] -Text embeddings produced by dense vector models can be queried using a -<>. In the `knn` clause, provide the name of the -dense vector field, and a `query_vector_builder` clause with the model ID and +Text embeddings produced by dense vector models can be queried using a +<>. In the `knn` clause, provide the name of the +dense vector field, and a `query_vector_builder` clause with the model ID and the query text. [source,console] @@ -41,9 +39,9 @@ GET my-index/_search "k": 10, "num_candidates": 100, "query_vector_builder": { - "text_embedding": { - "model_id": "sentence-transformers__msmarco-minilm-l-12-v3", - "model_text": "the query string" + "text_embedding": { + "model_id": "sentence-transformers__msmarco-minilm-l-12-v3", + "model_text": "the query string" } } } @@ -51,4 +49,4 @@ GET my-index/_search ---- // TEST[skip:TBD] -// end::dense-vector[] \ No newline at end of file +// end::dense-vector[] diff --git a/docs/reference/vectors/vector-functions.asciidoc b/docs/reference/vectors/vector-functions.asciidoc index 4e627ef18ec6c..2a80290cf9d3b 100644 --- a/docs/reference/vectors/vector-functions.asciidoc +++ b/docs/reference/vectors/vector-functions.asciidoc @@ -1,4 +1,3 @@ -[role="xpack"] [[vector-functions]] ===== Functions for vector fields @@ -17,6 +16,8 @@ This is the list of available vector functions and vector access methods: 6. <].vectorValue`>> – returns a vector's value as an array of floats 7. <].magnitude`>> – returns a vector's magnitude +NOTE: The `cosineSimilarity` and `dotProduct` functions are not supported for `bit` vectors. + NOTE: The recommended way to access dense vectors is through the `cosineSimilarity`, `dotProduct`, `l1norm` or `l2norm` functions. Please note however, that you should call these functions only once per script. For example, @@ -193,7 +194,7 @@ we added `1` in the denominator. ====== Hamming distance The `hamming` function calculates {wikipedia}/Hamming_distance[Hamming distance] between a given query vector and -document vectors. It is only available for byte vectors. +document vectors. It is only available for byte and bit vectors. [source,console] -------------------------------------------------- @@ -278,10 +279,14 @@ You can access vector values directly through the following functions: - `doc[].vectorValue` – returns a vector's value as an array of floats +NOTE: For `bit` vectors, it does return a `float[]`, where each element represents 8 bits. + - `doc[].magnitude` – returns a vector's magnitude as a float (for vectors created prior to version 7.5 the magnitude is not stored. So this function calculates it anew every time it is called). +NOTE: For `bit` vectors, this is just the square root of the sum of `1` bits. 
+ For example, the script below implements a cosine similarity using these two functions: @@ -319,3 +324,14 @@ GET my-index-000001/_search } } -------------------------------------------------- +[[vector-functions-bit-vectors]] +====== Bit vectors and vector functions + +When using `bit` vectors, not all the vector functions are available. The supported functions are: + +* <> – calculates Hamming distance, the sum of the bitwise XOR of the two vectors +* <> – calculates L^1^ distance, this is simply the `hamming` distance +* <> - calculates L^2^ distance, this is the square root of the `hamming` distance + +Currently, the `cosineSimilarity` and `dotProduct` functions are not supported for `bit` vectors. + diff --git a/gradle/build.versions.toml b/gradle/build.versions.toml index 5a32d2e0a58cd..792330fd3613b 100644 --- a/gradle/build.versions.toml +++ b/gradle/build.versions.toml @@ -22,6 +22,7 @@ hamcrest = "org.hamcrest:hamcrest:2.1" httpcore = "org.apache.httpcomponents:httpcore:4.4.12" httpclient = "org.apache.httpcomponents:httpclient:4.5.14" idea-ext = "gradle.plugin.org.jetbrains.gradle.plugin.idea-ext:gradle-idea-ext:1.1.4" +javaparser = "com.github.javaparser:javaparser-core:3.18.0" json-schema-validator = "com.networknt:json-schema-validator:1.0.72" json-assert = "org.skyscreamer:jsonassert:1.5.0" jackson-core = { group = "com.fasterxml.jackson.core", name="jackson-core", version.ref="jackson" } @@ -39,6 +40,7 @@ mockito-core = "org.mockito:mockito-core:1.9.5" nebula-info = "com.netflix.nebula:gradle-info-plugin:11.3.3" reflections = "org.reflections:reflections:0.9.12" shadow-plugin = "com.github.breskeby:shadow:3b035f2" +snakeyaml = { group = "org.yaml", name = "snakeyaml", version = { strictly = "2.0" } } spock-core = { group = "org.spockframework", name="spock-core", version.ref="spock" } spock-junit4 = { group = "org.spockframework", name="spock-junit4", version.ref="spock" } spock-platform = { group = "org.spockframework", name="spock-bom", version.ref="spock" } diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 6e4beb0953b56..5e26d96c4ca17 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -84,6 +84,11 @@ + + + + + @@ -581,6 +586,11 @@ + + + + + @@ -1689,16 +1699,16 @@ - - - - - + + + + + @@ -1841,6 +1851,26 @@ + + + + + + + + + + + + + + + + + + + + @@ -2724,124 +2754,124 @@ - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + @@ -3177,6 +3207,11 @@ + + + + + @@ -3802,14 +3837,14 @@ - - - + + + - - - + + + diff --git a/libs/core/src/test/java/org/elasticsearch/core/AbstractRefCountedTests.java b/libs/core/src/test/java/org/elasticsearch/core/AbstractRefCountedTests.java index 9610bae32a775..74dcd19248834 100644 --- a/libs/core/src/test/java/org/elasticsearch/core/AbstractRefCountedTests.java +++ b/libs/core/src/test/java/org/elasticsearch/core/AbstractRefCountedTests.java @@ -9,7 +9,6 @@ import org.elasticsearch.test.ESTestCase; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import static org.hamcrest.Matchers.equalTo; @@ -62,32 +61,22 @@ public void testRefCount() { public void testMultiThreaded() throws InterruptedException { final AbstractRefCounted counted = createRefCounted(); - final Thread[] threads = new 
Thread[randomIntBetween(2, 5)]; - final CountDownLatch latch = new CountDownLatch(1); - for (int i = 0; i < threads.length; i++) { - threads[i] = new Thread(() -> { - try { - latch.await(); - for (int j = 0; j < 10000; j++) { - assertTrue(counted.hasReferences()); - if (randomBoolean()) { - counted.incRef(); - } else { - assertTrue(counted.tryIncRef()); - } - assertTrue(counted.hasReferences()); - counted.decRef(); + startInParallel(randomIntBetween(2, 5), i -> { + try { + for (int j = 0; j < 10000; j++) { + assertTrue(counted.hasReferences()); + if (randomBoolean()) { + counted.incRef(); + } else { + assertTrue(counted.tryIncRef()); } - } catch (Exception e) { - throw new AssertionError(e); + assertTrue(counted.hasReferences()); + counted.decRef(); } - }); - threads[i].start(); - } - latch.countDown(); - for (Thread thread : threads) { - thread.join(); - } + } catch (Exception e) { + throw new AssertionError(e); + } + }); counted.decRef(); assertFalse(counted.hasReferences()); assertThat( diff --git a/libs/native/jna/src/main/java/module-info.java b/libs/native/jna/src/main/java/module-info.java index 1b95ccc7cdda0..6e8b0847ce030 100644 --- a/libs/native/jna/src/main/java/module-info.java +++ b/libs/native/jna/src/main/java/module-info.java @@ -14,6 +14,7 @@ requires org.elasticsearch.nativeaccess; requires org.elasticsearch.logging; requires com.sun.jna; + requires java.desktop; exports org.elasticsearch.nativeaccess.jna to com.sun.jna; diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaKernel32Library.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaKernel32Library.java index 0bfdf959f7b58..1403806c595a7 100644 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaKernel32Library.java +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaKernel32Library.java @@ -13,13 +13,16 @@ import com.sun.jna.NativeLong; import com.sun.jna.Pointer; import com.sun.jna.Structure; +import com.sun.jna.Structure.ByReference; import com.sun.jna.WString; +import com.sun.jna.ptr.IntByReference; import com.sun.jna.win32.StdCallLibrary; import org.elasticsearch.nativeaccess.WindowsFunctions.ConsoleCtrlHandler; import org.elasticsearch.nativeaccess.lib.Kernel32Library; import java.util.List; +import java.util.function.IntConsumer; class JnaKernel32Library implements Kernel32Library { private static class JnaHandle implements Handle { @@ -98,6 +101,38 @@ public long Type() { } } + /** + * Basic limit information for a job object + * + * https://msdn.microsoft.com/en-us/library/windows/desktop/ms684147%28v=vs.85%29.aspx + */ + public static class JnaJobObjectBasicLimitInformation extends Structure implements ByReference, JobObjectBasicLimitInformation { + public byte[] _ignore1 = new byte[16]; + public int LimitFlags; + public byte[] _ignore2 = new byte[20]; + public int ActiveProcessLimit; + public byte[] _ignore3 = new byte[20]; + + public JnaJobObjectBasicLimitInformation() { + super(8); + } + + @Override + protected List getFieldOrder() { + return List.of("_ignore1", "LimitFlags", "_ignore2", "ActiveProcessLimit", "_ignore3"); + } + + @Override + public void setLimitFlags(int v) { + LimitFlags = v; + } + + @Override + public void setActiveProcessLimit(int v) { + ActiveProcessLimit = v; + } + } + /** * JNA adaptation of {@link ConsoleCtrlHandler} */ @@ -125,9 +160,25 @@ private interface NativeFunctions extends StdCallLibrary { boolean SetProcessWorkingSetSize(Pointer handle, SizeT minSize, SizeT maxSize); + int 
GetCompressedFileSizeW(WString lpFileName, IntByReference lpFileSizeHigh); + int GetShortPathNameW(WString lpszLongPath, char[] lpszShortPath, int cchBuffer); boolean SetConsoleCtrlHandler(StdCallLibrary.StdCallCallback handler, boolean add); + + Pointer CreateJobObjectW(Pointer jobAttributes, String name); + + boolean AssignProcessToJobObject(Pointer job, Pointer process); + + boolean QueryInformationJobObject( + Pointer job, + int infoClass, + JnaJobObjectBasicLimitInformation info, + int infoLength, + Pointer returnLength + ); + + boolean SetInformationJobObject(Pointer job, int infoClass, JnaJobObjectBasicLimitInformation info, int infoLength); } private final NativeFunctions functions; @@ -185,6 +236,15 @@ public boolean SetProcessWorkingSetSize(Handle handle, long minSize, long maxSiz return functions.SetProcessWorkingSetSize(jnaHandle.pointer, new SizeT(minSize), new SizeT(maxSize)); } + @Override + public int GetCompressedFileSizeW(String lpFileName, IntConsumer lpFileSizeHigh) { + var wideFileName = new WString(lpFileName); + var fileSizeHigh = new IntByReference(); + int ret = functions.GetCompressedFileSizeW(wideFileName, fileSizeHigh); + lpFileSizeHigh.accept(fileSizeHigh.getValue()); + return ret; + } + @Override public int GetShortPathNameW(String lpszLongPath, char[] lpszShortPath, int cchBuffer) { var wideFileName = new WString(lpszLongPath); @@ -197,4 +257,42 @@ public boolean SetConsoleCtrlHandler(ConsoleCtrlHandler handler, boolean add) { consoleCtrlHandlerCallback = new NativeHandlerCallback(handler); return functions.SetConsoleCtrlHandler(consoleCtrlHandlerCallback, true); } + + @Override + public Handle CreateJobObjectW() { + return new JnaHandle(functions.CreateJobObjectW(null, null)); + } + + @Override + public boolean AssignProcessToJobObject(Handle job, Handle process) { + assert job instanceof JnaHandle; + assert process instanceof JnaHandle; + var jnaJob = (JnaHandle) job; + var jnaProcess = (JnaHandle) process; + return functions.AssignProcessToJobObject(jnaJob.pointer, jnaProcess.pointer); + } + + @Override + public JobObjectBasicLimitInformation newJobObjectBasicLimitInformation() { + return new JnaJobObjectBasicLimitInformation(); + } + + @Override + public boolean QueryInformationJobObject(Handle job, int infoClass, JobObjectBasicLimitInformation info) { + assert job instanceof JnaHandle; + assert info instanceof JnaJobObjectBasicLimitInformation; + var jnaJob = (JnaHandle) job; + var jnaInfo = (JnaJobObjectBasicLimitInformation) info; + var ret = functions.QueryInformationJobObject(jnaJob.pointer, infoClass, jnaInfo, jnaInfo.size(), null); + return ret; + } + + @Override + public boolean SetInformationJobObject(Handle job, int infoClass, JobObjectBasicLimitInformation info) { + assert job instanceof JnaHandle; + assert info instanceof JnaJobObjectBasicLimitInformation; + var jnaJob = (JnaHandle) job; + var jnaInfo = (JnaJobObjectBasicLimitInformation) info; + return functions.SetInformationJobObject(jnaJob.pointer, infoClass, jnaInfo, jnaInfo.size()); + } } diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaLinuxCLibrary.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaLinuxCLibrary.java new file mode 100644 index 0000000000000..ca3137ab5df0e --- /dev/null +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaLinuxCLibrary.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.jna; + +import com.sun.jna.Library; +import com.sun.jna.Memory; +import com.sun.jna.Native; +import com.sun.jna.NativeLong; +import com.sun.jna.Pointer; +import com.sun.jna.Structure; + +import org.elasticsearch.nativeaccess.lib.LinuxCLibrary; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; + +class JnaLinuxCLibrary implements LinuxCLibrary { + + @Structure.FieldOrder({ "len", "filter" }) + public static final class JnaSockFProg extends Structure implements Structure.ByReference, SockFProg { + public short len; // number of filters + public Pointer filter; // filters + + JnaSockFProg(SockFilter filters[]) { + len = (short) filters.length; + // serialize struct sock_filter * explicitly, its less confusing than the JNA magic we would need + Memory filter = new Memory(len * 8); + ByteBuffer bbuf = filter.getByteBuffer(0, len * 8); + bbuf.order(ByteOrder.nativeOrder()); // little endian + for (SockFilter f : filters) { + bbuf.putShort(f.code()); + bbuf.put(f.jt()); + bbuf.put(f.jf()); + bbuf.putInt(f.k()); + } + this.filter = filter; + } + + @Override + public long address() { + return Pointer.nativeValue(getPointer()); + } + } + + private interface NativeFunctions extends Library { + + /** + * maps to prctl(2) + */ + int prctl(int option, NativeLong arg2, NativeLong arg3, NativeLong arg4, NativeLong arg5); + + /** + * used to call seccomp(2), its too new... + * this is the only way, DON'T use it on some other architecture unless you know wtf you are doing + */ + NativeLong syscall(NativeLong number, Object... args); + + int fallocate(int fd, int mode, long offset, long length); + } + + private final NativeFunctions functions; + + JnaLinuxCLibrary() { + try { + this.functions = Native.load("c", NativeFunctions.class); + } catch (UnsatisfiedLinkError e) { + throw new UnsupportedOperationException( + "seccomp unavailable: could not link methods. requires kernel 3.5+ " + + "with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in" + ); + } + } + + @Override + public SockFProg newSockFProg(SockFilter[] filters) { + var prog = new JnaSockFProg(filters); + prog.write(); + return prog; + } + + @Override + public int prctl(int option, long arg2, long arg3, long arg4, long arg5) { + return functions.prctl(option, new NativeLong(arg2), new NativeLong(arg3), new NativeLong(arg4), new NativeLong(arg5)); + } + + @Override + public long syscall(long number, int operation, int flags, long address) { + return functions.syscall(new NativeLong(number), operation, flags, address).longValue(); + } + + @Override + public int fallocate(int fd, int mode, long offset, long length) { + return functions.fallocate(fd, mode, offset, length); + } +} diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaMacCLibrary.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaMacCLibrary.java new file mode 100644 index 0000000000000..f416cf862b417 --- /dev/null +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaMacCLibrary.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.jna; + +import com.sun.jna.Library; +import com.sun.jna.Native; +import com.sun.jna.Pointer; +import com.sun.jna.ptr.PointerByReference; + +import org.elasticsearch.nativeaccess.lib.MacCLibrary; + +class JnaMacCLibrary implements MacCLibrary { + static class JnaErrorReference implements ErrorReference { + final PointerByReference ref = new PointerByReference(); + + @Override + public String toString() { + return ref.getValue().getString(0); + } + } + + private interface NativeFunctions extends Library { + int sandbox_init(String profile, long flags, PointerByReference errorbuf); + + void sandbox_free_error(Pointer errorbuf); + } + + private final NativeFunctions functions; + + JnaMacCLibrary() { + this.functions = Native.load("c", NativeFunctions.class); + } + + @Override + public ErrorReference newErrorReference() { + return new JnaErrorReference(); + } + + @Override + public int sandbox_init(String profile, long flags, ErrorReference errorbuf) { + assert errorbuf instanceof JnaErrorReference; + var jnaErrorbuf = (JnaErrorReference) errorbuf; + return functions.sandbox_init(profile, flags, jnaErrorbuf.ref); + } + + @Override + public void sandbox_free_error(ErrorReference errorbuf) { + assert errorbuf instanceof JnaErrorReference; + var jnaErrorbuf = (JnaErrorReference) errorbuf; + functions.sandbox_free_error(jnaErrorbuf.ref.getValue()); + } + +} diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java index 9d34b1ba617e8..454581ae70b51 100644 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java @@ -10,6 +10,8 @@ import org.elasticsearch.nativeaccess.lib.JavaLibrary; import org.elasticsearch.nativeaccess.lib.Kernel32Library; +import org.elasticsearch.nativeaccess.lib.LinuxCLibrary; +import org.elasticsearch.nativeaccess.lib.MacCLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; @@ -30,6 +32,10 @@ public JnaNativeLibraryProvider() { JnaJavaLibrary::new, PosixCLibrary.class, JnaPosixCLibrary::new, + LinuxCLibrary.class, + JnaLinuxCLibrary::new, + MacCLibrary.class, + JnaMacCLibrary::new, Kernel32Library.class, JnaKernel32Library::new, SystemdLibrary.class, diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java index 7e8e4f23ab034..ea5bc4c9e3546 100644 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java @@ -9,8 +9,10 @@ package org.elasticsearch.nativeaccess.jna; import com.sun.jna.Library; +import com.sun.jna.Memory; import com.sun.jna.Native; import com.sun.jna.NativeLong; +import com.sun.jna.Pointer; import com.sun.jna.Structure; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; @@ -39,6 +41,71 @@ public long rlim_cur() { 
public long rlim_max() { return rlim_max.longValue(); } + + @Override + public void rlim_cur(long v) { + rlim_cur.setValue(v); + } + + @Override + public void rlim_max(long v) { + rlim_max.setValue(v); + } + } + + public static final class JnaStat64 implements Stat64 { + final Memory memory; + private final int stSizeOffset; + private final int stBlocksOffset; + + JnaStat64(int sizeof, int stSizeOffset, int stBlocksOffset) { + this.memory = new Memory(sizeof); + this.stSizeOffset = stSizeOffset; + this.stBlocksOffset = stBlocksOffset; + } + + @Override + public long st_size() { + return memory.getLong(stSizeOffset); + } + + @Override + public long st_blocks() { + return memory.getLong(stBlocksOffset); + } + } + + public static class JnaFStore implements FStore { + final Memory memory; + + JnaFStore() { + this.memory = new Memory(32); + } + + @Override + public void set_flags(int flags) { + memory.setInt(0, flags); + } + + @Override + public void set_posmode(int posmode) { + memory.setInt(4, posmode); + } + + @Override + public void set_offset(long offset) { + memory.setLong(8, offset); + } + + @Override + public void set_length(long length) { + memory.setLong(16, length); + } + + @Override + public long bytesalloc() { + return memory.getLong(24); + } } private interface NativeFunctions extends Library { @@ -46,15 +113,44 @@ private interface NativeFunctions extends Library { int getrlimit(int resource, JnaRLimit rlimit); + int setrlimit(int resource, JnaRLimit rlimit); + int mlockall(int flags); + int fcntl(int fd, int cmd, Pointer fst); + + int ftruncate(int fd, NativeLong length); + + int open(String filename, int flags, Object... mode); + + int close(int fd); + String strerror(int errno); } + private interface FStat64Function extends Library { + int fstat64(int fd, Pointer stat); + } + + private interface FXStatFunction extends Library { + int __fxstat(int version, int fd, Pointer stat); + } + private final NativeFunctions functions; + private final FStat64Function fstat64; JnaPosixCLibrary() { this.functions = Native.load("c", NativeFunctions.class); + FStat64Function fstat64; + try { + fstat64 = Native.load("c", FStat64Function.class); + } catch (UnsatisfiedLinkError e) { + // TODO: explain + var fxstat = Native.load("c", FXStatFunction.class); + int version = System.getProperty("os.arch").equals("aarch64") ? 
0 : 1; + fstat64 = (fd, stat) -> fxstat.__fxstat(version, fd, stat); + } + this.fstat64 = fstat64; } @Override @@ -67,6 +163,11 @@ public RLimit newRLimit() { return new JnaRLimit(); } + @Override + public Stat64 newStat64(int sizeof, int stSizeOffset, int stBlocksOffset) { + return new JnaStat64(sizeof, stSizeOffset, stBlocksOffset); + } + @Override public int getrlimit(int resource, RLimit rlimit) { assert rlimit instanceof JnaRLimit; @@ -74,11 +175,57 @@ public int getrlimit(int resource, RLimit rlimit) { return functions.getrlimit(resource, jnaRlimit); } + @Override + public int setrlimit(int resource, RLimit rlimit) { + assert rlimit instanceof JnaRLimit; + var jnaRlimit = (JnaRLimit) rlimit; + return functions.setrlimit(resource, jnaRlimit); + } + @Override public int mlockall(int flags) { return functions.mlockall(flags); } + @Override + public FStore newFStore() { + return new JnaFStore(); + } + + @Override + public int fcntl(int fd, int cmd, FStore fst) { + assert fst instanceof JnaFStore; + var jnaFst = (JnaFStore) fst; + return functions.fcntl(fd, cmd, jnaFst.memory); + } + + @Override + public int ftruncate(int fd, long length) { + return functions.ftruncate(fd, new NativeLong(length)); + } + + @Override + public int open(String pathname, int flags) { + return functions.open(pathname, flags); + } + + @Override + public int open(String pathname, int flags, int mode) { + return functions.open(pathname, flags, mode); + } + + @Override + public int close(int fd) { + return functions.close(fd); + } + + @Override + public int fstat64(int fd, Stat64 stats) { + assert stats instanceof JnaStat64; + var jnaStats = (JnaStat64) stats; + return fstat64.fstat64(fd, jnaStats.memory); + } + @Override public String strerror(int errno) { return functions.strerror(errno); diff --git a/libs/native/libraries/build.gradle b/libs/native/libraries/build.gradle index b7e6a1c704e6e..8f1a12055bd7e 100644 --- a/libs/native/libraries/build.gradle +++ b/libs/native/libraries/build.gradle @@ -18,7 +18,7 @@ configurations { } var zstdVersion = "1.5.5" -var vecVersion = "1.0.9" +var vecVersion = "1.0.10" repositories { exclusiveContent { diff --git a/libs/native/src/main/java/module-info.java b/libs/native/src/main/java/module-info.java index d895df1be1c56..226503b24832d 100644 --- a/libs/native/src/main/java/module-info.java +++ b/libs/native/src/main/java/module-info.java @@ -19,6 +19,7 @@ to org.elasticsearch.nativeaccess.jna, org.elasticsearch.server, + org.elasticsearch.blobcache, org.elasticsearch.simdvec, org.elasticsearch.systemd; // allows jna to implement a library provider, and ProviderLocator to load it diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/AbstractNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/AbstractNativeAccess.java index 80a18a2bc8aa0..c10f57a900ff7 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/AbstractNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/AbstractNativeAccess.java @@ -22,6 +22,7 @@ abstract class AbstractNativeAccess implements NativeAccess { private final JavaLibrary javaLib; private final Zstd zstd; protected boolean isMemoryLocked = false; + protected ExecSandboxState execSandboxState = ExecSandboxState.NONE; protected AbstractNativeAccess(String name, NativeLibraryProvider libraryProvider) { this.name = name; @@ -53,4 +54,9 @@ public CloseableByteBuffer newBuffer(int len) { public boolean isMemoryLocked() { return isMemoryLocked; } + + @Override + public 
ExecSandboxState getExecSandboxState() { + return execSandboxState; + } } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java index 7948dad1df4ad..f6e6035a8aba6 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java @@ -8,15 +8,90 @@ package org.elasticsearch.nativeaccess; +import org.elasticsearch.nativeaccess.lib.LinuxCLibrary; +import org.elasticsearch.nativeaccess.lib.LinuxCLibrary.SockFProg; +import org.elasticsearch.nativeaccess.lib.LinuxCLibrary.SockFilter; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; import org.elasticsearch.nativeaccess.lib.SystemdLibrary; +import java.util.Map; + class LinuxNativeAccess extends PosixNativeAccess { - Systemd systemd; + private static final int STATX_BLOCKS = 0x400; /* Want/got stx_blocks */ + + /** the preferred method is seccomp(2), since we can apply to all threads of the process */ + static final int SECCOMP_SET_MODE_FILTER = 1; // since Linux 3.17 + static final int SECCOMP_FILTER_FLAG_TSYNC = 1; // since Linux 3.17 + + /** otherwise, we can use prctl(2), which will at least protect ES application threads */ + static final int PR_GET_NO_NEW_PRIVS = 39; // since Linux 3.5 + static final int PR_SET_NO_NEW_PRIVS = 38; // since Linux 3.5 + static final int PR_GET_SECCOMP = 21; // since Linux 2.6.23 + static final int PR_SET_SECCOMP = 22; // since Linux 2.6.23 + static final long SECCOMP_MODE_FILTER = 2; // since Linux Linux 3.5 + + // BPF "macros" and constants + static final int BPF_LD = 0x00; + static final int BPF_W = 0x00; + static final int BPF_ABS = 0x20; + static final int BPF_JMP = 0x05; + static final int BPF_JEQ = 0x10; + static final int BPF_JGE = 0x30; + static final int BPF_JGT = 0x20; + static final int BPF_RET = 0x06; + static final int BPF_K = 0x00; + + static SockFilter BPF_STMT(int code, int k) { + return new SockFilter((short) code, (byte) 0, (byte) 0, k); + } + + static SockFilter BPF_JUMP(int code, int k, int jt, int jf) { + return new SockFilter((short) code, (byte) jt, (byte) jf, k); + } + + static final int SECCOMP_RET_ERRNO = 0x00050000; + static final int SECCOMP_RET_DATA = 0x0000FFFF; + static final int SECCOMP_RET_ALLOW = 0x7FFF0000; + + // some errno constants for error checking/handling + static final int EACCES = 0x0D; + static final int EFAULT = 0x0E; + static final int EINVAL = 0x16; + static final int ENOSYS = 0x26; + + // offsets that our BPF checks + // check with offsetof() when adding a new arch, move to Arch if different. 
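+ // These offsets index into struct seccomp_data (see linux/seccomp.h):
+ //   struct seccomp_data { int nr; __u32 arch; __u64 instruction_pointer; __u64 args[6]; };
+ // so the syscall number (nr) sits at offset 0 and the architecture token (arch) at offset 4.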
+ static final int SECCOMP_DATA_NR_OFFSET = 0x00; + static final int SECCOMP_DATA_ARCH_OFFSET = 0x04; + + record Arch( + int audit, // AUDIT_ARCH_XXX constant from linux/audit.h + int limit, // syscall limit (necessary for blacklisting on amd64, to ban 32-bit syscalls) + int fork, // __NR_fork + int vfork, // __NR_vfork + int execve, // __NR_execve + int execveat, // __NR_execveat + int seccomp // __NR_seccomp + ) {} + + /** supported architectures for seccomp keyed by os.arch */ + private static final Map ARCHITECTURES; + static { + ARCHITECTURES = Map.of( + "amd64", + new Arch(0xC000003E, 0x3FFFFFFF, 57, 58, 59, 322, 317), + "aarch64", + new Arch(0xC00000B7, 0xFFFFFFFF, 1079, 1071, 221, 281, 277) + ); + } + + private final LinuxCLibrary linuxLibc; + private final Systemd systemd; LinuxNativeAccess(NativeLibraryProvider libraryProvider) { - super("Linux", libraryProvider, new PosixConstants(-1L, 9, 1, 8)); + super("Linux", libraryProvider, new PosixConstants(-1L, 9, 1, 8, 64, 144, 48, 64)); + this.linuxLibc = libraryProvider.getLibrary(LinuxCLibrary.class); this.systemd = new Systemd(libraryProvider.getLibrary(SystemdLibrary.class)); } @@ -46,4 +121,207 @@ protected void logMemoryLimitInstructions() { \t{} hard memlock unlimited""", user, user, user); logger.warn("If you are logged in interactively, you will have to re-login for the new limits to take effect."); } + + @Override + protected boolean nativePreallocate(int fd, long currentSize, long newSize) { + final int rc = linuxLibc.fallocate(fd, 0, currentSize, newSize - currentSize); + if (rc != 0) { + logger.warn("fallocate failed: " + libc.strerror(libc.errno())); + return false; + } + return true; + } + + /** + * Installs exec system call filtering for Linux. + *

+ * On Linux exec system call filtering currently supports amd64 and aarch64 architectures.
+ * It requires Linux kernel 3.5 or above, and {@code CONFIG_SECCOMP} and {@code CONFIG_SECCOMP_FILTER}
+ * compiled into the kernel.
+ * <p>
+ * On Linux BPF Filters are installed using either {@code seccomp(2)} (3.17+) or {@code prctl(2)} (3.5+). {@code seccomp(2)}
+ * is preferred, as it allows filters to be applied to any existing threads in the process, and one motivation
+ * here is to protect against bugs in the JVM. Otherwise, code will fall back to the {@code prctl(2)} method
+ * which will at least protect elasticsearch application threads.
+ * <p>
+ * Linux BPF filters will return {@code EACCES} (Access Denied) for the following system calls:
+ * <ul>
+ *   <li>{@code execve}</li>
+ *   <li>{@code fork}</li>
+ *   <li>{@code vfork}</li>
+ *   <li>{@code execveat}</li>
+ * </ul>
+ * @see + * * http://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt + */ + @Override + public void tryInstallExecSandbox() { + // first be defensive: we can give nice errors this way, at the very least. + // also, some of these security features get backported to old versions, checking kernel version here is a big no-no! + String archId = System.getProperty("os.arch"); + final Arch arch = ARCHITECTURES.get(archId); + if (arch == null) { + throw new UnsupportedOperationException("seccomp unavailable: '" + archId + "' architecture unsupported"); + } + + // try to check system calls really are who they claim + // you never know (e.g. https://chromium.googlesource.com/chromium/src.git/+/master/sandbox/linux/seccomp-bpf/sandbox_bpf.cc#57) + final int bogusArg = 0xf7a46a5c; + + // test seccomp(BOGUS) + long ret = linuxLibc.syscall(arch.seccomp, bogusArg, 0, 0); + if (ret != -1) { + throw new UnsupportedOperationException("seccomp unavailable: seccomp(BOGUS_OPERATION) returned " + ret); + } else { + int errno = libc.errno(); + switch (errno) { + case ENOSYS: + break; // ok + case EINVAL: + break; // ok + default: + throw new UnsupportedOperationException("seccomp(BOGUS_OPERATION): " + libc.strerror(errno)); + } + } + + // test seccomp(VALID, BOGUS) + ret = linuxLibc.syscall(arch.seccomp, SECCOMP_SET_MODE_FILTER, bogusArg, 0); + if (ret != -1) { + throw new UnsupportedOperationException("seccomp unavailable: seccomp(SECCOMP_SET_MODE_FILTER, BOGUS_FLAG) returned " + ret); + } else { + int errno = libc.errno(); + switch (errno) { + case ENOSYS: + break; // ok + case EINVAL: + break; // ok + default: + throw new UnsupportedOperationException("seccomp(SECCOMP_SET_MODE_FILTER, BOGUS_FLAG): " + libc.strerror(errno)); + } + } + + // test prctl(BOGUS) + ret = linuxLibc.prctl(bogusArg, 0, 0, 0, 0); + if (ret != -1) { + throw new UnsupportedOperationException("seccomp unavailable: prctl(BOGUS_OPTION) returned " + ret); + } else { + int errno = libc.errno(); + switch (errno) { + case ENOSYS: + break; // ok + case EINVAL: + break; // ok + default: + throw new UnsupportedOperationException("prctl(BOGUS_OPTION): " + libc.strerror(errno)); + } + } + + // now just normal defensive checks + + // check for GET_NO_NEW_PRIVS + switch (linuxLibc.prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0)) { + case 0: + break; // not yet set + case 1: + break; // already set by caller + default: + int errno = libc.errno(); + if (errno == EINVAL) { + // friendly error, this will be the typical case for an old kernel + throw new UnsupportedOperationException( + "seccomp unavailable: requires kernel 3.5+ with" + " CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in" + ); + } else { + throw new UnsupportedOperationException("prctl(PR_GET_NO_NEW_PRIVS): " + libc.strerror(errno)); + } + } + // check for SECCOMP + switch (linuxLibc.prctl(PR_GET_SECCOMP, 0, 0, 0, 0)) { + case 0: + break; // not yet set + case 2: + break; // already in filter mode by caller + default: + int errno = libc.errno(); + if (errno == EINVAL) { + throw new UnsupportedOperationException( + "seccomp unavailable: CONFIG_SECCOMP not compiled into kernel," + + " CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed" + ); + } else { + throw new UnsupportedOperationException("prctl(PR_GET_SECCOMP): " + libc.strerror(errno)); + } + } + // check for SECCOMP_MODE_FILTER + if (linuxLibc.prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, 0, 0, 0) != 0) { + int errno = libc.errno(); + switch (errno) { + case EFAULT: + break; // available + case EINVAL: + throw new 
UnsupportedOperationException( + "seccomp unavailable: CONFIG_SECCOMP_FILTER not" + + " compiled into kernel, CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed" + ); + default: + throw new UnsupportedOperationException("prctl(PR_SET_SECCOMP): " + libc.strerror(errno)); + } + } + + // ok, now set PR_SET_NO_NEW_PRIVS, needed to be able to set a seccomp filter as ordinary user + if (linuxLibc.prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) { + throw new UnsupportedOperationException("prctl(PR_SET_NO_NEW_PRIVS): " + libc.strerror(libc.errno())); + } + + // check it worked + if (linuxLibc.prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0) != 1) { + throw new UnsupportedOperationException( + "seccomp filter did not really succeed: prctl(PR_GET_NO_NEW_PRIVS): " + libc.strerror(libc.errno()) + ); + } + + // BPF installed to check arch, limit, then syscall. + // See https://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt for details. + SockFilter insns[] = { + /* 1 */ BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_ARCH_OFFSET), // + /* 2 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.audit, 0, 7), // if (arch != audit) goto fail; + /* 3 */ BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_NR_OFFSET), // + /* 4 */ BPF_JUMP(BPF_JMP + BPF_JGT + BPF_K, arch.limit, 5, 0), // if (syscall > LIMIT) goto fail; + /* 5 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.fork, 4, 0), // if (syscall == FORK) goto fail; + /* 6 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.vfork, 3, 0), // if (syscall == VFORK) goto fail; + /* 7 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.execve, 2, 0), // if (syscall == EXECVE) goto fail; + /* 8 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.execveat, 1, 0), // if (syscall == EXECVEAT) goto fail; + /* 9 */ BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ALLOW), // pass: return OK; + /* 10 */ BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ERRNO | (EACCES & SECCOMP_RET_DATA)), // fail: return EACCES; + }; + // seccomp takes a long, so we pass it one explicitly to keep the JNA simple + SockFProg prog = linuxLibc.newSockFProg(insns); + + int method = 1; + // install filter, if this works, after this there is no going back! + // first try it with seccomp(SECCOMP_SET_MODE_FILTER), falling back to prctl() + if (linuxLibc.syscall(arch.seccomp, SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, prog.address()) != 0) { + method = 0; + int errno1 = libc.errno(); + if (logger.isDebugEnabled()) { + logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): {}, falling back to prctl(PR_SET_SECCOMP)...", libc.strerror(errno1)); + } + if (linuxLibc.prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, prog.address(), 0, 0) != 0) { + int errno2 = libc.errno(); + throw new UnsupportedOperationException( + "seccomp(SECCOMP_SET_MODE_FILTER): " + libc.strerror(errno1) + ", prctl(PR_SET_SECCOMP): " + libc.strerror(errno2) + ); + } + } + + // now check that the filter was really installed, we should be in filter mode. + if (linuxLibc.prctl(PR_GET_SECCOMP, 0, 0, 0, 0) != 2) { + throw new UnsupportedOperationException( + "seccomp filter installation did not really succeed. seccomp(PR_GET_SECCOMP): " + libc.strerror(libc.errno()) + ); + } + + logger.debug("Linux seccomp filter installation successful, threads: [{}]", method == 1 ? "all" : "app"); + execSandboxState = method == 1 ? 
ExecSandboxState.ALL_THREADS : ExecSandboxState.EXISTING_THREADS; + } } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/MacNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/MacNativeAccess.java index 0388c66d3962f..f277c69de3192 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/MacNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/MacNativeAccess.java @@ -8,12 +8,35 @@ package org.elasticsearch.nativeaccess; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.nativeaccess.lib.MacCLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; +import org.elasticsearch.nativeaccess.lib.PosixCLibrary.RLimit; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; class MacNativeAccess extends PosixNativeAccess { + private static final int F_PREALLOCATE = 42; + private static final int F_ALLOCATECONTIG = 0x2; // allocate contiguous space + private static final int F_ALLOCATEALL = 0x4; // allocate all the requested space or no space at all + private static final int F_PEOFPOSMODE = 3; // allocate from the physical end of the file + + /** The only supported flag... */ + static final int SANDBOX_NAMED = 1; + /** Allow everything except process fork and execution */ + static final String SANDBOX_RULES = "(version 1) (allow default) (deny process-fork) (deny process-exec)"; + + private final MacCLibrary macLibc; + MacNativeAccess(NativeLibraryProvider libraryProvider) { - super("MacOS", libraryProvider, new PosixConstants(9223372036854775807L, 5, 1, 6)); + super("MacOS", libraryProvider, new PosixConstants(9223372036854775807L, 5, 1, 6, 512, 144, 96, 104)); + this.macLibc = libraryProvider.getLibrary(MacCLibrary.class); } @Override @@ -25,4 +48,94 @@ protected long getMaxThreads() { protected void logMemoryLimitInstructions() { // we don't have instructions for macos } + + @Override + protected boolean nativePreallocate(int fd, long currentSize, long newSize) { + var fst = libc.newFStore(); + fst.set_flags(F_ALLOCATECONTIG); + fst.set_posmode(F_PEOFPOSMODE); + fst.set_offset(0); + fst.set_length(newSize); + // first, try allocating contiguously + if (libc.fcntl(fd, F_PREALLOCATE, fst) != 0) { + // TODO: log warning? + // that failed, so let us try allocating non-contiguously + fst.set_flags(F_ALLOCATEALL); + if (libc.fcntl(fd, F_PREALLOCATE, fst) != 0) { + // i'm afraid captain dale had to bail + logger.warn("Could not allocate non-contiguous size: " + libc.strerror(libc.errno())); + return false; + } + } + if (libc.ftruncate(fd, newSize) != 0) { + logger.warn("Could not truncate file: " + libc.strerror(libc.errno())); + return false; + } + return true; + } + + /** + * Installs exec system call filtering on MacOS. + *

+ * Two different methods of filtering are used. Since MacOS is BSD based, process creation
+ * is first restricted with {@code setrlimit(RLIMIT_NPROC)}.
+ * <p>
+ * Additionally, on Mac OS X Leopard or above, a custom {@code sandbox(7)} ("Seatbelt") profile is installed that
+ * denies the following rules:
+ * <ul>
+ *   <li>{@code process-fork}</li>
+ *   <li>{@code process-exec}</li>
+ * </ul>
+ * @see + * * https://reverse.put.as/wp-content/uploads/2011/06/The-Apple-Sandbox-BHDC2011-Paper.pdf + */ + @Override + public void tryInstallExecSandbox() { + initBsdSandbox(); + initMacSandbox(); + execSandboxState = ExecSandboxState.ALL_THREADS; + } + + @SuppressForbidden(reason = "Java tmp dir is ok") + private static Path createTempRulesFile() throws IOException { + return Files.createTempFile("es", "sb"); + } + + private void initMacSandbox() { + // write rules to a temporary file, which will be passed to sandbox_init() + Path rules; + try { + rules = createTempRulesFile(); + Files.write(rules, Collections.singleton(SANDBOX_RULES)); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + + try { + var errorRef = macLibc.newErrorReference(); + int ret = macLibc.sandbox_init(rules.toAbsolutePath().toString(), SANDBOX_NAMED, errorRef); + // if sandbox_init() fails, add the message from the OS (e.g. syntax error) and free the buffer + if (ret != 0) { + RuntimeException e = new UnsupportedOperationException("sandbox_init(): " + errorRef.toString()); + macLibc.sandbox_free_error(errorRef); + throw e; + } + logger.debug("OS X seatbelt initialization successful"); + } finally { + IOUtils.deleteFilesIgnoringExceptions(rules); + } + } + + private void initBsdSandbox() { + RLimit limit = libc.newRLimit(); + limit.rlim_cur(0); + limit.rlim_max(0); + // not a standard limit, means something different on linux, etc! + final int RLIMIT_NPROC = 7; + if (libc.setrlimit(RLIMIT_NPROC, limit) != 0) { + throw new UnsupportedOperationException("RLIMIT_NPROC unavailable: " + libc.strerror(libc.errno())); + } + + logger.debug("BSD RLIMIT_NPROC initialization successful"); + } } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java index 7f91d0425af47..0534bc10e910a 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java @@ -8,7 +8,9 @@ package org.elasticsearch.nativeaccess; +import java.nio.file.Path; import java.util.Optional; +import java.util.OptionalLong; /** * Provides access to native functionality needed by Elastisearch. @@ -44,6 +46,16 @@ static NativeAccess instance() { */ boolean isMemoryLocked(); + /** + * Attempts to install a system call filter to block process execution. + */ + void tryInstallExecSandbox(); + + /** + * Return whether installing the exec system call filters was successful, and to what degree. + */ + ExecSandboxState getExecSandboxState(); + Systemd systemd(); /** @@ -52,6 +64,16 @@ static NativeAccess instance() { */ Zstd getZstd(); + /** + * Retrieves the actual number of bytes of disk storage used to store a specified file. + * + * @param path the path to the file + * @return an {@link OptionalLong} that contains the number of allocated bytes on disk for the file, or empty if the size is invalid + */ + OptionalLong allocatedSizeInBytes(Path path); + + void tryPreallocate(Path file, long size); + /** * Returns an accessor for native functions only available on Windows, or {@code null} if not on Windows. */ @@ -71,4 +93,16 @@ default WindowsFunctions getWindowsFunctions() { * @return the buffer */ CloseableByteBuffer newBuffer(int len); + + /** + * Possible stats for execution filtering. 
+ */ + enum ExecSandboxState { + /** No execution filtering */ + NONE, + /** Exec is blocked for threads that were already created */ + EXISTING_THREADS, + /** Exec is blocked for all current and future threads */ + ALL_THREADS + } } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NoopNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NoopNativeAccess.java index c0eed4a9ce09b..ffe65548eeb44 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/NoopNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/NoopNativeAccess.java @@ -11,7 +11,9 @@ import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; +import java.nio.file.Path; import java.util.Optional; +import java.util.OptionalLong; class NoopNativeAccess implements NativeAccess { @@ -41,6 +43,27 @@ public boolean isMemoryLocked() { return false; } + @Override + public void tryInstallExecSandbox() { + logger.warn("Cannot install system call filter because native access is not available"); + } + + @Override + public ExecSandboxState getExecSandboxState() { + return ExecSandboxState.NONE; + } + + @Override + public OptionalLong allocatedSizeInBytes(Path path) { + logger.warn("Cannot get allocated size of file [" + path + "] because native access is not available"); + return OptionalLong.empty(); + } + + @Override + public void tryPreallocate(Path file, long size) { + logger.warn("Cannot preallocate file size because native access is not available"); + } + @Override public Systemd systemd() { logger.warn("Cannot get systemd access because native access is not available"); diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixConstants.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixConstants.java index 4695ce9ad899c..e767e2b3713ec 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixConstants.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixConstants.java @@ -11,4 +11,13 @@ /** * Code constants on POSIX systems. 
*/ -record PosixConstants(long RLIMIT_INFINITY, int RLIMIT_AS, int RLIMIT_FSIZE, int RLIMIT_MEMLOCK) {} +record PosixConstants( + long RLIMIT_INFINITY, + int RLIMIT_AS, + int RLIMIT_FSIZE, + int RLIMIT_MEMLOCK, + int O_CREAT, + int statStructSize, + int statStructSizeOffset, + int statStructBlocksOffset +) {} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java index 8f53d1ec4da64..2ce09e567c284 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java @@ -12,12 +12,17 @@ import org.elasticsearch.nativeaccess.lib.PosixCLibrary; import org.elasticsearch.nativeaccess.lib.VectorLibrary; +import java.nio.file.Files; +import java.nio.file.Path; import java.util.Optional; +import java.util.OptionalLong; abstract class PosixNativeAccess extends AbstractNativeAccess { public static final int MCL_CURRENT = 1; public static final int ENOMEM = 12; + public static final int O_RDONLY = 0; + public static final int O_WRONLY = 1; protected final PosixCLibrary libc; protected final VectorSimilarityFunctions vectorDistance; @@ -121,6 +126,52 @@ public void tryLockMemory() { protected abstract void logMemoryLimitInstructions(); + @Override + public OptionalLong allocatedSizeInBytes(Path path) { + assert Files.isRegularFile(path) : path; + var stats = libc.newStat64(constants.statStructSize(), constants.statStructSizeOffset(), constants.statStructBlocksOffset()); + + int fd = libc.open(path.toAbsolutePath().toString(), O_RDONLY); + if (fd == -1) { + logger.warn("Could not open file [" + path + "] to get allocated size: " + libc.strerror(libc.errno())); + return OptionalLong.empty(); + } + + if (libc.fstat64(fd, stats) != 0) { + logger.warn("Could not get stats for file [" + path + "] to get allocated size: " + libc.strerror(libc.errno())); + return OptionalLong.empty(); + } + if (libc.close(fd) != 0) { + logger.warn("Failed to close file [" + path + "] after getting stats: " + libc.strerror(libc.errno())); + } + return OptionalLong.of(stats.st_blocks() * 512); + } + + @Override + public void tryPreallocate(Path file, long newSize) { + // get fd and current size, then pass to OS variant + int fd = libc.open(file.toAbsolutePath().toString(), O_WRONLY, constants.O_CREAT()); + if (fd == -1) { + logger.warn("Could not open file [" + file + "] to preallocate size: " + libc.strerror(libc.errno())); + return; + } + + var stats = libc.newStat64(constants.statStructSize(), constants.statStructSizeOffset(), constants.statStructBlocksOffset()); + if (libc.fstat64(fd, stats) != 0) { + logger.warn("Could not get stats for file [" + file + "] to preallocate size: " + libc.strerror(libc.errno())); + } else { + if (nativePreallocate(fd, stats.st_size(), newSize)) { + logger.debug("pre-allocated file [{}] to {} bytes", file, newSize); + } // OS specific preallocate logs its own errors + } + + if (libc.close(fd) != 0) { + logger.warn("Could not close file [" + file + "] after trying to preallocate size: " + libc.strerror(libc.errno())); + } + } + + protected abstract boolean nativePreallocate(int fd, long currentSize, long newSize); + @Override public Optional getVectorSimilarityFunctions() { return Optional.ofNullable(vectorDistance); diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/WindowsNativeAccess.java 
b/libs/native/src/main/java/org/elasticsearch/nativeaccess/WindowsNativeAccess.java index 843cc73fbed02..5b4a5abad3e0a 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/WindowsNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/WindowsNativeAccess.java @@ -12,7 +12,11 @@ import org.elasticsearch.nativeaccess.lib.Kernel32Library.Handle; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; +import java.nio.file.Files; +import java.nio.file.Path; import java.util.Optional; +import java.util.OptionalLong; +import java.util.concurrent.atomic.AtomicInteger; import static java.lang.management.ManagementFactory.getMemoryMXBean; @@ -27,6 +31,18 @@ class WindowsNativeAccess extends AbstractNativeAccess { public static final int PAGE_GUARD = 0x0100; public static final int MEM_COMMIT = 0x1000; + private static final int INVALID_FILE_SIZE = -1; + + /** + * Constant for JOBOBJECT_BASIC_LIMIT_INFORMATION in Query/Set InformationJobObject + */ + private static final int JOBOBJECT_BASIC_LIMIT_INFORMATION_CLASS = 2; + + /** + * Constant for LimitFlags, indicating a process limit has been set + */ + private static final int JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 8; + private final Kernel32Library kernel; private final WindowsFunctions windowsFunctions; @@ -68,6 +84,78 @@ public void tryLockMemory() { // note: no need to close the process handle because GetCurrentProcess returns a pseudo handle } + /** + * Install exec system call filtering on Windows. + *
<p>
+ * Process creation is restricted with {@code SetInformationJobObject/ActiveProcessLimit}.
+ * <p>
+ * Note: This is not intended as a real sandbox. It is another level of security, mostly intended to annoy + * security researchers and make their lives more difficult in achieving "remote execution" exploits. + */ + @Override + public void tryInstallExecSandbox() { + // create a new Job + Handle job = kernel.CreateJobObjectW(); + if (job == null) { + throw new UnsupportedOperationException("CreateJobObject: " + kernel.GetLastError()); + } + + try { + // retrieve the current basic limits of the job + int clazz = JOBOBJECT_BASIC_LIMIT_INFORMATION_CLASS; + var info = kernel.newJobObjectBasicLimitInformation(); + if (kernel.QueryInformationJobObject(job, clazz, info) == false) { + throw new UnsupportedOperationException("QueryInformationJobObject: " + kernel.GetLastError()); + } + // modify the number of active processes to be 1 (exactly the one process we will add to the job). + info.setActiveProcessLimit(1); + info.setLimitFlags(JOB_OBJECT_LIMIT_ACTIVE_PROCESS); + if (kernel.SetInformationJobObject(job, clazz, info) == false) { + throw new UnsupportedOperationException("SetInformationJobObject: " + kernel.GetLastError()); + } + // assign ourselves to the job + if (kernel.AssignProcessToJobObject(job, kernel.GetCurrentProcess()) == false) { + throw new UnsupportedOperationException("AssignProcessToJobObject: " + kernel.GetLastError()); + } + } finally { + kernel.CloseHandle(job); + } + + execSandboxState = ExecSandboxState.ALL_THREADS; + logger.debug("Windows ActiveProcessLimit initialization successful"); + } + + @Override + public OptionalLong allocatedSizeInBytes(Path path) { + assert Files.isRegularFile(path) : path; + String fileName = "\\\\?\\" + path; + AtomicInteger lpFileSizeHigh = new AtomicInteger(); + + final int lpFileSizeLow = kernel.GetCompressedFileSizeW(fileName, lpFileSizeHigh::set); + if (lpFileSizeLow == INVALID_FILE_SIZE) { + logger.warn("Unable to get allocated size of file [{}]. 
Error code {}", path, kernel.GetLastError()); + return OptionalLong.empty(); + } + + // convert lpFileSizeLow to unsigned long and combine with signed/shifted lpFileSizeHigh + final long allocatedSize = (((long) lpFileSizeHigh.get()) << Integer.SIZE) | Integer.toUnsignedLong(lpFileSizeLow); + if (logger.isTraceEnabled()) { + logger.trace( + "executing native method GetCompressedFileSizeW returned [high={}, low={}, allocated={}] for file [{}]", + lpFileSizeHigh.get(), + lpFileSizeLow, + allocatedSize, + path + ); + } + return OptionalLong.of(allocatedSize); + } + + @Override + public void tryPreallocate(Path file, long size) { + logger.warn("Cannot preallocate file size because operation is not available on Windows"); + } + @Override public ProcessLimits getProcessLimits() { return new ProcessLimits(ProcessLimits.UNKNOWN, ProcessLimits.UNKNOWN, ProcessLimits.UNKNOWN); diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/Kernel32Library.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/Kernel32Library.java index 43337f4532bed..f35d9fde5950d 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/Kernel32Library.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/Kernel32Library.java @@ -10,6 +10,8 @@ import org.elasticsearch.nativeaccess.WindowsFunctions.ConsoleCtrlHandler; +import java.util.function.IntConsumer; + public non-sealed interface Kernel32Library extends NativeLibrary { interface Handle {} @@ -81,6 +83,17 @@ interface MemoryBasicInformation { */ boolean SetProcessWorkingSetSize(Handle handle, long minSize, long maxSize); + /** + * Retrieves the actual number of bytes of disk storage used to store a specified file. + * + * https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getcompressedfilesizew + * + * @param lpFileName the path string + * @param lpFileSizeHigh pointer to high-order DWORD for compressed file size (or null if not needed) + * @return the low-order DWORD for compressed file size + */ + int GetCompressedFileSizeW(String lpFileName, IntConsumer lpFileSizeHigh); + /** * Retrieves the short path form of the specified path. 
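The unsigned low/high DWORD recombination in the Windows allocatedSizeInBytes above is easy to get wrong, so here is the same arithmetic as a standalone sketch with made-up values:

// Hypothetical values: GetCompressedFileSizeW reporting a 7 GiB allocation as two DWORDs.
int lpFileSizeHigh = 1;          // high-order 32 bits
int lpFileSizeLow = 0xC0000000;  // low-order 32 bits; negative when read as a signed int
// Shift the high word by Integer.SIZE (32) and OR in the low word converted as unsigned,
// as WindowsNativeAccess does; a plain (long) cast of the low word would sign-extend.
long allocated = (((long) lpFileSizeHigh) << Integer.SIZE) | Integer.toUnsignedLong(lpFileSizeLow);
// allocated == 0x1_C000_0000L == 7_516_192_768 bytes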
* @@ -101,4 +114,65 @@ interface MemoryBasicInformation { * @see SetConsoleCtrlHandler docs */ boolean SetConsoleCtrlHandler(ConsoleCtrlHandler handler, boolean add); + + /** + * Creates or opens a new job object + * + * https://msdn.microsoft.com/en-us/library/windows/desktop/ms682409%28v=vs.85%29.aspx + * Note: the two params to this are omitted because all implementations pass null for them both + * + * @return job handle if the function succeeds + */ + Handle CreateJobObjectW(); + + /** + * Associates a process with an existing job + * + * https://msdn.microsoft.com/en-us/library/windows/desktop/ms681949%28v=vs.85%29.aspx + * + * @param job job handle + * @param process process handle + * @return true if the function succeeds + */ + boolean AssignProcessToJobObject(Handle job, Handle process); + + /** + * Basic limit information for a job object + * + * https://msdn.microsoft.com/en-us/library/windows/desktop/ms684147%28v=vs.85%29.aspx + */ + interface JobObjectBasicLimitInformation { + void setLimitFlags(int v); + + void setActiveProcessLimit(int v); + } + + JobObjectBasicLimitInformation newJobObjectBasicLimitInformation(); + + /** + * Get job limit and state information + * + * https://msdn.microsoft.com/en-us/library/windows/desktop/ms684925%28v=vs.85%29.aspx + * Note: The infoLength parameter is omitted because implementations handle passing it + * Note: The returnLength parameter is omitted because all implementations pass null + * + * @param job job handle + * @param infoClass information class constant + * @param info pointer to information structure + * @return true if the function succeeds + */ + boolean QueryInformationJobObject(Handle job, int infoClass, JobObjectBasicLimitInformation info); + + /** + * Set job limit and state information + * + * https://msdn.microsoft.com/en-us/library/windows/desktop/ms686216%28v=vs.85%29.aspx + * Note: The infoLength parameter is omitted because implementations handle passing it + * + * @param job job handle + * @param infoClass information class constant + * @param info pointer to information structure + * @return true if the function succeeds + */ + boolean SetInformationJobObject(Handle job, int infoClass, JobObjectBasicLimitInformation info); } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LinuxCLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LinuxCLibrary.java new file mode 100644 index 0000000000000..8a2917e136bde --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LinuxCLibrary.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.nativeaccess.lib; + +public non-sealed interface LinuxCLibrary extends NativeLibrary { + + /** + * Corresponds to struct sock_filter + * @param code insn + * @param jt number of insn to jump (skip) if true + * @param jf number of insn to jump (skip) if false + * @param k additional data + */ + record SockFilter(short code, byte jt, byte jf, int k) {} + + interface SockFProg { + long address(); + } + + SockFProg newSockFProg(SockFilter filters[]); + + /** + * maps to prctl(2) + */ + int prctl(int option, long arg2, long arg3, long arg4, long arg5); + + /** + * used to call seccomp(2), its too new... + * this is the only way, DON'T use it on some other architecture unless you know wtf you are doing + */ + long syscall(long number, int operation, int flags, long address); + + int fallocate(int fd, int mode, long offset, long length); +} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/MacCLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/MacCLibrary.java new file mode 100644 index 0000000000000..b2b2db9c71c90 --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/MacCLibrary.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.lib; + +public non-sealed interface MacCLibrary extends NativeLibrary { + interface ErrorReference {} + + ErrorReference newErrorReference(); + + /** + * maps to sandbox_init(3), since Leopard + */ + int sandbox_init(String profile, long flags, ErrorReference errorbuf); + + /** + * releases memory when an error occurs during initialization (e.g. 
syntax bug) + */ + void sandbox_free_error(ErrorReference errorbuf); +} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java index d8098a78935b8..faa0e861dc63f 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java @@ -9,4 +9,5 @@ package org.elasticsearch.nativeaccess.lib; /** A marker interface for libraries that can be loaded by {@link org.elasticsearch.nativeaccess.lib.NativeLibraryProvider} */ -public sealed interface NativeLibrary permits JavaLibrary, PosixCLibrary, Kernel32Library, SystemdLibrary, VectorLibrary, ZstdLibrary {} +public sealed interface NativeLibrary permits JavaLibrary, PosixCLibrary, LinuxCLibrary, MacCLibrary, Kernel32Library, SystemdLibrary, + VectorLibrary, ZstdLibrary {} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java index 96e2a0d0e1cdf..0e7d07d0ad623 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java @@ -26,6 +26,10 @@ interface RLimit { long rlim_cur(); long rlim_max(); + + void rlim_cur(long v); + + void rlim_max(long v); } /** @@ -41,6 +45,8 @@ interface RLimit { */ int getrlimit(int resource, RLimit rlimit); + int setrlimit(int resource, RLimit rlimit); + /** * Lock all the current process's virtual address space into RAM. * @param flags flags determining how memory will be locked @@ -49,6 +55,41 @@ interface RLimit { */ int mlockall(int flags); + /** corresponds to struct stat64 */ + interface Stat64 { + long st_size(); + + long st_blocks(); + } + + Stat64 newStat64(int sizeof, int stSizeOffset, int stBlocksOffset); + + int open(String pathname, int flags, int mode); + + int open(String pathname, int flags); + + int close(int fd); + + int fstat64(int fd, Stat64 stats); + + int ftruncate(int fd, long length); + + interface FStore { + void set_flags(int flags); /* IN: flags word */ + + void set_posmode(int posmode); /* IN: indicates offset field */ + + void set_offset(long offset); /* IN: start of the region */ + + void set_length(long length); /* IN: size of the region */ + + long bytesalloc(); /* OUT: number of bytes allocated */ + } + + FStore newFStore(); + + int fcntl(int fd, int cmd, FStore fst); + /** * Return a string description for an error. 
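Taken together, the new PosixCLibrary primitives (open, fstat64, fcntl with an FStore, ftruncate, close) are enough to express the preallocation flow that the deleted libs/preallocate JNA code performed on macOS. A rough sketch, reusing the constants from the old MacOsPreallocator (F_PREALLOCATE = 42, F_ALLOCATECONTIG = 0x2, F_ALLOCATEALL = 0x4, F_PEOFPOSMODE = 3); the helper class itself is illustrative:

import org.elasticsearch.nativeaccess.lib.PosixCLibrary;

// Illustrative helper; only the PosixCLibrary methods come from this diff.
final class MacPreallocateSketch {
    static boolean preallocate(PosixCLibrary libc, int fd, long fileSize) {
        PosixCLibrary.FStore fst = libc.newFStore();
        fst.set_flags(0x00000002);     // F_ALLOCATECONTIG: try contiguous space first
        fst.set_posmode(3);            // F_PEOFPOSMODE: allocate from the physical end of file
        fst.set_offset(0);
        fst.set_length(fileSize);
        if (libc.fcntl(fd, 42 /* F_PREALLOCATE */, fst) != 0) {
            fst.set_flags(0x00000004); // F_ALLOCATEALL: fall back to non-contiguous allocation
            if (libc.fcntl(fd, 42, fst) != 0) {
                return false;          // caller can report libc.strerror(libc.errno())
            }
        }
        // extend the logical length to match what was reserved
        return libc.ftruncate(fd, fileSize) == 0;
    }
}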
* diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkKernel32Library.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkKernel32Library.java index bbfd26bd061d0..a3ddc0d59890d 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkKernel32Library.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkKernel32Library.java @@ -20,6 +20,7 @@ import java.lang.invoke.MethodHandle; import java.lang.invoke.VarHandle; import java.nio.charset.StandardCharsets; +import java.util.function.IntConsumer; import static java.lang.foreign.MemoryLayout.PathElement.groupElement; import static java.lang.foreign.MemoryLayout.paddingLayout; @@ -57,6 +58,10 @@ class JdkKernel32Library implements Kernel32Library { "SetProcessWorkingSetSize", FunctionDescriptor.of(ADDRESS, JAVA_LONG, JAVA_LONG) ); + private static final MethodHandle GetCompressedFileSizeW$mh = downcallHandleWithError( + "GetCompressedFileSizeW", + FunctionDescriptor.of(JAVA_INT, ADDRESS, ADDRESS) + ); private static final MethodHandle GetShortPathNameW$mh = downcallHandleWithError( "GetShortPathNameW", FunctionDescriptor.of(JAVA_INT, ADDRESS, ADDRESS, JAVA_INT) @@ -72,6 +77,22 @@ class JdkKernel32Library implements Kernel32Library { "handle", ConsoleCtrlHandler_handle$fd ); + private static final MethodHandle CreateJobObjectW$mh = downcallHandleWithError( + "CreateJobObjectW", + FunctionDescriptor.of(ADDRESS, ADDRESS, ADDRESS) + ); + private static final MethodHandle AssignProcessToJobObject$mh = downcallHandleWithError( + "AssignProcessToJobObject", + FunctionDescriptor.of(JAVA_BOOLEAN, ADDRESS, ADDRESS) + ); + private static final MethodHandle QueryInformationJobObject$mh = downcallHandleWithError( + "QueryInformationJobObject", + FunctionDescriptor.of(JAVA_BOOLEAN, ADDRESS, JAVA_INT, ADDRESS, JAVA_INT, ADDRESS) + ); + private static final MethodHandle SetInformationJobObject$mh = downcallHandleWithError( + "SetInformationJobObject", + FunctionDescriptor.of(JAVA_BOOLEAN, ADDRESS, JAVA_INT, ADDRESS, JAVA_INT) + ); private static MethodHandle downcallHandleWithError(String function, FunctionDescriptor functionDescriptor) { return downcallHandle(function, functionDescriptor, CAPTURE_GETLASTERROR_OPTION); @@ -146,6 +167,37 @@ public long Type() { } } + static class JdkJobObjectBasicLimitInformation implements JobObjectBasicLimitInformation { + private static final MemoryLayout layout = MemoryLayout.structLayout( + paddingLayout(16), + JAVA_INT, + paddingLayout(20), + JAVA_INT, + paddingLayout(20) + ).withByteAlignment(8); + + private static final VarHandle LimitFlags$vh = varHandleWithoutOffset(layout, groupElement(1)); + private static final VarHandle ActiveProcessLimit$vh = varHandleWithoutOffset(layout, groupElement(3)); + + private final MemorySegment segment; + + JdkJobObjectBasicLimitInformation() { + var arena = Arena.ofAuto(); + this.segment = arena.allocate(layout); + segment.fill((byte) 0); + } + + @Override + public void setLimitFlags(int v) { + LimitFlags$vh.set(segment, v); + } + + @Override + public void setActiveProcessLimit(int v) { + ActiveProcessLimit$vh.set(segment, v); + } + } + private final MemorySegment lastErrorState; JdkKernel32Library() { @@ -229,6 +281,20 @@ public boolean SetProcessWorkingSetSize(Handle process, long minSize, long maxSi } } + @Override + public int GetCompressedFileSizeW(String lpFileName, IntConsumer lpFileSizeHigh) { + try (Arena arena = Arena.ofConfined()) { + MemorySegment wideFileName = 
ArenaUtil.allocateFrom(arena, lpFileName + "\0", StandardCharsets.UTF_16LE); + MemorySegment fileSizeHigh = arena.allocate(JAVA_INT); + + int ret = (int) GetCompressedFileSizeW$mh.invokeExact(lastErrorState, wideFileName, fileSizeHigh); + lpFileSizeHigh.accept(fileSizeHigh.get(JAVA_INT, 0)); + return ret; + } catch (Throwable t) { + throw new AssertionError(t); + } + } + @Override public int GetShortPathNameW(String lpszLongPath, char[] lpszShortPath, int cchBuffer) { try (Arena arena = Arena.ofConfined()) { @@ -262,4 +328,73 @@ public boolean SetConsoleCtrlHandler(ConsoleCtrlHandler handler, boolean add) { throw new AssertionError(t); } } + + @Override + public Handle CreateJobObjectW() { + try { + return new JdkHandle((MemorySegment) CreateJobObjectW$mh.invokeExact(lastErrorState, MemorySegment.NULL, MemorySegment.NULL)); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public boolean AssignProcessToJobObject(Handle job, Handle process) { + assert job instanceof JdkHandle; + assert process instanceof JdkHandle; + var jdkJob = (JdkHandle) job; + var jdkProcess = (JdkHandle) process; + + try { + return (boolean) AssignProcessToJobObject$mh.invokeExact(lastErrorState, jdkJob.address, jdkProcess.address); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public JobObjectBasicLimitInformation newJobObjectBasicLimitInformation() { + return new JdkJobObjectBasicLimitInformation(); + } + + @Override + public boolean QueryInformationJobObject(Handle job, int infoClass, JobObjectBasicLimitInformation info) { + assert job instanceof JdkHandle; + assert info instanceof JdkJobObjectBasicLimitInformation; + var jdkJob = (JdkHandle) job; + var jdkInfo = (JdkJobObjectBasicLimitInformation) info; + + try { + return (boolean) QueryInformationJobObject$mh.invokeExact( + lastErrorState, + jdkJob.address, + infoClass, + jdkInfo.segment, + (int) jdkInfo.segment.byteSize(), + MemorySegment.NULL + ); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public boolean SetInformationJobObject(Handle job, int infoClass, JobObjectBasicLimitInformation info) { + assert job instanceof JdkHandle; + assert info instanceof JdkJobObjectBasicLimitInformation; + var jdkJob = (JdkHandle) job; + var jdkInfo = (JdkJobObjectBasicLimitInformation) info; + + try { + return (boolean) SetInformationJobObject$mh.invokeExact( + lastErrorState, + jdkJob.address, + infoClass, + jdkInfo.segment, + (int) jdkInfo.segment.byteSize() + ); + } catch (Throwable t) { + throw new AssertionError(t); + } + } } diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkLinuxCLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkLinuxCLibrary.java new file mode 100644 index 0000000000000..a31f212eab382 --- /dev/null +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkLinuxCLibrary.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.nativeaccess.jdk; + +import org.elasticsearch.nativeaccess.lib.LinuxCLibrary; + +import java.lang.foreign.Arena; +import java.lang.foreign.FunctionDescriptor; +import java.lang.foreign.Linker; +import java.lang.foreign.MemoryLayout; +import java.lang.foreign.MemorySegment; +import java.lang.invoke.MethodHandle; + +import static java.lang.foreign.MemoryLayout.paddingLayout; +import static java.lang.foreign.ValueLayout.ADDRESS; +import static java.lang.foreign.ValueLayout.JAVA_BYTE; +import static java.lang.foreign.ValueLayout.JAVA_INT; +import static java.lang.foreign.ValueLayout.JAVA_LONG; +import static java.lang.foreign.ValueLayout.JAVA_SHORT; +import static org.elasticsearch.nativeaccess.jdk.JdkPosixCLibrary.CAPTURE_ERRNO_OPTION; +import static org.elasticsearch.nativeaccess.jdk.JdkPosixCLibrary.downcallHandleWithErrno; +import static org.elasticsearch.nativeaccess.jdk.JdkPosixCLibrary.errnoState; +import static org.elasticsearch.nativeaccess.jdk.LinkerHelper.downcallHandle; + +class JdkLinuxCLibrary implements LinuxCLibrary { + private static final MethodHandle prctl$mh; + static { + try { + prctl$mh = downcallHandleWithErrno( + "prctl", + FunctionDescriptor.of(JAVA_INT, JAVA_INT, JAVA_LONG, JAVA_LONG, JAVA_LONG, JAVA_LONG) + ); + } catch (UnsatisfiedLinkError e) { + throw new UnsupportedOperationException( + "seccomp unavailable: could not link methods. requires kernel 3.5+ " + + "with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in" + ); + } + } + private static final MethodHandle syscall$mh = downcallHandle( + "syscall", + FunctionDescriptor.of(JAVA_LONG, JAVA_LONG, JAVA_INT, JAVA_INT, JAVA_LONG), + CAPTURE_ERRNO_OPTION, + Linker.Option.firstVariadicArg(1) + ); + private static final MethodHandle fallocate$mh = downcallHandleWithErrno( + "fallocate", + FunctionDescriptor.of(JAVA_INT, JAVA_INT, JAVA_INT, JAVA_LONG, JAVA_LONG) + ); + + private static class JdkSockFProg implements SockFProg { + private static final MemoryLayout layout = MemoryLayout.structLayout(JAVA_SHORT, paddingLayout(6), ADDRESS); + + private final MemorySegment segment; + + JdkSockFProg(SockFilter filters[]) { + Arena arena = Arena.ofAuto(); + this.segment = arena.allocate(layout); + var instSegment = arena.allocate(filters.length * 8L); + segment.set(JAVA_SHORT, 0, (short) filters.length); + segment.set(ADDRESS, 8, instSegment); + + int offset = 0; + for (SockFilter f : filters) { + instSegment.set(JAVA_SHORT, offset, f.code()); + instSegment.set(JAVA_BYTE, offset + 2, f.jt()); + instSegment.set(JAVA_BYTE, offset + 3, f.jf()); + instSegment.set(JAVA_INT, offset + 4, f.k()); + offset += 8; + } + } + + @Override + public long address() { + return segment.address(); + } + } + + @Override + public SockFProg newSockFProg(SockFilter[] filters) { + return new JdkSockFProg(filters); + } + + @Override + public int prctl(int option, long arg2, long arg3, long arg4, long arg5) { + try { + return (int) prctl$mh.invokeExact(errnoState, option, arg2, arg3, arg4, arg5); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public long syscall(long number, int operation, int flags, long address) { + try { + return (long) syscall$mh.invokeExact(errnoState, number, operation, flags, address); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public int fallocate(int fd, int mode, long offset, long length) { + try { + return (int) fallocate$mh.invokeExact(errnoState, fd, mode, offset, length); + } catch (Throwable t) { + throw new 
AssertionError(t); + } + } +} diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkMacCLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkMacCLibrary.java new file mode 100644 index 0000000000000..b946ca3ca4353 --- /dev/null +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkMacCLibrary.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.jdk; + +import org.elasticsearch.nativeaccess.lib.MacCLibrary; + +import java.lang.foreign.Arena; +import java.lang.foreign.FunctionDescriptor; +import java.lang.foreign.MemorySegment; +import java.lang.foreign.ValueLayout; +import java.lang.invoke.MethodHandle; + +import static java.lang.foreign.ValueLayout.ADDRESS; +import static java.lang.foreign.ValueLayout.JAVA_INT; +import static java.lang.foreign.ValueLayout.JAVA_LONG; +import static org.elasticsearch.nativeaccess.jdk.LinkerHelper.downcallHandle; + +class JdkMacCLibrary implements MacCLibrary { + + private static final MethodHandle sandbox_init$mh = downcallHandle( + "sandbox_init", + FunctionDescriptor.of(JAVA_INT, ADDRESS, JAVA_LONG, ADDRESS) + ); + private static final MethodHandle sandbox_free_error$mh = downcallHandle("sandbox_free_error", FunctionDescriptor.ofVoid(ADDRESS)); + + private static class JdkErrorReference implements ErrorReference { + final Arena arena = Arena.ofConfined(); + final MemorySegment segment = arena.allocate(ValueLayout.ADDRESS); + + MemorySegment deref() { + return segment.get(ADDRESS, 0); + } + + @Override + public String toString() { + return deref().reinterpret(Long.MAX_VALUE).getUtf8String(0); + } + } + + @Override + public ErrorReference newErrorReference() { + return new JdkErrorReference(); + } + + @Override + public int sandbox_init(String profile, long flags, ErrorReference errorbuf) { + assert errorbuf instanceof JdkErrorReference; + var jdkErrorbuf = (JdkErrorReference) errorbuf; + try (Arena arena = Arena.ofConfined()) { + MemorySegment nativeProfile = MemorySegmentUtil.allocateString(arena, profile); + return (int) sandbox_init$mh.invokeExact(nativeProfile, flags, jdkErrorbuf.segment); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public void sandbox_free_error(ErrorReference errorbuf) { + assert errorbuf instanceof JdkErrorReference; + var jdkErrorbuf = (JdkErrorReference) errorbuf; + try { + sandbox_free_error$mh.invokeExact(jdkErrorbuf.deref()); + } catch (Throwable t) { + throw new AssertionError(t); + } + } +} diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java index d76170a55284c..cbd43a394379b 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java @@ -10,6 +10,8 @@ import org.elasticsearch.nativeaccess.lib.JavaLibrary; import org.elasticsearch.nativeaccess.lib.Kernel32Library; +import org.elasticsearch.nativeaccess.lib.LinuxCLibrary; +import 
org.elasticsearch.nativeaccess.lib.MacCLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; import org.elasticsearch.nativeaccess.lib.SystemdLibrary; @@ -28,6 +30,10 @@ public JdkNativeLibraryProvider() { JdkJavaLibrary::new, PosixCLibrary.class, JdkPosixCLibrary::new, + LinuxCLibrary.class, + JdkLinuxCLibrary::new, + MacCLibrary.class, + JdkMacCLibrary::new, Kernel32Library.class, JdkKernel32Library::new, SystemdLibrary.class, diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java index 43ec9425ccfaa..ddd82916de16f 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java @@ -19,6 +19,7 @@ import java.lang.foreign.MemorySegment; import java.lang.foreign.StructLayout; import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; import java.lang.invoke.VarHandle; import static java.lang.foreign.MemoryLayout.PathElement.groupElement; @@ -43,7 +44,43 @@ class JdkPosixCLibrary implements PosixCLibrary { "getrlimit", FunctionDescriptor.of(JAVA_INT, JAVA_INT, ADDRESS) ); + private static final MethodHandle setrlimit$mh = downcallHandleWithErrno( + "setrlimit", + FunctionDescriptor.of(JAVA_INT, JAVA_INT, ADDRESS) + ); private static final MethodHandle mlockall$mh = downcallHandleWithErrno("mlockall", FunctionDescriptor.of(JAVA_INT, JAVA_INT)); + private static final MethodHandle fcntl$mh = downcallHandle("fcntl", FunctionDescriptor.of(JAVA_INT, JAVA_INT, JAVA_INT, ADDRESS)); + private static final MethodHandle ftruncate$mh = downcallHandle("ftruncate", FunctionDescriptor.of(JAVA_INT, JAVA_INT, JAVA_LONG)); + private static final MethodHandle open$mh = downcallHandle( + "open", + FunctionDescriptor.of(JAVA_INT, ADDRESS, JAVA_INT), + CAPTURE_ERRNO_OPTION, + Linker.Option.firstVariadicArg(2) + ); + private static final MethodHandle openWithMode$mh = downcallHandle( + "open", + FunctionDescriptor.of(JAVA_INT, ADDRESS, JAVA_INT, JAVA_INT), + CAPTURE_ERRNO_OPTION, + Linker.Option.firstVariadicArg(2) + ); + private static final MethodHandle close$mh = downcallHandleWithErrno("close", FunctionDescriptor.of(JAVA_INT, JAVA_INT)); + private static final MethodHandle fstat$mh; + static { + MethodHandle fstat; + try { + fstat = downcallHandleWithErrno("fstat64", FunctionDescriptor.of(JAVA_INT, JAVA_INT, ADDRESS)); + } catch (LinkageError e) { + // Due to different sizes of the stat structure for 32 vs 64 bit machines, on some systems fstat actually points to + // an internal symbol. So we fall back to looking for that symbol. + int version = System.getProperty("os.arch").equals("aarch64") ? 
0 : 1; + fstat = MethodHandles.insertArguments( + downcallHandleWithErrno("__fxstat", FunctionDescriptor.of(JAVA_INT, JAVA_INT, JAVA_INT, ADDRESS)), + 1, + version + ); + } + fstat$mh = fstat; + } static final MemorySegment errnoState = Arena.ofAuto().allocate(CAPTURE_ERRNO_LAYOUT); @@ -80,6 +117,16 @@ public RLimit newRLimit() { return new JdkRLimit(); } + @Override + public Stat64 newStat64(int sizeof, int stSizeOffset, int stBlocksOffset) { + return new JdkStat64(sizeof, stSizeOffset, stBlocksOffset); + } + + @Override + public FStore newFStore() { + return new JdkFStore(); + } + @Override public int getrlimit(int resource, RLimit rlimit) { assert rlimit instanceof JdkRLimit; @@ -91,6 +138,17 @@ public int getrlimit(int resource, RLimit rlimit) { } } + @Override + public int setrlimit(int resource, RLimit rlimit) { + assert rlimit instanceof JdkRLimit; + var jdkRlimit = (JdkRLimit) rlimit; + try { + return (int) setrlimit$mh.invokeExact(errnoState, resource, jdkRlimit.segment); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + @Override public int mlockall(int flags) { try { @@ -100,6 +158,66 @@ public int mlockall(int flags) { } } + @Override + public int fcntl(int fd, int cmd, FStore fst) { + assert fst instanceof JdkFStore; + var jdkFst = (JdkFStore) fst; + try { + return (int) fcntl$mh.invokeExact(errnoState, fd, cmd, jdkFst.segment); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public int ftruncate(int fd, long length) { + try { + return (int) ftruncate$mh.invokeExact(errnoState, fd, length); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public int open(String pathname, int flags) { + try (Arena arena = Arena.ofConfined()) { + MemorySegment nativePathname = MemorySegmentUtil.allocateString(arena, pathname); + return (int) open$mh.invokeExact(errnoState, nativePathname, flags); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public int open(String pathname, int flags, int mode) { + try (Arena arena = Arena.ofConfined()) { + MemorySegment nativePathname = MemorySegmentUtil.allocateString(arena, pathname); + return (int) openWithMode$mh.invokeExact(errnoState, nativePathname, flags, mode); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public int close(int fd) { + try { + return (int) close$mh.invokeExact(errnoState, fd); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public int fstat64(int fd, Stat64 stat64) { + assert stat64 instanceof JdkStat64; + var jdkStat = (JdkStat64) stat64; + try { + return (int) fstat$mh.invokeExact(errnoState, fd, jdkStat.segment); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + static class JdkRLimit implements RLimit { private static final MemoryLayout layout = MemoryLayout.structLayout(JAVA_LONG, JAVA_LONG); private static final VarHandle rlim_cur$vh = varHandleWithoutOffset(layout, groupElement(0)); @@ -122,9 +240,83 @@ public long rlim_max() { return (long) rlim_max$vh.get(segment); } + @Override + public void rlim_cur(long v) { + rlim_cur$vh.set(segment, v); + } + + @Override + public void rlim_max(long v) { + rlim_max$vh.set(segment, v); + } + @Override public String toString() { return "JdkRLimit[rlim_cur=" + rlim_cur() + ", rlim_max=" + rlim_max(); } } + + private static class JdkStat64 implements Stat64 { + + private final MemorySegment segment; + private final int stSizeOffset; + private final int stBlocksOffset; + + JdkStat64(int sizeof, 
int stSizeOffset, int stBlocksOffset) { + this.segment = Arena.ofAuto().allocate(sizeof, 8); + this.stSizeOffset = stSizeOffset; + this.stBlocksOffset = stBlocksOffset; + } + + @Override + public long st_size() { + return segment.get(JAVA_LONG, stSizeOffset); + } + + @Override + public long st_blocks() { + return segment.get(JAVA_LONG, stBlocksOffset); + } + } + + private static class JdkFStore implements FStore { + private static final MemoryLayout layout = MemoryLayout.structLayout(JAVA_INT, JAVA_INT, JAVA_LONG, JAVA_LONG, JAVA_LONG); + private static final VarHandle st_flags$vh = layout.varHandle(groupElement(0)); + private static final VarHandle st_posmode$vh = layout.varHandle(groupElement(1)); + private static final VarHandle st_offset$vh = layout.varHandle(groupElement(2)); + private static final VarHandle st_length$vh = layout.varHandle(groupElement(3)); + private static final VarHandle st_bytesalloc$vh = layout.varHandle(groupElement(4)); + + private final MemorySegment segment; + + JdkFStore() { + var arena = Arena.ofAuto(); + this.segment = arena.allocate(layout); + } + + @Override + public void set_flags(int flags) { + st_flags$vh.set(segment, flags); + } + + @Override + public void set_posmode(int posmode) { + st_posmode$vh.set(segment, posmode); + } + + @Override + public void set_offset(long offset) { + st_offset$vh.set(segment, offset); + } + + @Override + public void set_length(long length) { + st_length$vh.set(segment, length); + } + + @Override + public long bytesalloc() { + return (long) st_bytesalloc$vh.get(segment); + } + } } diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java index db2e7b85c30d0..c92ad654c9b9a 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java @@ -23,6 +23,9 @@ public final class JdkVectorLibrary implements VectorLibrary { + static final MethodHandle dot7u$mh; + static final MethodHandle sqr7u$mh; + static final VectorSimilarityFunctions INSTANCE; static { @@ -32,8 +35,33 @@ public final class JdkVectorLibrary implements VectorLibrary { try { int caps = (int) vecCaps$mh.invokeExact(); if (caps != 0) { + if (caps == 2) { + dot7u$mh = downcallHandle( + "dot7u_2", + FunctionDescriptor.of(JAVA_INT, ADDRESS, ADDRESS, JAVA_INT), + LinkerHelperUtil.critical() + ); + sqr7u$mh = downcallHandle( + "sqr7u_2", + FunctionDescriptor.of(JAVA_INT, ADDRESS, ADDRESS, JAVA_INT), + LinkerHelperUtil.critical() + ); + } else { + dot7u$mh = downcallHandle( + "dot7u", + FunctionDescriptor.of(JAVA_INT, ADDRESS, ADDRESS, JAVA_INT), + LinkerHelperUtil.critical() + ); + sqr7u$mh = downcallHandle( + "sqr7u", + FunctionDescriptor.of(JAVA_INT, ADDRESS, ADDRESS, JAVA_INT), + LinkerHelperUtil.critical() + ); + } INSTANCE = new JdkVectorSimilarityFunctions(); } else { + dot7u$mh = null; + sqr7u$mh = null; INSTANCE = null; } } catch (Throwable t) { @@ -49,18 +77,6 @@ public VectorSimilarityFunctions getVectorSimilarityFunctions() { } private static final class JdkVectorSimilarityFunctions implements VectorSimilarityFunctions { - - static final MethodHandle dot7u$mh = downcallHandle( - "dot7u", - FunctionDescriptor.of(JAVA_INT, ADDRESS, ADDRESS, JAVA_INT), - 
LinkerHelperUtil.critical() - ); - /** * Computes the dot product of given unsigned int7 byte vectors. * @@ -103,7 +119,7 @@ static int squareDistance7u(MemorySegment a, MemorySegment b, int length) { private static int dot7u(MemorySegment a, MemorySegment b, int length) { try { - return (int) dot7u$mh.invokeExact(a, b, length); + return (int) JdkVectorLibrary.dot7u$mh.invokeExact(a, b, length); } catch (Throwable t) { throw new AssertionError(t); } @@ -111,7 +127,7 @@ private static int dot7u(MemorySegment a, MemorySegment b, int length) { private static int sqr7u(MemorySegment a, MemorySegment b, int length) { try { - return (int) sqr7u$mh.invokeExact(a, b, length); + return (int) JdkVectorLibrary.sqr7u$mh.invokeExact(a, b, length); } catch (Throwable t) { throw new AssertionError(t); } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/SystemCallFilterTests.java b/libs/native/src/test/java/org/elasticsearch/nativeaccess/SystemCallFilterTests.java similarity index 84% rename from qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/SystemCallFilterTests.java rename to libs/native/src/test/java/org/elasticsearch/nativeaccess/SystemCallFilterTests.java index c62522880869b..d4bac13990898 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/SystemCallFilterTests.java +++ b/libs/native/src/test/java/org/elasticsearch/nativeaccess/SystemCallFilterTests.java @@ -6,12 +6,16 @@ * Side Public License, v 1. */ -package org.elasticsearch.bootstrap; +package org.elasticsearch.nativeaccess; import org.apache.lucene.util.Constants; import org.elasticsearch.test.ESTestCase; +import static org.apache.lucene.tests.util.LuceneTestCase.assumeTrue; +import static org.junit.Assert.fail; + /** Simple tests system call filter is working. */ +@ESTestCase.WithoutSecurityManager public class SystemCallFilterTests extends ESTestCase { /** command to try to run in tests */ @@ -20,15 +24,18 @@ public class SystemCallFilterTests extends ESTestCase { @Override public void setUp() throws Exception { super.setUp(); - assumeTrue("requires system call filter installation", Natives.isSystemCallFilterInstalled()); + assumeTrue( + "requires system call filter installation", + NativeAccess.instance().getExecSandboxState() != NativeAccess.ExecSandboxState.NONE + ); // otherwise security manager will block the execution, no fun assumeTrue("cannot test with security manager enabled", System.getSecurityManager() == null); // otherwise, since we don't have TSYNC support, rules are not applied to the test thread // (randomizedrunner class initialization happens in its own thread, after the test thread is created) // instead we just forcefully run it for the test thread here. - if (JNANatives.LOCAL_SYSTEM_CALL_FILTER_ALL == false) { + if (NativeAccess.instance().getExecSandboxState() != NativeAccess.ExecSandboxState.ALL_THREADS) { try { - SystemCallFilter.init(createTempDir()); + NativeAccess.instance().tryInstallExecSandbox(); } catch (Exception e) { throw new RuntimeException("unable to forcefully apply system call filter to test thread", e); } diff --git a/libs/preallocate/build.gradle b/libs/preallocate/build.gradle deleted file mode 100644 index a490c7168516e..0000000000000 --- a/libs/preallocate/build.gradle +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -apply plugin: 'elasticsearch.build' - -dependencies { - implementation project(':libs:elasticsearch-core') - implementation project(':libs:elasticsearch-logging') - implementation "net.java.dev.jna:jna:${versions.jna}" -} - -tasks.named('forbiddenApisMain').configure { - replaceSignatureFiles 'jdk-signatures' -} diff --git a/libs/preallocate/src/main/java/module-info.java b/libs/preallocate/src/main/java/module-info.java deleted file mode 100644 index 89c85d95ab2f0..0000000000000 --- a/libs/preallocate/src/main/java/module-info.java +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -module org.elasticsearch.preallocate { - requires org.elasticsearch.base; - requires org.elasticsearch.logging; - requires com.sun.jna; - - exports org.elasticsearch.preallocate to org.elasticsearch.blobcache, com.sun.jna; - - provides org.elasticsearch.jdk.ModuleQualifiedExportsService with org.elasticsearch.preallocate.PreallocateModuleExportsService; -} diff --git a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/AbstractPosixPreallocator.java b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/AbstractPosixPreallocator.java deleted file mode 100644 index e841b38c0059e..0000000000000 --- a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/AbstractPosixPreallocator.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.preallocate; - -import com.sun.jna.FunctionMapper; -import com.sun.jna.Library; -import com.sun.jna.Native; -import com.sun.jna.NativeLong; -import com.sun.jna.Platform; -import com.sun.jna.Structure; - -import java.io.IOException; -import java.security.AccessController; -import java.security.PrivilegedAction; -import java.util.Locale; -import java.util.Map; - -abstract class AbstractPosixPreallocator implements Preallocator { - - /** - * Constants relating to posix libc. - * - * @param SIZEOF_STAT The size of the stat64 structure, ie sizeof(stat64_t), found by importing sys/stat.h - * @param STAT_ST_SIZE_OFFSET The offsite into stat64 at which st_size exists, ie offsetof(stat64_t, st_size), - * found by importing sys/stat.h - * @param O_CREAT The file mode for creating a file upon opening, found by importing fcntl.h - */ - protected record PosixConstants(int SIZEOF_STAT, int STAT_ST_SIZE_OFFSET, int O_CREAT) {} - - private static final int O_WRONLY = 1; - - static final class Stat64 extends Structure implements Structure.ByReference { - public byte[] _ignore1; - public NativeLong st_size = new NativeLong(0); - public byte[] _ignore2; - - Stat64(int sizeof, int stSizeOffset) { - this._ignore1 = new byte[stSizeOffset]; - this._ignore2 = new byte[sizeof - stSizeOffset - 8]; - } - } - - private interface NativeFunctions extends Library { - String strerror(int errno); - - int open(String filename, int flags, Object... 
mode); - - int close(int fd); - } - - private interface FStat64Function extends Library { - int fstat64(int fd, Stat64 stat); - } - - public static final boolean NATIVES_AVAILABLE; - private static final NativeFunctions functions; - private static final FStat64Function fstat64; - - static { - functions = AccessController.doPrivileged((PrivilegedAction) () -> { - try { - return Native.load(Platform.C_LIBRARY_NAME, NativeFunctions.class); - } catch (final UnsatisfiedLinkError e) { - return null; - } - }); - fstat64 = AccessController.doPrivileged((PrivilegedAction) () -> { - try { - return Native.load(Platform.C_LIBRARY_NAME, FStat64Function.class); - } catch (final UnsatisfiedLinkError e) { - try { - // on Linux fstat64 isn't available as a symbol, but instead uses a special __ name - var options = Map.of(Library.OPTION_FUNCTION_MAPPER, (FunctionMapper) (lib, method) -> "__fxstat64"); - return Native.load(Platform.C_LIBRARY_NAME, FStat64Function.class, options); - } catch (UnsatisfiedLinkError e2) { - return null; - } - } - }); - NATIVES_AVAILABLE = functions != null && fstat64 != null; - } - - private class PosixNativeFileHandle implements NativeFileHandle { - - private final int fd; - - PosixNativeFileHandle(int fd) { - this.fd = fd; - } - - @Override - public int fd() { - return fd; - } - - @Override - public long getSize() throws IOException { - var stat = new Stat64(constants.SIZEOF_STAT, constants.STAT_ST_SIZE_OFFSET); - if (fstat64.fstat64(fd, stat) == -1) { - throw newIOException("Could not get size of file"); - } - return stat.st_size.longValue(); - } - - @Override - public void close() throws IOException { - if (functions.close(fd) != 0) { - throw newIOException("Could not close file"); - } - } - } - - protected final PosixConstants constants; - - AbstractPosixPreallocator(PosixConstants constants) { - this.constants = constants; - } - - @Override - public boolean useNative() { - return false; - } - - @Override - public NativeFileHandle open(String path) throws IOException { - int fd = functions.open(path, O_WRONLY, constants.O_CREAT); - if (fd < 0) { - throw newIOException(String.format(Locale.ROOT, "Could not open file [%s] for preallocation", path)); - } - return new PosixNativeFileHandle(fd); - } - - @Override - public String error(int errno) { - return functions.strerror(errno); - } - - private static IOException newIOException(String prefix) { - int errno = Native.getLastError(); - return new IOException(String.format(Locale.ROOT, "%s(errno=%d): %s", prefix, errno, functions.strerror(errno))); - } -} diff --git a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/LinuxPreallocator.java b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/LinuxPreallocator.java deleted file mode 100644 index 25ad4a26fd03e..0000000000000 --- a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/LinuxPreallocator.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.preallocate; - -import com.sun.jna.Native; -import com.sun.jna.Platform; - -import java.security.AccessController; -import java.security.PrivilegedAction; - -final class LinuxPreallocator extends AbstractPosixPreallocator { - - LinuxPreallocator() { - super(new PosixConstants(144, 48, 64)); - } - - @Override - public boolean useNative() { - return Natives.NATIVES_AVAILABLE && super.useNative(); - } - - @Override - public int preallocate(final int fd, final long currentSize, final long fileSize) { - final int rc = Natives.fallocate(fd, 0, currentSize, fileSize - currentSize); - return rc == 0 ? 0 : Native.getLastError(); - } - - private static class Natives { - - public static final boolean NATIVES_AVAILABLE; - - static { - NATIVES_AVAILABLE = AccessController.doPrivileged((PrivilegedAction) () -> { - try { - Native.register(Natives.class, Platform.C_LIBRARY_NAME); - } catch (final UnsatisfiedLinkError e) { - return false; - } - return true; - }); - } - - static native int fallocate(int fd, int mode, long offset, long length); - } - -} diff --git a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/MacOsPreallocator.java b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/MacOsPreallocator.java deleted file mode 100644 index 149cf80527bd0..0000000000000 --- a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/MacOsPreallocator.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
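The deleted LinuxPreallocator above reached fallocate(2) through JNA; its replacement goes through the new LinuxCLibrary#fallocate binding instead. A sketch of what the Linux-side nativePreallocate hook can look like; the enclosing LinuxNativeAccess class and its linuxLibc/libc/logger fields are assumptions, only LinuxCLibrary#fallocate and the abstract nativePreallocate signature appear in this diff:

// Assumed to live in a Linux-specific subclass of PosixNativeAccess (not shown in this excerpt).
@Override
protected boolean nativePreallocate(int fd, long currentSize, long newSize) {
    // mode 0: allocate blocks and extend the file size, mirroring the deleted JNA call
    final int rc = linuxLibc.fallocate(fd, 0, currentSize, newSize - currentSize);
    if (rc != 0) {
        logger.warn("Could not preallocate file: " + libc.strerror(libc.errno()));
        return false;
    }
    return true;
}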
- */ -package org.elasticsearch.preallocate; - -import com.sun.jna.Native; -import com.sun.jna.NativeLong; -import com.sun.jna.Platform; -import com.sun.jna.Structure; - -import java.security.AccessController; -import java.security.PrivilegedAction; -import java.util.Arrays; -import java.util.List; - -final class MacOsPreallocator extends AbstractPosixPreallocator { - - MacOsPreallocator() { - super(new PosixConstants(144, 96, 512)); - } - - @Override - public boolean useNative() { - return Natives.NATIVES_AVAILABLE && super.useNative(); - } - - @Override - public int preallocate(final int fd, final long currentSize /* unused */ , final long fileSize) { - // the Structure.ByReference constructor requires access to declared members - final Natives.Fcntl.FStore fst = AccessController.doPrivileged((PrivilegedAction) Natives.Fcntl.FStore::new); - fst.fst_flags = Natives.Fcntl.F_ALLOCATECONTIG; - fst.fst_posmode = Natives.Fcntl.F_PEOFPOSMODE; - fst.fst_offset = new NativeLong(0); - fst.fst_length = new NativeLong(fileSize); - // first, try allocating contiguously - if (Natives.fcntl(fd, Natives.Fcntl.F_PREALLOCATE, fst) != 0) { - // that failed, so let us try allocating non-contiguously - fst.fst_flags = Natives.Fcntl.F_ALLOCATEALL; - if (Natives.fcntl(fd, Natives.Fcntl.F_PREALLOCATE, fst) != 0) { - // i'm afraid captain dale had to bail - return Native.getLastError(); - } - } - if (Natives.ftruncate(fd, new NativeLong(fileSize)) != 0) { - return Native.getLastError(); - } - return 0; - } - - private static class Natives { - - static boolean NATIVES_AVAILABLE; - - static { - NATIVES_AVAILABLE = AccessController.doPrivileged((PrivilegedAction) () -> { - try { - Native.register(Natives.class, Platform.C_LIBRARY_NAME); - } catch (final UnsatisfiedLinkError e) { - return false; - } - return true; - }); - } - - static class Fcntl { - private static final int F_PREALLOCATE = 42; - - // allocate flags; these might be unused, but are here for reference - @SuppressWarnings("unused") - private static final int F_ALLOCATECONTIG = 0x00000002; // allocate contiguous space - private static final int F_ALLOCATEALL = 0x00000004; // allocate all the requested space or no space at all - - // position modes; these might be unused, but are here for reference - private static final int F_PEOFPOSMODE = 3; // allocate from the physical end of the file - @SuppressWarnings("unused") - private static final int F_VOLPOSMODE = 4; // allocate from the volume offset - - public static final class FStore extends Structure implements Structure.ByReference { - public int fst_flags = 0; - public int fst_posmode = 0; - public NativeLong fst_offset = new NativeLong(0); - public NativeLong fst_length = new NativeLong(0); - @SuppressWarnings("unused") - public NativeLong fst_bytesalloc = new NativeLong(0); - - @Override - protected List getFieldOrder() { - return Arrays.asList("fst_flags", "fst_posmode", "fst_offset", "fst_length", "fst_bytesalloc"); - } - - } - } - - static native int fcntl(int fd, int cmd, Fcntl.FStore fst); - - static native int ftruncate(int fd, NativeLong length); - } - -} diff --git a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/NoNativePreallocator.java b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/NoNativePreallocator.java deleted file mode 100644 index 447b178ba41d9..0000000000000 --- a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/NoNativePreallocator.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.preallocate; - -import java.io.IOException; - -final class NoNativePreallocator implements Preallocator { - - @Override - public boolean useNative() { - return false; - } - - @Override - public NativeFileHandle open(String path) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public int preallocate(final int fd, final long currentSize, final long fileSize) { - throw new UnsupportedOperationException(); - } - - @Override - public String error(final int errno) { - throw new UnsupportedOperationException(); - } - -} diff --git a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/Preallocate.java b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/Preallocate.java deleted file mode 100644 index 8f7214e0877ba..0000000000000 --- a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/Preallocate.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.preallocate; - -import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.logging.LogManager; -import org.elasticsearch.logging.Logger; -import org.elasticsearch.preallocate.Preallocator.NativeFileHandle; - -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.RandomAccessFile; -import java.lang.reflect.Field; -import java.nio.file.Files; -import java.nio.file.Path; -import java.security.PrivilegedExceptionAction; - -public class Preallocate { - - private static final Logger logger = LogManager.getLogger(Preallocate.class); - - private static final boolean IS_LINUX; - private static final boolean IS_MACOS; - static { - String osName = System.getProperty("os.name"); - IS_LINUX = osName.startsWith("Linux"); - IS_MACOS = osName.startsWith("Mac OS X"); - } - - public static void preallocate(final Path cacheFile, final long fileSize) throws IOException { - if (IS_LINUX) { - preallocate(cacheFile, fileSize, new LinuxPreallocator()); - } else if (IS_MACOS) { - preallocate(cacheFile, fileSize, new MacOsPreallocator()); - } else { - preallocate(cacheFile, fileSize, new NoNativePreallocator()); - } - } - - @SuppressForbidden(reason = "need access to toFile for RandomAccessFile") - private static void preallocate(final Path cacheFile, final long fileSize, final Preallocator prealloactor) throws IOException { - boolean success = false; - try { - if (prealloactor.useNative()) { - try (NativeFileHandle openFile = prealloactor.open(cacheFile.toAbsolutePath().toString())) { - long currentSize = openFile.getSize(); - if (currentSize < fileSize) { - logger.info("pre-allocating cache file [{}] ({} bytes) using native methods", cacheFile, fileSize); - final int errno = prealloactor.preallocate(openFile.fd(), currentSize, fileSize - currentSize); - if (errno == 0) { - success = true; - logger.debug("pre-allocated cache file [{}] using native methods", 
cacheFile); - } else { - logger.warn( - "failed to pre-allocate cache file [{}] using native methods, errno: [{}], error: [{}]", - cacheFile, - errno, - prealloactor.error(errno) - ); - } - } - } catch (final Exception e) { - logger.warn(() -> "failed to pre-allocate cache file [" + cacheFile + "] using native methods", e); - } - } - // even if allocation was successful above, verify again here - try (RandomAccessFile raf = new RandomAccessFile(cacheFile.toFile(), "rw")) { - if (raf.length() != fileSize) { - logger.info("pre-allocating cache file [{}] ({} bytes) using setLength method", cacheFile, fileSize); - raf.setLength(fileSize); - logger.debug("pre-allocated cache file [{}] using setLength method", cacheFile); - } - success = raf.length() == fileSize; - } catch (final Exception e) { - logger.warn(() -> "failed to pre-allocate cache file [" + cacheFile + "] using setLength method", e); - throw e; - } - } finally { - if (success == false) { - // if anything goes wrong, delete the potentially created file to not waste disk space - Files.deleteIfExists(cacheFile); - } - } - } - - @SuppressForbidden(reason = "need access to fd on FileOutputStream") - private static class FileDescriptorFieldAction implements PrivilegedExceptionAction { - - private final FileOutputStream fileOutputStream; - - private FileDescriptorFieldAction(FileOutputStream fileOutputStream) { - this.fileOutputStream = fileOutputStream; - } - - @Override - public Field run() throws IOException, NoSuchFieldException { - // accessDeclaredMembers - final Field f = fileOutputStream.getFD().getClass().getDeclaredField("fd"); - // suppressAccessChecks - f.setAccessible(true); - return f; - } - } - -} diff --git a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/PreallocateModuleExportsService.java b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/PreallocateModuleExportsService.java deleted file mode 100644 index dd0c4236f2c75..0000000000000 --- a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/PreallocateModuleExportsService.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.preallocate; - -import org.elasticsearch.jdk.ModuleQualifiedExportsService; - -public class PreallocateModuleExportsService extends ModuleQualifiedExportsService { - - @Override - protected void addExports(String pkg, Module target) { - module.addExports(pkg, target); - } - - @Override - protected void addOpens(String pkg, Module target) { - module.addOpens(pkg, target); - } -} diff --git a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/Preallocator.java b/libs/preallocate/src/main/java/org/elasticsearch/preallocate/Preallocator.java deleted file mode 100644 index b70b3ff03f4bd..0000000000000 --- a/libs/preallocate/src/main/java/org/elasticsearch/preallocate/Preallocator.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.preallocate; - -import java.io.IOException; - -/** - * Represents platform native methods for pre-allocating files. - */ -interface Preallocator { - - /** A handle for an open file */ - interface NativeFileHandle extends AutoCloseable { - /** A valid native file descriptor */ - int fd(); - - /** Retrieves the current size of the file */ - long getSize() throws IOException; - } - - /** - * Returns if native methods for pre-allocating files are available. - * - * @return true if native methods are available, otherwise false - */ - boolean useNative(); - - /** - * Open a file for preallocation. - * - * @param path The absolute path to the file to be opened - * @return a handle to the open file that may be used for preallocate - */ - NativeFileHandle open(String path) throws IOException; - - /** - * Pre-allocate a file of given current size to the specified size using the given file descriptor. - * - * @param fd the file descriptor - * @param currentSize the current size of the file - * @param fileSize the size to pre-allocate - * @return 0 upon success - */ - int preallocate(int fd, long currentSize, long fileSize); - - /** - * Provide a string representation of the given error number. - * - * @param errno the error number - * @return the error message - */ - String error(int errno); - -} diff --git a/libs/simdvec/native/Dockerfile b/libs/simdvec/native/Dockerfile.aarch64 similarity index 100% rename from libs/simdvec/native/Dockerfile rename to libs/simdvec/native/Dockerfile.aarch64 diff --git a/libs/simdvec/native/Dockerfile.amd64 b/libs/simdvec/native/Dockerfile.amd64 new file mode 100644 index 0000000000000..77acf8e42cdd2 --- /dev/null +++ b/libs/simdvec/native/Dockerfile.amd64 @@ -0,0 +1,16 @@ +FROM debian:latest + +RUN apt update +RUN apt install -y wget +RUN echo "deb http://apt.llvm.org/bookworm/ llvm-toolchain-bookworm-18 main" > /etc/apt/sources.list.d/clang.list +RUN wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc +RUN apt update +RUN apt install -y clang-18 openjdk-17-jdk +RUN ln -s /usr/bin/clang-18 /usr/bin/clang +RUN ln -s /usr/bin/clang++-18 /usr/bin/clang++ +COPY . /workspace +WORKDIR /workspace +RUN ./gradlew --quiet --console=plain clean buildSharedLibrary +RUN strip --strip-unneeded build/output/libvec.so + +CMD cat build/output/libvec.so diff --git a/libs/simdvec/native/build.gradle b/libs/simdvec/native/build.gradle index ef9120680646a..073477c3aebf2 100644 --- a/libs/simdvec/native/build.gradle +++ b/libs/simdvec/native/build.gradle @@ -6,14 +6,15 @@ * Side Public License, v 1. */ apply plugin: 'c' +apply plugin: 'cpp' var os = org.gradle.internal.os.OperatingSystem.current() // To update this library run publish_vec_binaries.sh ( or ./gradlew vecSharedLibrary ) // Or // For local development, build the docker image with: -// docker build --platform linux/arm64 --progress=plain . (for aarch64) -// docker build --platform linux/amd64 --progress=plain . (for x64) +// docker build --platform linux/arm64 --progress=plain --file=Dockerfile.aarch64 . (for aarch64) +// docker build --platform linux/amd64 --progress=plain --file=Dockerfile.amd64 . (for x64) // Grab the image id from the console output, then, e.g. 
// docker run 9c9f36564c148b275aeecc42749e7b4580ded79dcf51ff6ccc008c8861e7a979 > build/libs/vec/shared/$arch/libvec.so // @@ -51,6 +52,8 @@ model { target("amd64") { cCompiler.executable = "/usr/bin/gcc" cCompiler.withArguments { args -> args.addAll(["-O3", "-std=c99", "-march=core-avx2", "-Wno-incompatible-pointer-types"]) } + cppCompiler.executable = "/usr/bin/g++" + cppCompiler.withArguments { args -> args.addAll(["-O3", "-march=core-avx2"]) } } } cl(VisualCpp) { @@ -68,6 +71,7 @@ model { target("amd64") { cCompiler.withArguments { args -> args.addAll(["-O3", "-std=c99", "-march=core-avx2"]) } + cppCompiler.withArguments { args -> args.addAll(["-O3", "-march=core-avx2"]) } } } } @@ -86,6 +90,15 @@ model { srcDir "src/vec/headers/" } } + cpp { + source { + srcDir "src/vec/c/${platformName}/" + include "*.cpp" + } + exportedHeaders { + srcDir "src/vec/headers/" + } + } } } } diff --git a/libs/simdvec/native/publish_vec_binaries.sh b/libs/simdvec/native/publish_vec_binaries.sh index d11645ff71c4a..ddb3d2c71e448 100755 --- a/libs/simdvec/native/publish_vec_binaries.sh +++ b/libs/simdvec/native/publish_vec_binaries.sh @@ -19,7 +19,7 @@ if [ -z "$ARTIFACTORY_API_KEY" ]; then exit 1; fi -VERSION="1.0.9" +VERSION="1.0.10" ARTIFACTORY_REPOSITORY="${ARTIFACTORY_REPOSITORY:-https://artifactory.elastic.dev/artifactory/elasticsearch-native/}" TEMP=$(mktemp -d) @@ -33,11 +33,11 @@ echo 'Building Darwin binary...' echo 'Building Linux binary...' mkdir -p build/libs/vec/shared/aarch64/ -DOCKER_IMAGE=$(docker build --platform linux/arm64 --quiet .) +DOCKER_IMAGE=$(docker build --platform linux/arm64 --quiet --file=Dockerfile.aarch64 .) docker run $DOCKER_IMAGE > build/libs/vec/shared/aarch64/libvec.so echo 'Building Linux x64 binary...' -DOCKER_IMAGE=$(docker build --platform linux/amd64 --quiet .) +DOCKER_IMAGE=$(docker build --platform linux/amd64 --quiet --file=Dockerfile.amd64 .) mkdir -p build/libs/vec/shared/amd64 docker run --platform linux/amd64 $DOCKER_IMAGE > build/libs/vec/shared/amd64/libvec.so diff --git a/libs/simdvec/native/src/vec/c/amd64/vec.c b/libs/simdvec/native/src/vec/c/amd64/vec.c index c9a49ad2d1d4d..0fa17109fac6b 100644 --- a/libs/simdvec/native/src/vec/c/amd64/vec.c +++ b/libs/simdvec/native/src/vec/c/amd64/vec.c @@ -13,20 +13,16 @@ #include #include -#ifndef DOT7U_STRIDE_BYTES_LEN -#define DOT7U_STRIDE_BYTES_LEN 32 // Must be a power of 2 -#endif - -#ifndef SQR7U_STRIDE_BYTES_LEN -#define SQR7U_STRIDE_BYTES_LEN 32 // Must be a power of 2 +#ifndef STRIDE_BYTES_LEN +#define STRIDE_BYTES_LEN sizeof(__m256i) // Must be a power of 2 #endif #ifdef _MSC_VER #include -#elif __GNUC__ -#include #elif __clang__ #include +#elif __GNUC__ +#include #endif // Multi-platform CPUID "intrinsic"; it takes as input a "functionNumber" (or "leaf", the eax registry). 
"Subleaf" @@ -67,9 +63,19 @@ EXPORT int vec_caps() { if (functionIds >= 7) { cpuid(cpuInfo, 7); int ebx = cpuInfo[1]; + int ecx = cpuInfo[2]; // AVX2 flag is the 5th bit // We assume that all processors that have AVX2 also have FMA3 - return (ebx & (1 << 5)) != 0; + int avx2 = (ebx & 0x00000020) != 0; + int avx512 = (ebx & 0x00010000) != 0; + // int avx512_vnni = (ecx & 0x00000800) != 0; + // if (avx512 && avx512_vnni) { + if (avx512) { + return 2; + } + if (avx2) { + return 1; + } } return 0; } @@ -81,7 +87,7 @@ static inline int32_t dot7u_inner(int8_t* a, int8_t* b, size_t dims) { __m256i acc1 = _mm256_setzero_si256(); #pragma GCC unroll 4 - for(int i = 0; i < dims; i += DOT7U_STRIDE_BYTES_LEN) { + for(int i = 0; i < dims; i += STRIDE_BYTES_LEN) { // Load packed 8-bit integers __m256i va1 = _mm256_loadu_si256(a + i); __m256i vb1 = _mm256_loadu_si256(b + i); @@ -101,8 +107,8 @@ static inline int32_t dot7u_inner(int8_t* a, int8_t* b, size_t dims) { EXPORT int32_t dot7u(int8_t* a, int8_t* b, size_t dims) { int32_t res = 0; int i = 0; - if (dims > DOT7U_STRIDE_BYTES_LEN) { - i += dims & ~(DOT7U_STRIDE_BYTES_LEN - 1); + if (dims > STRIDE_BYTES_LEN) { + i += dims & ~(STRIDE_BYTES_LEN - 1); res = dot7u_inner(a, b, i); } for (; i < dims; i++) { @@ -118,7 +124,7 @@ static inline int32_t sqr7u_inner(int8_t *a, int8_t *b, size_t dims) { const __m256i ones = _mm256_set1_epi16(1); #pragma GCC unroll 4 - for(int i = 0; i < dims; i += SQR7U_STRIDE_BYTES_LEN) { + for(int i = 0; i < dims; i += STRIDE_BYTES_LEN) { // Load packed 8-bit integers __m256i va1 = _mm256_loadu_si256(a + i); __m256i vb1 = _mm256_loadu_si256(b + i); @@ -126,7 +132,6 @@ static inline int32_t sqr7u_inner(int8_t *a, int8_t *b, size_t dims) { const __m256i dist1 = _mm256_sub_epi8(va1, vb1); const __m256i abs_dist1 = _mm256_sign_epi8(dist1, dist1); const __m256i sqr1 = _mm256_maddubs_epi16(abs_dist1, abs_dist1); - acc1 = _mm256_add_epi32(_mm256_madd_epi16(ones, sqr1), acc1); } @@ -137,8 +142,8 @@ static inline int32_t sqr7u_inner(int8_t *a, int8_t *b, size_t dims) { EXPORT int32_t sqr7u(int8_t* a, int8_t* b, size_t dims) { int32_t res = 0; int i = 0; - if (dims > SQR7U_STRIDE_BYTES_LEN) { - i += dims & ~(SQR7U_STRIDE_BYTES_LEN - 1); + if (dims > STRIDE_BYTES_LEN) { + i += dims & ~(STRIDE_BYTES_LEN - 1); res = sqr7u_inner(a, b, i); } for (; i < dims; i++) { @@ -147,4 +152,3 @@ EXPORT int32_t sqr7u(int8_t* a, int8_t* b, size_t dims) { } return res; } - diff --git a/libs/simdvec/native/src/vec/c/amd64/vec_2.cpp b/libs/simdvec/native/src/vec/c/amd64/vec_2.cpp new file mode 100644 index 0000000000000..1606b31907405 --- /dev/null +++ b/libs/simdvec/native/src/vec/c/amd64/vec_2.cpp @@ -0,0 +1,201 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +#include +#include +#include "vec.h" + +#ifdef _MSC_VER +#include +#elif __clang__ +#pragma clang attribute push(__attribute__((target("arch=skylake-avx512"))), apply_to=function) +#include +#elif __GNUC__ +#pragma GCC push_options +#pragma GCC target ("arch=skylake-avx512") +#include +#endif + +#include +#include + +#ifndef STRIDE_BYTES_LEN +#define STRIDE_BYTES_LEN sizeof(__m512i) // Must be a power of 2 +#endif + +// Returns acc + ( p1 * p2 ), for 64-wide int lanes. 
+template +inline __m512i fma8(__m512i acc, const int8_t* p1, const int8_t* p2) { + constexpr int lanes = offsetRegs * STRIDE_BYTES_LEN; + const __m512i a = _mm512_loadu_si512((const __m512i*)(p1 + lanes)); + const __m512i b = _mm512_loadu_si512((const __m512i*)(p2 + lanes)); + // Perform multiplication and create 16-bit values + // Vertically multiply each unsigned 8-bit integer from a with the corresponding + // signed 8-bit integer from b, producing intermediate signed 16-bit integers. + // These values will be at max 32385, at min −32640 + const __m512i dot = _mm512_maddubs_epi16(a, b); + const __m512i ones = _mm512_set1_epi16(1); + // Horizontally add adjacent pairs of intermediate signed 16-bit ints, and pack the results in 32-bit ints. + // Using madd with 1, as this is faster than extract 2 halves, add 16-bit ints, and convert to 32-bit ints. + return _mm512_add_epi32(_mm512_madd_epi16(ones, dot), acc); +} + +static inline int32_t dot7u_inner_avx512(int8_t* a, int8_t* b, size_t dims) { + constexpr int stride8 = 8 * STRIDE_BYTES_LEN; + constexpr int stride4 = 4 * STRIDE_BYTES_LEN; + const int8_t* p1 = a; + const int8_t* p2 = b; + + // Init accumulator(s) with 0 + __m512i acc0 = _mm512_setzero_si512(); + __m512i acc1 = _mm512_setzero_si512(); + __m512i acc2 = _mm512_setzero_si512(); + __m512i acc3 = _mm512_setzero_si512(); + __m512i acc4 = _mm512_setzero_si512(); + __m512i acc5 = _mm512_setzero_si512(); + __m512i acc6 = _mm512_setzero_si512(); + __m512i acc7 = _mm512_setzero_si512(); + + const int8_t* p1End = a + (dims & ~(stride8 - 1)); + while (p1 < p1End) { + acc0 = fma8<0>(acc0, p1, p2); + acc1 = fma8<1>(acc1, p1, p2); + acc2 = fma8<2>(acc2, p1, p2); + acc3 = fma8<3>(acc3, p1, p2); + acc4 = fma8<4>(acc4, p1, p2); + acc5 = fma8<5>(acc5, p1, p2); + acc6 = fma8<6>(acc6, p1, p2); + acc7 = fma8<7>(acc7, p1, p2); + p1 += stride8; + p2 += stride8; + } + + p1End = a + (dims & ~(stride4 - 1)); + while (p1 < p1End) { + acc0 = fma8<0>(acc0, p1, p2); + acc1 = fma8<1>(acc1, p1, p2); + acc2 = fma8<2>(acc2, p1, p2); + acc3 = fma8<3>(acc3, p1, p2); + p1 += stride4; + p2 += stride4; + } + + p1End = a + (dims & ~(STRIDE_BYTES_LEN - 1)); + while (p1 < p1End) { + acc0 = fma8<0>(acc0, p1, p2); + p1 += STRIDE_BYTES_LEN; + p2 += STRIDE_BYTES_LEN; + } + + // reduce (accumulate all) + acc0 = _mm512_add_epi32(_mm512_add_epi32(acc0, acc1), _mm512_add_epi32(acc2, acc3)); + acc4 = _mm512_add_epi32(_mm512_add_epi32(acc4, acc5), _mm512_add_epi32(acc6, acc7)); + return _mm512_reduce_add_epi32(_mm512_add_epi32(acc0, acc4)); +} + +extern "C" +EXPORT int32_t dot7u_2(int8_t* a, int8_t* b, size_t dims) { + int32_t res = 0; + int i = 0; + if (dims > STRIDE_BYTES_LEN) { + i += dims & ~(STRIDE_BYTES_LEN - 1); + res = dot7u_inner_avx512(a, b, i); + } + for (; i < dims; i++) { + res += a[i] * b[i]; + } + return res; +} + +template +inline __m512i sqr8(__m512i acc, const int8_t* p1, const int8_t* p2) { + constexpr int lanes = offsetRegs * STRIDE_BYTES_LEN; + const __m512i a = _mm512_loadu_si512((const __m512i*)(p1 + lanes)); + const __m512i b = _mm512_loadu_si512((const __m512i*)(p2 + lanes)); + + const __m512i dist = _mm512_sub_epi8(a, b); + const __m512i abs_dist = _mm512_abs_epi8(dist); + const __m512i sqr_add = _mm512_maddubs_epi16(abs_dist, abs_dist); + const __m512i ones = _mm512_set1_epi16(1); + // Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the results. 
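The dot kernel above and the squared-distance kernel completed just below both unroll over eight 64-byte accumulators and hand any remainder to a plain scalar loop in the exported dot7u_2/sqr7u_2 wrappers. Their contract reduces to simple per-element integer arithmetic, which the following Java reference spells out; this is a hedged sketch of the expected semantics (the kind of oracle a correctness test could compare the native results against), not code from the library.

// Scalar reference for the native dot7u/sqr7u kernels. Inputs are int8 values
// expected to lie in the 0..127 range, so 32-bit accumulation is ample for
// realistic vector dimensions. This mirrors the scalar tail loops in vec.c.
final class ScalarVectorReference {

    static int dot7u(byte[] a, byte[] b, int dims) {
        int acc = 0;
        for (int i = 0; i < dims; i++) {
            acc += a[i] * b[i];
        }
        return acc;
    }

    static int sqr7u(byte[] a, byte[] b, int dims) {
        int acc = 0;
        for (int i = 0; i < dims; i++) {
            int d = a[i] - b[i];
            acc += d * d;
        }
        return acc;
    }
}

These AVX-512 kernels are only reachable on CPUs for which the revised vec_caps() reports capability level 2. To make the tiered return value (0 = no usable SIMD, 1 = AVX2, 2 = AVX-512) concrete, here is a minimal Java-side dispatch sketch; the class and method names are assumptions for illustration only, not the simdvec library's actual native binding.

// Illustration only: names here are assumptions, not the library's real API.
final class VectorKernelDispatch {

    enum Kernel { SCALAR, AVX2, AVX512 }

    /** Maps the value returned by the native vec_caps() routine to a kernel choice. */
    static Kernel select(int capsLevel) {
        return switch (capsLevel) {
            case 2 -> Kernel.AVX512; // CPUID leaf 7: AVX-512F bit (EBX bit 16) set
            case 1 -> Kernel.AVX2;   // CPUID leaf 7: AVX2 bit (EBX bit 5) set
            default -> Kernel.SCALAR;
        };
    }
}

Returning a small integer rather than a boolean keeps the native interface stable if further tiers are added later, for example the AVX-512 VNNI check that is left commented out in vec_caps().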
+ return _mm512_add_epi32(_mm512_madd_epi16(ones, sqr_add), acc); +} + +static inline int32_t sqr7u_inner_avx512(int8_t *a, int8_t *b, size_t dims) { + constexpr int stride8 = 8 * STRIDE_BYTES_LEN; + constexpr int stride4 = 4 * STRIDE_BYTES_LEN; + const int8_t* p1 = a; + const int8_t* p2 = b; + + // Init accumulator(s) with 0 + __m512i acc0 = _mm512_setzero_si512(); + __m512i acc1 = _mm512_setzero_si512(); + __m512i acc2 = _mm512_setzero_si512(); + __m512i acc3 = _mm512_setzero_si512(); + __m512i acc4 = _mm512_setzero_si512(); + __m512i acc5 = _mm512_setzero_si512(); + __m512i acc6 = _mm512_setzero_si512(); + __m512i acc7 = _mm512_setzero_si512(); + + const int8_t* p1End = a + (dims & ~(stride8 - 1)); + while (p1 < p1End) { + acc0 = sqr8<0>(acc0, p1, p2); + acc1 = sqr8<1>(acc1, p1, p2); + acc2 = sqr8<2>(acc2, p1, p2); + acc3 = sqr8<3>(acc3, p1, p2); + acc4 = sqr8<4>(acc4, p1, p2); + acc5 = sqr8<5>(acc5, p1, p2); + acc6 = sqr8<6>(acc6, p1, p2); + acc7 = sqr8<7>(acc7, p1, p2); + p1 += stride8; + p2 += stride8; + } + + p1End = a + (dims & ~(stride4 - 1)); + while (p1 < p1End) { + acc0 = sqr8<0>(acc0, p1, p2); + acc1 = sqr8<1>(acc1, p1, p2); + acc2 = sqr8<2>(acc2, p1, p2); + acc3 = sqr8<3>(acc3, p1, p2); + p1 += stride4; + p2 += stride4; + } + + p1End = a + (dims & ~(STRIDE_BYTES_LEN - 1)); + while (p1 < p1End) { + acc0 = sqr8<0>(acc0, p1, p2); + p1 += STRIDE_BYTES_LEN; + p2 += STRIDE_BYTES_LEN; + } + + // reduce (accumulate all) + acc0 = _mm512_add_epi32(_mm512_add_epi32(acc0, acc1), _mm512_add_epi32(acc2, acc3)); + acc4 = _mm512_add_epi32(_mm512_add_epi32(acc4, acc5), _mm512_add_epi32(acc6, acc7)); + return _mm512_reduce_add_epi32(_mm512_add_epi32(acc0, acc4)); +} + +extern "C" +EXPORT int32_t sqr7u_2(int8_t* a, int8_t* b, size_t dims) { + int32_t res = 0; + int i = 0; + if (dims > STRIDE_BYTES_LEN) { + i += dims & ~(STRIDE_BYTES_LEN - 1); + res = sqr7u_inner_avx512(a, b, i); + } + for (; i < dims; i++) { + int32_t dist = a[i] - b[i]; + res += dist * dist; + } + return res; +} + +#ifdef __clang__ +#pragma clang attribute pop +#elif __GNUC__ +#pragma GCC pop_options +#endif diff --git a/modules/aggregations/build.gradle b/modules/aggregations/build.gradle index 2724ac8ba1139..a773c751eeaf5 100644 --- a/modules/aggregations/build.gradle +++ b/modules/aggregations/build.gradle @@ -19,7 +19,7 @@ esplugin { restResources { restApi { - include '_common', 'indices', 'cluster', 'index', 'search', 'nodes', 'bulk', 'scripts_painless_execute' + include '_common', 'indices', 'cluster', 'index', 'search', 'nodes', 'bulk', 'scripts_painless_execute', 'put_script' } restTests { // Pulls in all aggregation tests from core AND the forwards v7's core for forwards compatibility diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/DateDerivativeIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/DateDerivativeIT.java index ce7e4c63dc69c..c306e0fbcba06 100644 --- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/DateDerivativeIT.java +++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/DateDerivativeIT.java @@ -15,9 +15,9 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; import 
org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.pipeline.SimpleValue; import org.elasticsearch.search.aggregations.support.AggregationPath; @@ -127,7 +127,7 @@ public void testSingleValuedField() throws Exception { assertThat(buckets.size(), equalTo(3)); ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); + Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); @@ -171,7 +171,7 @@ public void testSingleValuedFieldNormalised() throws Exception { assertThat(buckets.size(), equalTo(3)); ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); + Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); @@ -383,7 +383,7 @@ private static void addNTimes(int amount, String index, ZonedDateTime dateTime, } private static void assertBucket( - Histogram.Bucket bucket, + Bucket bucket, ZonedDateTime expectedKey, long expectedDocCount, Matcher derivativeMatcher, @@ -421,7 +421,7 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { Object[] propertiesCounts = (Object[]) ((InternalAggregation) histo).getProperty("sum.value"); ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); + Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); @@ -500,7 +500,7 @@ public void testMultiValuedField() throws Exception { assertThat(buckets.size(), equalTo(4)); ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); + Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); @@ -574,7 +574,7 @@ public void testPartiallyUnmapped() throws Exception { assertThat(buckets.size(), equalTo(3)); ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); + Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/SerialDiffIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/SerialDiffIT.java index 7cbb298f49931..430f71879cb78 100644 --- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/SerialDiffIT.java +++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/pipeline/SerialDiffIT.java @@ -12,8 +12,8 @@ import org.elasticsearch.aggregations.AggregationIntegTestCase; import org.elasticsearch.common.collect.EvictingQueue; import org.elasticsearch.common.util.Maps; +import 
org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers; import org.elasticsearch.search.aggregations.pipeline.SimpleValue; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; @@ -91,7 +91,7 @@ private void assertValidIterators(Iterator expectedBucketIter, Iterator ex } } - private void assertBucketContents(Histogram.Bucket actual, Double expectedCount, Double expectedValue) { + private void assertBucketContents(Bucket actual, Double expectedCount, Double expectedValue) { // This is a gap bucket SimpleValue countDiff = actual.getAggregations().get("diff_counts"); if (expectedCount == null) { @@ -239,7 +239,7 @@ public void testBasicDiff() { List expectedCounts = testValues.get(MetricTarget.COUNT.toString()); List expectedValues = testValues.get(MetricTarget.VALUE.toString()); - Iterator actualIter = buckets.iterator(); + Iterator actualIter = buckets.iterator(); Iterator expectedBucketIter = mockHisto.iterator(); Iterator expectedCountsIter = expectedCounts.iterator(); Iterator expectedValuesIter = expectedValues.iterator(); @@ -247,7 +247,7 @@ public void testBasicDiff() { while (actualIter.hasNext()) { assertValidIterators(expectedBucketIter, expectedCountsIter, expectedValuesIter); - Histogram.Bucket actual = actualIter.next(); + Bucket actual = actualIter.next(); PipelineAggregationHelperTests.MockBucket expected = expectedBucketIter.next(); Double expectedCount = expectedCountsIter.next(); Double expectedValue = expectedValuesIter.next(); diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java index 07a363ed727c7..dfe0a0642ccc3 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java @@ -239,8 +239,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I @Override public InternalAggregation buildEmptyAggregation() { - List buckets = new ArrayList<>(0); - return new InternalAdjacencyMatrix(name, buckets, metadata()); + return new InternalAdjacencyMatrix(name, List.of(), metadata()); } final long bucketOrd(long owningBucketOrdinal, int filterOrd) { diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java index cfaf4b77a07be..1e3042f8cf1e4 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java @@ -23,6 +23,7 @@ import org.elasticsearch.search.aggregations.bucket.BucketReducer; import org.elasticsearch.search.aggregations.bucket.IteratorAndCurrent; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; +import org.elasticsearch.search.aggregations.bucket.histogram.AbstractHistogramBucket; import 
org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory; @@ -48,28 +49,20 @@ public final class InternalAutoDateHistogram extends InternalMultiBucketAggregat InternalAutoDateHistogram, InternalAutoDateHistogram.Bucket> implements Histogram, HistogramFactory { - public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket, KeyComparable { + public static class Bucket extends AbstractHistogramBucket implements KeyComparable { final long key; - final long docCount; - final InternalAggregations aggregations; - protected final transient DocValueFormat format; public Bucket(long key, long docCount, DocValueFormat format, InternalAggregations aggregations) { - this.format = format; + super(docCount, aggregations, format); this.key = key; - this.docCount = docCount; - this.aggregations = aggregations; } /** * Read from a stream. */ - public Bucket(StreamInput in, DocValueFormat format) throws IOException { - this.format = format; - key = in.readLong(); - docCount = in.readVLong(); - aggregations = InternalAggregations.readFrom(in); + public static Bucket readFrom(StreamInput in, DocValueFormat format) throws IOException { + return new Bucket(in.readLong(), in.readVLong(), format, InternalAggregations.readFrom(in)); } @Override @@ -105,16 +98,6 @@ public Object getKey() { return Instant.ofEpochMilli(key).atZone(ZoneOffset.UTC); } - @Override - public long getDocCount() { - return docCount; - } - - @Override - public InternalAggregations getAggregations() { - return aggregations; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { String keyAsString = format.format(key).toString(); @@ -222,7 +205,7 @@ public InternalAutoDateHistogram(StreamInput in) throws IOException { super(in); bucketInfo = new BucketInfo(in); format = in.readNamedWriteable(DocValueFormat.class); - buckets = in.readCollectionAsList(stream -> new Bucket(stream, format)); + buckets = in.readCollectionAsList(stream -> Bucket.readFrom(stream, format)); this.targetBuckets = in.readVInt(); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { bucketInnerInterval = in.readVLong(); @@ -286,7 +269,7 @@ public InternalAutoDateHistogram create(List buckets) { @Override public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) { - return new Bucket(prototype.key, prototype.docCount, prototype.format, aggregations); + return new Bucket(prototype.key, prototype.getDocCount(), prototype.getFormatter(), aggregations); } /** @@ -376,14 +359,14 @@ private List mergeBuckets( long roundedBucketKey = reduceRounding.round(bucket.key); if (Double.isNaN(key)) { key = roundedBucketKey; - sameKeyedBuckets.add(createBucket(key, bucket.docCount, bucket.aggregations)); + sameKeyedBuckets.add(createBucket(key, bucket.getDocCount(), bucket.getAggregations())); } else if (roundedBucketKey == key) { - sameKeyedBuckets.add(createBucket(key, bucket.docCount, bucket.aggregations)); + sameKeyedBuckets.add(createBucket(key, bucket.getDocCount(), bucket.getAggregations())); } else { mergedBuckets.add(reduceBucket(sameKeyedBuckets, reduceContext)); sameKeyedBuckets.clear(); key = roundedBucketKey; - sameKeyedBuckets.add(createBucket(key, bucket.docCount, bucket.aggregations)); + sameKeyedBuckets.add(createBucket(key, bucket.getDocCount(), 
bucket.getAggregations())); } } if (sameKeyedBuckets.isEmpty() == false) { @@ -594,7 +577,7 @@ private BucketReduceResult mergeConsecutiveBuckets( sameKeyedBuckets.clear(); key = current.preparedRounding.round(bucket.key); } - sameKeyedBuckets.add(new Bucket(Math.round(key), bucket.docCount, format, bucket.aggregations)); + sameKeyedBuckets.add(new Bucket(Math.round(key), bucket.getDocCount(), format, bucket.getAggregations())); } if (sameKeyedBuckets.isEmpty() == false) { mergedBuckets.add(reduceBucket(sameKeyedBuckets, reduceContext)); diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java index 53142f6cdf601..f238419687cfc 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java @@ -105,7 +105,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I @Override public InternalAggregation buildEmptyAggregation() { - return new InternalTimeSeries(name, new ArrayList<>(), false, metadata()); + return new InternalTimeSeries(name, List.of(), false, metadata()); } @Override diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java index e7af9f5745d6d..9b72f30542c54 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java @@ -501,7 +501,7 @@ public void testReadFromPre830() throws IOException { assertEquals(1, deserialized.getBuckets().size()); InternalAutoDateHistogram.Bucket bucket = deserialized.getBuckets().iterator().next(); assertEquals(10, bucket.key); - assertEquals(100, bucket.docCount); + assertEquals(100, bucket.getDocCount()); } } } diff --git a/modules/aggregations/src/yamlRestTest/java/org/elasticsearch/aggregations/AggregationsClientYamlTestSuiteIT.java b/modules/aggregations/src/yamlRestTest/java/org/elasticsearch/aggregations/AggregationsClientYamlTestSuiteIT.java index a3c737e2795d8..33cb223569b9b 100644 --- a/modules/aggregations/src/yamlRestTest/java/org/elasticsearch/aggregations/AggregationsClientYamlTestSuiteIT.java +++ b/modules/aggregations/src/yamlRestTest/java/org/elasticsearch/aggregations/AggregationsClientYamlTestSuiteIT.java @@ -18,11 +18,21 @@ public class AggregationsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @ClassRule - public static ElasticsearchCluster cluster = ElasticsearchCluster.local() - .module("aggregations") - .module("lang-painless") - .feature(FeatureFlag.TIME_SERIES_MODE) - .build(); + public static ElasticsearchCluster cluster = makeCluster(); + + private static ElasticsearchCluster makeCluster() { + var cluster = ElasticsearchCluster.local().module("aggregations").module("lang-painless").feature(FeatureFlag.TIME_SERIES_MODE); + + // On Serverless, we want to disallow scripted metrics aggs per default. + // The following override allows us to still run the scripted metrics agg tests without breaking bwc. 
+ boolean disableAllowListPerDefault = Boolean.parseBoolean( + System.getProperty("tests.disable_scripted_metric_allow_list_per_default") + ); + if (disableAllowListPerDefault) { + return cluster.setting("search.aggs.only_allowed_metric_scripts", "false").build(); + } + return cluster.build(); + } public AggregationsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { super(testCandidate); diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_disallow_scripted_metrics.yml b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/scripted_metric_allow_list.yml similarity index 100% rename from rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_disallow_scripted_metrics.yml rename to modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/scripted_metric_allow_list.yml diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UniqueTokenFilter.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UniqueTokenFilter.java index c3cef19797d80..01a1e05ff40f2 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UniqueTokenFilter.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UniqueTokenFilter.java @@ -9,7 +9,7 @@ package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; -import org.apache.lucene.analysis.TokenFilter; +import org.apache.lucene.analysis.FilteringTokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; @@ -20,7 +20,7 @@ * A token filter that generates unique tokens. Can remove unique tokens only on the same * position increments as well. */ -class UniqueTokenFilter extends TokenFilter { +class UniqueTokenFilter extends FilteringTokenFilter { private final CharTermAttribute termAttribute = addAttribute(CharTermAttribute.class); private final PositionIncrementAttribute posIncAttribute = addAttribute(PositionIncrementAttribute.class); @@ -38,33 +38,29 @@ class UniqueTokenFilter extends TokenFilter { } @Override - public final boolean incrementToken() throws IOException { - while (input.incrementToken()) { - final char term[] = termAttribute.buffer(); - final int length = termAttribute.length(); + protected boolean accept() { + final char term[] = termAttribute.buffer(); + final int length = termAttribute.length(); - boolean duplicate; - if (onlyOnSamePosition) { - final int posIncrement = posIncAttribute.getPositionIncrement(); - if (posIncrement > 0) { - previous.clear(); - } + boolean duplicate; + final int posIncrement = posIncAttribute.getPositionIncrement(); - duplicate = (posIncrement == 0 && previous.contains(term, 0, length)); - } else { - duplicate = previous.contains(term, 0, length); + if (onlyOnSamePosition) { + if (posIncrement > 0) { + previous.clear(); } - // clone the term, and add to the set of seen terms. - char saved[] = new char[length]; - System.arraycopy(term, 0, saved, 0, length); - previous.add(saved); - - if (duplicate == false) { - return true; - } + duplicate = (posIncrement == 0 && previous.contains(term, 0, length)); + } else { + duplicate = previous.contains(term, 0, length); } - return false; + + // clone the term, and add to the set of seen terms. 
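The switch above from extending TokenFilter to extending FilteringTokenFilter is what actually fixes the position increments: the Lucene base class carries the increments of rejected tokens over to the next accepted one, whereas the old incrementToken() loop silently dropped them. A simplified, self-contained sketch of that base-class behaviour (not Lucene's exact code) is shown here for reference.

import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;

import java.io.IOException;

// Simplified sketch of what FilteringTokenFilter does: position increments of
// tokens rejected by accept() are added to the next accepted token instead of
// being lost. Not Lucene's exact implementation.
abstract class FilteringSketch extends TokenFilter {

    private final PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);

    protected FilteringSketch(TokenStream in) {
        super(in);
    }

    protected abstract boolean accept() throws IOException;

    @Override
    public final boolean incrementToken() throws IOException {
        int skippedPositions = 0;
        while (input.incrementToken()) {
            if (accept()) {
                if (skippedPositions != 0) {
                    posIncAtt.setPositionIncrement(posIncAtt.getPositionIncrement() + skippedPositions);
                }
                return true;
            }
            skippedPositions += posIncAtt.getPositionIncrement();
        }
        return false;
    }
}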
+ char saved[] = new char[length]; + System.arraycopy(term, 0, saved, 0, length); + previous.add(saved); + + return duplicate == false; } @Override diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UniqueTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UniqueTokenFilterFactory.java index 15f15530d112b..e9f29f590f991 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UniqueTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UniqueTokenFilterFactory.java @@ -12,19 +12,26 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; public class UniqueTokenFilterFactory extends AbstractTokenFilterFactory { + static final String ONLY_ON_SAME_POSITION = "only_on_same_position"; private final boolean onlyOnSamePosition; + private final boolean correctPositionIncrement; UniqueTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(name, settings); - this.onlyOnSamePosition = settings.getAsBoolean("only_on_same_position", false); + this.onlyOnSamePosition = settings.getAsBoolean(ONLY_ON_SAME_POSITION, false); + this.correctPositionIncrement = indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.UNIQUE_TOKEN_FILTER_POS_FIX); } @Override public TokenStream create(TokenStream tokenStream) { + if (correctPositionIncrement == false) { + return new XUniqueTokenFilter(tokenStream, onlyOnSamePosition); + } return new UniqueTokenFilter(tokenStream, onlyOnSamePosition); } } diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XUniqueTokenFilter.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XUniqueTokenFilter.java new file mode 100644 index 0000000000000..35ac6ed5398c2 --- /dev/null +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XUniqueTokenFilter.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.analysis.common; + +import org.apache.lucene.analysis.CharArraySet; +import org.apache.lucene.analysis.TokenFilter; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; + +import java.io.IOException; + +/** + * A token filter that generates unique tokens. Can remove unique tokens only on the same position increments as well. + * + * @deprecated this filter an old implementation superseded by {@link UniqueTokenFilter} only here for bwc reasons and should not be used + * any more. 
For details see https://github.com/elastic/elasticsearch/issues/35411 + */ +// TODO remove this filter in 9.0.0 +@Deprecated +class XUniqueTokenFilter extends TokenFilter { + + private final CharTermAttribute termAttribute = addAttribute(CharTermAttribute.class); + private final PositionIncrementAttribute posIncAttribute = addAttribute(PositionIncrementAttribute.class); + + private final CharArraySet previous = new CharArraySet(8, false); + private final boolean onlyOnSamePosition; + + XUniqueTokenFilter(TokenStream in, boolean onlyOnSamePosition) { + super(in); + this.onlyOnSamePosition = onlyOnSamePosition; + } + + @Override + public final boolean incrementToken() throws IOException { + while (input.incrementToken()) { + final char term[] = termAttribute.buffer(); + final int length = termAttribute.length(); + + final boolean duplicate; + if (onlyOnSamePosition) { + final int posIncrement = posIncAttribute.getPositionIncrement(); + if (posIncrement > 0) { + previous.clear(); + } + + duplicate = (posIncrement == 0 && previous.contains(term, 0, length)); + } else { + duplicate = previous.contains(term, 0, length); + } + + // clone the term, and add to the set of seen terms. + char saved[] = new char[length]; + System.arraycopy(term, 0, saved, 0, length); + previous.add(saved); + + if (duplicate == false) { + return true; + } + } + return false; + } + + @Override + public final void reset() throws IOException { + super.reset(); + previous.clear(); + } +} diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/UniqueTokenFilterTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/UniqueTokenFilterTests.java index 4f5aa3e94d7b6..a2a5180b55ad6 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/UniqueTokenFilterTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/UniqueTokenFilterTests.java @@ -9,15 +9,28 @@ package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; +import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.tests.analysis.MockTokenizer; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.test.index.IndexVersionUtils; import java.io.IOException; +import static org.apache.lucene.tests.analysis.BaseTokenStreamTestCase.assertAnalyzesTo; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; public class UniqueTokenFilterTests extends ESTestCase { public void testSimple() throws IOException { @@ -32,15 +45,126 @@ protected TokenStreamComponents createComponents(String fieldName) { TokenStream test = analyzer.tokenStream("test", "this test with test"); test.reset(); CharTermAttribute termAttribute = test.addAttribute(CharTermAttribute.class); + PositionIncrementAttribute positionIncrement = 
test.addAttribute(PositionIncrementAttribute.class); assertThat(test.incrementToken(), equalTo(true)); assertThat(termAttribute.toString(), equalTo("this")); + assertEquals(1, positionIncrement.getPositionIncrement()); assertThat(test.incrementToken(), equalTo(true)); assertThat(termAttribute.toString(), equalTo("test")); + assertEquals(1, positionIncrement.getPositionIncrement()); assertThat(test.incrementToken(), equalTo(true)); assertThat(termAttribute.toString(), equalTo("with")); + assertEquals(1, positionIncrement.getPositionIncrement()); assertThat(test.incrementToken(), equalTo(false)); } + + public void testOnlyOnSamePosition() throws IOException { + Analyzer analyzer = new Analyzer() { + @Override + protected TokenStreamComponents createComponents(String fieldName) { + Tokenizer t = new MockTokenizer(MockTokenizer.WHITESPACE, false); + return new TokenStreamComponents(t, new UniqueTokenFilter(t, true)); + } + }; + + TokenStream test = analyzer.tokenStream("test", "this test with test"); + test.reset(); + CharTermAttribute termAttribute = test.addAttribute(CharTermAttribute.class); + PositionIncrementAttribute positionIncrement = test.addAttribute(PositionIncrementAttribute.class); + assertThat(test.incrementToken(), equalTo(true)); + assertThat(termAttribute.toString(), equalTo("this")); + assertEquals(1, positionIncrement.getPositionIncrement()); + + assertThat(test.incrementToken(), equalTo(true)); + assertThat(termAttribute.toString(), equalTo("test")); + assertEquals(1, positionIncrement.getPositionIncrement()); + + assertThat(test.incrementToken(), equalTo(true)); + assertThat(termAttribute.toString(), equalTo("with")); + assertEquals(1, positionIncrement.getPositionIncrement()); + + assertThat(test.incrementToken(), equalTo(true)); + assertThat(termAttribute.toString(), equalTo("test")); + assertEquals(1, positionIncrement.getPositionIncrement()); + + assertThat(test.incrementToken(), equalTo(false)); + } + + public void testPositionIncrement() throws IOException { + Analyzer analyzer = new Analyzer() { + @Override + protected TokenStreamComponents createComponents(String fieldName) { + Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false); + TokenFilter filters = new EdgeNGramTokenFilter(tokenizer, 1, 3, false); + filters = new UniqueTokenFilter(filters); + return new TokenStreamComponents(tokenizer, filters); + } + }; + + assertAnalyzesTo( + analyzer, + "foo bar bro bar bro baz", + new String[] { "f", "fo", "foo", "b", "ba", "bar", "br", "bro", "baz" }, + new int[] { 0, 0, 0, 4, 4, 4, 8, 8, 20 }, + new int[] { 3, 3, 3, 7, 7, 7, 11, 11, 23 }, + new int[] { 1, 0, 0, 1, 0, 0, 1, 0, 3 } + ); + analyzer.close(); + } + + /** + * For bwc reasons we need to return the legacy filter for indices create before 7.7 + */ + public void testOldVersionGetXUniqueTokenFilter() throws IOException { + + Settings settings = Settings.builder() + .put( + IndexMetadata.SETTING_VERSION_CREATED, + IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.MINIMUM_COMPATIBLE, + IndexVersionUtils.getPreviousVersion(IndexVersions.UNIQUE_TOKEN_FILTER_POS_FIX) + ) + ) + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + try (CommonAnalysisPlugin plugin = new CommonAnalysisPlugin()) { + + TokenFilterFactory tff = plugin.getTokenFilters().get("unique").get(idxSettings, null, "unique", settings); + TokenStream ts = tff.create(new TokenStream() { + + @Override + public boolean incrementToken() throws IOException { + return false; + 
} + }); + assertThat(ts, instanceOf(XUniqueTokenFilter.class)); + } + } + + public void testNewVersionGetUniqueTokenFilter() throws IOException { + + Settings settings = Settings.builder() + .put( + IndexMetadata.SETTING_VERSION_CREATED, + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.UNIQUE_TOKEN_FILTER_POS_FIX, IndexVersion.current()) + ) + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + try (CommonAnalysisPlugin plugin = new CommonAnalysisPlugin()) { + + TokenFilterFactory tff = plugin.getTokenFilters().get("unique").get(idxSettings, null, "unique", settings); + TokenStream ts = tff.create(new TokenStream() { + + @Override + public boolean incrementToken() throws IOException { + return false; + } + }); + assertThat(ts, instanceOf(UniqueTokenFilter.class)); + } + } } diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/XUniqueTokenFilterTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/XUniqueTokenFilterTests.java new file mode 100644 index 0000000000000..3db13ad793838 --- /dev/null +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/XUniqueTokenFilterTests.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.analysis.common; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.Tokenizer; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.tests.analysis.MockTokenizer; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class XUniqueTokenFilterTests extends ESTestCase { + public void testSimple() throws IOException { + Analyzer analyzer = new Analyzer() { + @Override + protected TokenStreamComponents createComponents(String fieldName) { + Tokenizer t = new MockTokenizer(MockTokenizer.WHITESPACE, false); + return new TokenStreamComponents(t, new XUniqueTokenFilter(t, false)); + } + }; + + TokenStream test = analyzer.tokenStream("test", "this test with test"); + test.reset(); + CharTermAttribute termAttribute = test.addAttribute(CharTermAttribute.class); + assertThat(test.incrementToken(), equalTo(true)); + assertThat(termAttribute.toString(), equalTo("this")); + + assertThat(test.incrementToken(), equalTo(true)); + assertThat(termAttribute.toString(), equalTo("test")); + + assertThat(test.incrementToken(), equalTo(true)); + assertThat(termAttribute.toString(), equalTo("with")); + + assertThat(test.incrementToken(), equalTo(false)); + } +} diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml index c8c3b032200b7..2e6c445dc5e59 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml @@ -242,6 +242,52 @@ - length: { tokens: 2 } - match: { 
tokens.0.token: Foo } - match: { tokens.1.token: Bar! } +--- +"unique only_on_same_pos": + - do: + indices.analyze: + body: + text: ll l o + tokenizer: whitespace + filter: [{type: "ngram", "max_gram": 1}, {type: "unique", "only_on_same_position": true}] + - length: { tokens: 3 } + - match: { tokens.0.token: l } + - match: { tokens.1.token: l } + - match: { tokens.2.token: o } + + - do: + indices.analyze: + body: + text: ll l o + tokenizer: whitespace + filter: [{type: "ngram", "max_gram": 1}, {type: "unique", "only_on_same_position": false}] + - length: { tokens: 2 } + - match: { tokens.0.token: l } + - match: { tokens.1.token: o } +--- +"unique test position correction": + - requires: + cluster_features: "gte_v8.15.0" + reason: 'position fix for unique filter is available since 8.15.0' + - do: + indices.analyze: + body: + text: "los angeles leadership academy" + tokenizer: whitespace + filter: [{type: "edge_ngram", "max_gram": 2}, "unique"] + - length: { tokens: 6 } + - match: { tokens.0.token: l } + - match: { tokens.0.position: 0 } + - match: { tokens.1.token: lo } + - match: { tokens.1.position: 0 } + - match: { tokens.2.token: a } + - match: { tokens.2.position: 1 } + - match: { tokens.3.token: an } + - match: { tokens.3.position: 1 } + - match: { tokens.4.token: le } + - match: { tokens.4.position: 2 } + - match: { tokens.5.token: ac } + - match: { tokens.5.position: 3 } --- "synonym": diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/AbstractDataStreamIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/AbstractDataStreamIT.java index ca33f08324539..027ac7c736c8a 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/AbstractDataStreamIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/AbstractDataStreamIT.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; /** * This base class provides the boilerplate to simplify the development of integration tests. 
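In the data stream test changes that follow, the assertBusy() polls get an explicit 15-second budget rather than relying on the framework's default wait. A small hedged sketch of that overload is shown here; the test class and the templateExists() helper are hypothetical, and only the assertBusy(runnable, time, unit) call shape is taken from the test framework.

import org.elasticsearch.test.ESTestCase;

import java.util.concurrent.TimeUnit;

// Hypothetical test: illustrates polling with an explicit timeout, as the data
// stream ITs below now do. templateExists() is a made-up stand-in condition.
public class ExplicitTimeoutExampleTests extends ESTestCase {

    public void testTemplateEventuallyExists() throws Exception {
        assertBusy(() -> {
            assertTrue("index template should have been created", templateExists("my-template"));
        }, 15, TimeUnit.SECONDS);
    }

    private static boolean templateExists(String name) {
        return true; // placeholder so the sketch stays self-contained
    }
}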
@@ -53,7 +54,7 @@ static void waitForIndexTemplate(RestClient client, String indexTemplate) throws } catch (ResponseException e) { fail(e.getMessage()); } - }); + }, 15, TimeUnit.SECONDS); } static void createDataStream(RestClient client, String name) throws IOException { diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamUpgradeRestIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamUpgradeRestIT.java index f447e5b80f8c8..39cdf77d04810 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamUpgradeRestIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamUpgradeRestIT.java @@ -22,6 +22,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import static org.elasticsearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; @@ -306,6 +307,6 @@ private void waitForLogsComponentTemplateInitialization() throws Exception { // Throw the exception, if it was an error we did not anticipate throw responseException; } - }); + }, 15, TimeUnit.SECONDS); } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java index f5fa0db839230..2a1f33831c8c2 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java @@ -217,7 +217,7 @@ private List findRoutingPaths(String indexName, Settings allSettings, Li private static void extractPath(List routingPaths, Mapper mapper) { if (mapper instanceof KeywordFieldMapper keywordFieldMapper) { if (keywordFieldMapper.fieldType().isDimension()) { - routingPaths.add(mapper.name()); + routingPaths.add(mapper.fullPath()); } } } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java index 85f0d354576a4..cf6911850921b 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java @@ -83,7 +83,11 @@ public void testGetAdditionalIndexSettings() throws Exception { settings, List.of(new CompressedXContent(mapping)) ); - assertThat(result.size(), equalTo(3)); + // The index.time_series.end_time setting requires index.mode to be set to time_series adding it here so that we read this setting: + // (in production the index.mode setting is usually provided in an index or component template) + result = builder().put(result).put("index.mode", "time_series").build(); + assertThat(result.size(), equalTo(4)); + assertThat(IndexSettings.MODE.get(result), equalTo(IndexMode.TIME_SERIES)); assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_BACK_TIME.getMillis()))); assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); assertThat(IndexMetadata.INDEX_ROUTING_PATH.get(result), contains("field3")); @@ -124,7 +128,11 @@ public void 
testGetAdditionalIndexSettingsIndexRoutingPathAlreadyDefined() throw settings, List.of(new CompressedXContent(mapping)) ); - assertThat(result.size(), equalTo(2)); + // The index.time_series.end_time setting requires index.mode to be set to time_series adding it here so that we read this setting: + // (in production the index.mode setting is usually provided in an index or component template) + result = builder().put(result).put("index.mode", "time_series").build(); + assertThat(result.size(), equalTo(3)); + assertThat(result.get(IndexSettings.MODE.getKey()), equalTo("time_series")); assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_BACK_TIME.getMillis()))); assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); } @@ -190,7 +198,11 @@ public void testGetAdditionalIndexSettingsMappingsMerging() throws Exception { settings, List.of(new CompressedXContent(mapping1), new CompressedXContent(mapping2), new CompressedXContent(mapping3)) ); - assertThat(result.size(), equalTo(3)); + // The index.time_series.end_time setting requires index.mode to be set to time_series adding it here so that we read this setting: + // (in production the index.mode setting is usually provided in an index or component template) + result = builder().put(result).put("index.mode", "time_series").build(); + assertThat(result.size(), equalTo(4)); + assertThat(IndexSettings.MODE.get(result), equalTo(IndexMode.TIME_SERIES)); assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_BACK_TIME.getMillis()))); assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); assertThat(IndexMetadata.INDEX_ROUTING_PATH.get(result), containsInAnyOrder("field1", "field3")); @@ -211,7 +223,11 @@ public void testGetAdditionalIndexSettingsNoMappings() { settings, List.of() ); - assertThat(result.size(), equalTo(2)); + // The index.time_series.end_time setting requires index.mode to be set to time_series adding it here so that we read this setting: + // (in production the index.mode setting is usually provided in an index or component template) + result = builder().put(result).put("index.mode", "time_series").build(); + assertThat(result.size(), equalTo(3)); + assertThat(result.get(IndexSettings.MODE.getKey()), equalTo("time_series")); assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_BACK_TIME.getMillis()))); assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); } @@ -232,7 +248,11 @@ public void testGetAdditionalIndexSettingsLookAheadTime() throws Exception { settings, List.of(new CompressedXContent("{}")) ); - assertThat(result.size(), equalTo(2)); + // The index.time_series.end_time setting requires index.mode to be set to time_series adding it here so that we read this setting: + // (in production the index.mode setting is usually provided in an index or component template) + result = builder().put(result).put("index.mode", "time_series").build(); + assertThat(result.size(), equalTo(3)); + assertThat(result.get(IndexSettings.MODE.getKey()), equalTo("time_series")); assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_BACK_TIME.getMillis()))); assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(lookAheadTime.getMillis()))); } @@ -253,7 
+273,11 @@ public void testGetAdditionalIndexSettingsLookBackTime() throws Exception { settings, List.of(new CompressedXContent("{}")) ); - assertThat(result.size(), equalTo(2)); + // The index.time_series.end_time setting requires index.mode to be set to time_series adding it here so that we read this setting: + // (in production the index.mode setting is usually provided in an index or component template) + result = builder().put(result).put("index.mode", "time_series").build(); + assertThat(result.size(), equalTo(3)); + assertThat(result.get(IndexSettings.MODE.getKey()), equalTo("time_series")); assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(lookBackTime.getMillis()))); assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); } @@ -363,7 +387,11 @@ public void testGetAdditionalIndexSettingsMigrateToTsdb() { settings, List.of() ); - assertThat(result.size(), equalTo(2)); + // The index.time_series.end_time setting requires index.mode to be set to time_series adding it here so that we read this setting: + // (in production the index.mode setting is usually provided in an index or component template) + result = builder().put(result).put("index.mode", "time_series").build(); + assertThat(result.size(), equalTo(3)); + assertThat(result.get(IndexSettings.MODE.getKey()), equalTo("time_series")); assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_BACK_TIME.getMillis()))); assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); } @@ -428,7 +456,8 @@ public void testGenerateRoutingPathFromDynamicTemplate() throws Exception { } """; Settings result = generateTsdbSettings(mapping, now); - assertThat(result.size(), equalTo(3)); + assertThat(result.size(), equalTo(4)); + assertThat(IndexSettings.MODE.get(result), equalTo(IndexMode.TIME_SERIES)); assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_BACK_TIME.getMillis()))); assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); assertThat(IndexMetadata.INDEX_ROUTING_PATH.get(result), containsInAnyOrder("host.id", "prometheus.labels.*")); @@ -467,7 +496,8 @@ public void testGenerateRoutingPathFromDynamicTemplateWithMultiplePathMatchEntri } """; Settings result = generateTsdbSettings(mapping, now); - assertThat(result.size(), equalTo(3)); + assertThat(result.size(), equalTo(4)); + assertThat(IndexSettings.MODE.get(result), equalTo(IndexMode.TIME_SERIES)); assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_BACK_TIME.getMillis()))); assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); assertThat( @@ -516,7 +546,8 @@ public void testGenerateRoutingPathFromDynamicTemplateWithMultiplePathMatchEntri } """; Settings result = generateTsdbSettings(mapping, now); - assertThat(result.size(), equalTo(3)); + assertThat(result.size(), equalTo(4)); + assertThat(IndexSettings.MODE.get(result), equalTo(IndexMode.TIME_SERIES)); assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_BACK_TIME.getMillis()))); assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); assertThat( @@ -569,7 +600,8 @@ public void 
testGenerateRoutingPathFromDynamicTemplate_templateWithNoPathMatch() } """; Settings result = generateTsdbSettings(mapping, now); - assertThat(result.size(), equalTo(3)); + assertThat(result.size(), equalTo(4)); + assertThat(IndexSettings.MODE.get(result), equalTo(IndexMode.TIME_SERIES)); assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_BACK_TIME.getMillis()))); assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); assertThat(IndexMetadata.INDEX_ROUTING_PATH.get(result), containsInAnyOrder("host.id", "prometheus.labels.*")); @@ -646,7 +678,8 @@ public void testGenerateRoutingPathFromPassThroughObject() throws Exception { } """; Settings result = generateTsdbSettings(mapping, now); - assertThat(result.size(), equalTo(3)); + assertThat(result.size(), equalTo(4)); + assertThat(IndexSettings.MODE.get(result), equalTo(IndexMode.TIME_SERIES)); assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_BACK_TIME.getMillis()))); assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); assertThat(IndexMetadata.INDEX_ROUTING_PATH.get(result), containsInAnyOrder("labels.*")); @@ -657,7 +690,7 @@ private Settings generateTsdbSettings(String mapping, Instant now) throws IOExce String dataStreamName = "logs-app1"; Settings settings = Settings.EMPTY; - return provider.getAdditionalIndexSettings( + var result = provider.getAdditionalIndexSettings( DataStream.getDefaultBackingIndexName(dataStreamName, 1), dataStreamName, true, @@ -666,6 +699,9 @@ private Settings generateTsdbSettings(String mapping, Instant now) throws IOExce settings, List.of(new CompressedXContent(mapping)) ); + // The index.time_series.end_time setting requires index.mode to be set to time_series adding it here so that we read this setting: + // (in production the index.mode setting is usually provided in an index or component template) + return builder().put(result).put("index.mode", "time_series").build(); } } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java index 86f6dea220e84..8156345b83b4c 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; @@ -398,11 +397,9 @@ public void testRolloverClusterStateWithBrokenOlderTsdbDataStream() throws Excep for (int i = 0; i < numberOfBackingIndices; i++) { var im = rolloverMetadata.index(rolloverMetadata.dataStreams().get(dataStreamName).getIndices().get(i)); - var startTime1 = IndexSettings.TIME_SERIES_START_TIME.get(im.getSettings()); - var endTime1 = IndexSettings.TIME_SERIES_END_TIME.get(im.getSettings()); - assertThat(startTime1.toEpochMilli(), equalTo(DateUtils.MAX_MILLIS_BEFORE_MINUS_9999)); - assertThat(endTime1.toEpochMilli(), 
equalTo(DateUtils.MAX_MILLIS_BEFORE_9999)); - assertThat(im.getIndexMode(), equalTo(null)); + assertThat(im.getTimeSeriesStart(), nullValue()); + assertThat(im.getTimeSeriesEnd(), nullValue()); + assertThat(im.getIndexMode(), nullValue()); } { var im = rolloverMetadata.index( diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java index 97959fa385241..eb35c44d30331 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java @@ -61,7 +61,7 @@ public void testGetTimestampFieldTypeForTsdbDataStream() throws IOException { DocWriteResponse indexResponse = indexDoc(); var indicesService = getInstanceFromNode(IndicesService.class); - var result = indicesService.getTimestampFieldType(indexResponse.getShardId().getIndex()); + var result = indicesService.getTimestampFieldTypeInfo(indexResponse.getShardId().getIndex()); assertThat(result, notNullValue()); } @@ -70,7 +70,7 @@ public void testGetTimestampFieldTypeForDataStream() throws IOException { DocWriteResponse indexResponse = indexDoc(); var indicesService = getInstanceFromNode(IndicesService.class); - var result = indicesService.getTimestampFieldType(indexResponse.getShardId().getIndex()); + var result = indicesService.getTimestampFieldTypeInfo(indexResponse.getShardId().getIndex()); assertThat(result, nullValue()); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/DataStreamTimestampFieldMapperTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/DataStreamTimestampFieldMapperTests.java index c8774b18c7e9e..89625ee8a312e 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/DataStreamTimestampFieldMapperTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/DataStreamTimestampFieldMapperTests.java @@ -177,11 +177,29 @@ public void testValidateDefaultIgnoreMalformed() throws Exception { b.startObject("@timestamp"); b.field("type", "date"); b.endObject(); + b.startObject("summary"); + { + b.startObject("properties"); + { + b.startObject("@timestamp"); + b.field("type", "date"); + b.endObject(); + } + b.endObject(); + } + b.endObject(); }) ); assertThat(mapperService, notNullValue()); assertThat(mapperService.documentMapper().mappers().getMapper("@timestamp"), notNullValue()); assertThat(((DateFieldMapper) mapperService.documentMapper().mappers().getMapper("@timestamp")).ignoreMalformed(), is(false)); + DateFieldMapper summaryTimestamp = (DateFieldMapper) (mapperService.documentMapper() + .mappers() + .objectMappers() + .get("summary") + .getMapper("@timestamp")); + assertThat(summaryTimestamp, notNullValue()); + assertThat(summaryTimestamp.ignoreMalformed(), is(true)); } { MapperService mapperService = createMapperService( @@ -193,11 +211,22 @@ public void testValidateDefaultIgnoreMalformed() throws Exception { b.field("type", "date"); b.field("ignore_malformed", false); b.endObject(); + b.startObject("summary.@timestamp"); + b.field("type", "date"); + b.field("ignore_malformed", false); + b.endObject(); }) ); assertThat(mapperService, notNullValue()); assertThat(mapperService.documentMapper().mappers().getMapper("@timestamp"), notNullValue()); assertThat(((DateFieldMapper) 
mapperService.documentMapper().mappers().getMapper("@timestamp")).ignoreMalformed(), is(false)); + DateFieldMapper summaryTimestamp = (DateFieldMapper) (mapperService.documentMapper() + .mappers() + .objectMappers() + .get("summary") + .getMapper("@timestamp")); + assertThat(summaryTimestamp, notNullValue()); + assertThat(summaryTimestamp.ignoreMalformed(), is(false)); } } } diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml index 609b0c3d0c33c..35e3f38d55c26 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml @@ -211,7 +211,7 @@ setup: "Create data stream with failure store": - requires: cluster_features: ["gte_v8.15.0"] - reason: "data stream failure stores REST structure changed in 8.15+" + reason: "data stream failure stores default settings changed in 8.15+" - do: ingest.put_pipeline: @@ -368,6 +368,7 @@ setup: expand_wildcards: hidden - is_false: .$fsidx0name.settings.index.default_pipeline - is_false: .$fsidx0name.settings.index.final_pipeline + - is_true: .$fsidx0name.settings.index.failure_store.version - do: indices.get_settings: @@ -382,6 +383,7 @@ setup: expand_wildcards: hidden - is_false: .$fsidx1name.settings.index.default_pipeline - is_false: .$fsidx1name.settings.index.final_pipeline + - is_true: .$fsidx1name.settings.index.failure_store.version - do: indices.delete_data_stream: diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml index a1ded40ce1852..8c23232bb457c 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml @@ -423,6 +423,89 @@ dynamic templates - conflicting aliases: - match: { aggregations.filterA.tsids.buckets.0.key: "KGejYryCnrIkXYZdIF_Q8F8X2dfFIGKYisFh7t1RGGWOWgWU7C0RiFE" } - match: { aggregations.filterA.tsids.buckets.0.doc_count: 2 } +--- +dynamic templates - conflicting aliases with top-level field: + - requires: + cluster_features: ["mapper.pass_through_priority"] + reason: support for priority in passthrough objects + - do: + allowed_warnings: + - "index template [my-dynamic-template] has index patterns [otel] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" + indices.put_index_template: + name: my-dynamic-template + body: + index_patterns: [otel] + data_stream: {} + template: + settings: + index: + number_of_shards: 1 + mode: time_series + time_series: + start_time: 2023-08-31T13:03:08.138Z + + mappings: + properties: + body: + type: match_only_text + attributes: + type: passthrough + dynamic: true + time_series_dimension: true + priority: 1 + scope: + properties: + attributes: + type: passthrough + dynamic: true + time_series_dimension: true + priority: 2 + resource: + properties: + attributes: + type: passthrough + dynamic: true + time_series_dimension: true + priority: 3 + metrics: + type: passthrough + dynamic: true + priority: 0 + dynamic_templates: + - counter_metric: + mapping: + type: integer + time_series_metric: counter + 
ignore_malformed: true + - strings_as_keyword: + mapping: + type: keyword + ignore_above: 1024 + match_mapping_type: string + path_match: "*attributes.*" + + - do: + bulk: + index: otel + refresh: true + body: + - '{ "create": { "dynamic_templates": { "metrics.data": "counter_metric" } } }' + - '{ "@timestamp": "2023-09-01T13:03:08.138Z", "metrics": {"data": "10"}, "body": "top-level", "attributes": {"body": "attribute"}, "scope": {"attributes": {"body": "scope" }}, "resource": {"attributes": {"body": "resource" }}}' + - match: { errors: false } + + - do: + search: + index: otel + body: + size: 1 + fields: ["*"] + + - match: { hits.total.value: 1 } + - match: { hits.hits.0.fields.body: [ top-level ] } + - match: { hits.hits.0.fields.attributes\.body: [ attribute ] } + - match: { hits.hits.0.fields.scope\.attributes\.body: [ scope ] } + - match: { hits.hits.0.fields.resource\.attributes\.body: [ resource ] } + --- dynamic templates with nesting: - requires: @@ -875,3 +958,97 @@ passthrough objects with duplicate priority: resource.attributes: type: passthrough priority: 1 + +--- +dimensions with ignore_malformed and ignore_above: + - requires: + cluster_features: ["mapper.keyword_dimension_ignore_above"] + reason: support for ignore_above on keyword dimensions + - do: + allowed_warnings: + - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" + indices.put_index_template: + name: my-dynamic-template + body: + index_patterns: [k9s*] + data_stream: {} + template: + settings: + index: + number_of_shards: 1 + mode: time_series + time_series: + start_time: 2023-08-31T13:03:08.138Z + + mappings: + properties: + attributes: + type: passthrough + time_series_dimension: true + priority: 0 + properties: + keyword_dim: + type: keyword + keyword_dim_ignored: + type: keyword + ignore_above: 2 + long_dim_ignored: + type: long + ignore_malformed: true + data: + type: long + time_series_metric: gauge + ignore_malformed: true + + - do: + bulk: + index: k9s + refresh: true + body: + - '{ "create": { } }' + - '{ "@timestamp": "2023-09-01T13:03:08.138Z", "data": 10, "attributes.keyword_dim": "foo", "attributes.keyword_dim_ignored": "ignored", "attributes.long_dim_ignored": "ignored" }' + - '{ "create": { } }' + - '{ "@timestamp": "2023-09-01T13:03:08.138Z", "data": 20, "attributes.keyword_dim": "foo", "attributes.keyword_dim_ignored": "ignored too", "attributes.long_dim_ignored": "ignored" }' + - '{ "create": { } }' + - '{ "@timestamp": "2023-09-01T13:03:08.138Z", "data": 30, "attributes.keyword_dim_ignored": "ignored 3" }' + - '{ "create": { } }' + - '{ "@timestamp": "2023-09-01T13:03:08.138Z", "data": 40, "attributes.keyword_dim_ignored": "ignored 4" }' + - '{ "create": { } }' + - '{ "@timestamp": "2023-09-01T13:03:08.138Z", "data": 50, "attributes.keyword_dim_ignored": "duplicate" }' + - '{ "create": { } }' + - '{ "@timestamp": "2023-09-01T13:03:08.138Z", "data": 60, "attributes.keyword_dim_ignored": "duplicate" }' + - match: { errors: true } + - match: { items.0.create.result: created } + - match: { items.1.create.result: created } + - match: { items.2.create.result: created } + - match: { items.3.create.result: created } + - match: { items.4.create.result: created } + - match: { items.5.create.error.type: version_conflict_engine_exception } + + - do: + search: + index: k9s + body: + sort: + - data: asc + + - match: { 
hits.total.value: 5 } + - match: { hits.hits.0._ignored: ["attributes.keyword_dim_ignored", "attributes.long_dim_ignored"]} + - match: { hits.hits.1._ignored: ["attributes.keyword_dim_ignored", "attributes.long_dim_ignored"]} + - match: { hits.hits.2._ignored: ["attributes.keyword_dim_ignored"]} + - match: { hits.hits.3._ignored: ["attributes.keyword_dim_ignored"]} + - match: { hits.hits.4._ignored: ["attributes.keyword_dim_ignored"]} + + - do: + search: + index: k9s + body: + size: 0 + aggs: + keyword_dims: + terms: + field: keyword_dim + + - length: { aggregations.keyword_dims.buckets: 1 } + - match: { aggregations.keyword_dims.buckets.0.key: "foo" } + - match: { aggregations.keyword_dims.buckets.0.doc_count: 2 } diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_rollover_failure_store.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_rollover_failure_store.yml index dcbb0d2e465db..0742435f045fb 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_rollover_failure_store.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_rollover_failure_store.yml @@ -416,7 +416,7 @@ teardown: "Rolling over a failure store on a data stream without the failure store enabled should work": - do: allowed_warnings: - - "index template [my-other-template] has index patterns [data-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template] will take precedence during new index creation" + - "index template [my-other-template] has index patterns [other-data-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-other-template] will take precedence during new index creation" indices.put_index_template: name: my-other-template body: diff --git a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/DocumentSizeObserverWithPipelinesIT.java b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/DocumentSizeObserverWithPipelinesIT.java index 7bb875f8b6f69..16a8013ae9c4a 100644 --- a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/DocumentSizeObserverWithPipelinesIT.java +++ b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/DocumentSizeObserverWithPipelinesIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.plugins.internal; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.common.bytes.BytesArray; @@ -89,13 +90,12 @@ public DocumentParsingProvider getDocumentParsingProvider() { // returns a static instance, because we want to assert that the wrapping is called only once return new DocumentParsingProvider() { @Override - public DocumentSizeObserver newFixedSizeDocumentObserver(long normalisedBytesParsed) { - providedFixedSize.set(normalisedBytesParsed); - return new TestDocumentSizeObserver(normalisedBytesParsed); - } - - @Override - public DocumentSizeObserver newDocumentSizeObserver() { + public DocumentSizeObserver newDocumentSizeObserver(DocWriteRequest request) { + if (request instanceof IndexRequest indexRequest && indexRequest.getNormalisedBytesParsed() > 0) { + long normalisedBytesParsed = indexRequest.getNormalisedBytesParsed(); + 
providedFixedSize.set(normalisedBytesParsed); + return new TestDocumentSizeObserver(normalisedBytesParsed); + } return new TestDocumentSizeObserver(0L); } @@ -137,6 +137,7 @@ public Map map() throws IOException { public long normalisedBytesParsed() { return mapCounter; } + } } diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index f755a27b478cc..5bdb6da5c7b29 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -64,6 +64,15 @@ tasks.named("internalClusterTest") { onlyIf("OS != windows") { OS.current() != OS.WINDOWS } } +tasks.named('forbiddenApisTest').configure { + //we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage + modifyBundledSignatures { bundledSignatures -> + bundledSignatures -= 'jdk-non-portable' + bundledSignatures += 'jdk-internal' + bundledSignatures + } +} + tasks.named("forbiddenPatterns").configure { exclude '**/*.mmdb' } diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java index 9dcd8abc7bc57..9eab00fbadf20 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java @@ -242,7 +242,7 @@ public void testGeoIpDatabasesDownload() throws Exception { Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb", "MyCustomGeoLite2-City.mmdb"), state.getDatabases().keySet() ); - GeoIpTaskState.Metadata metadata = state.get(id); + GeoIpTaskState.Metadata metadata = state.getDatabases().get(id); int size = metadata.lastChunk() - metadata.firstChunk() + 1; assertResponse( prepareSearch(GeoIpDownloader.DATABASES_INDEX).setSize(size) diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java index 266d40f2f9d56..efae8fa0c50ca 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java @@ -64,7 +64,7 @@ import java.util.zip.GZIPInputStream; import static org.elasticsearch.core.Strings.format; -import static org.elasticsearch.persistent.PersistentTasksCustomMetadata.getTaskWithId; +import static org.elasticsearch.ingest.geoip.GeoIpTaskState.getGeoIpTaskState; /** * A component that is responsible for making the databases maintained by {@link GeoIpDownloader} @@ -179,11 +179,10 @@ public Boolean isValid(String databaseFile) { ClusterState currentState = clusterService.state(); assert currentState != null; - PersistentTasksCustomMetadata.PersistentTask task = getTaskWithId(currentState, GeoIpDownloader.GEOIP_DOWNLOADER); - if (task == null || task.getState() == null) { + GeoIpTaskState state = getGeoIpTaskState(currentState); + if (state == null) { return true; } - GeoIpTaskState state = (GeoIpTaskState) task.getState(); GeoIpTaskState.Metadata metadata = state.getDatabases().get(databaseFile); // we never remove metadata from cluster state, if metadata is null we deal with built-in database, which is always valid if (metadata == null) { @@ -270,12 +269,11 @@ void checkDatabases(ClusterState state) { } } - PersistentTasksCustomMetadata.PersistentTask task = 
PersistentTasksCustomMetadata.getTaskWithId( - state, - GeoIpDownloader.GEOIP_DOWNLOADER - ); - // Empty state will purge stale entries in databases map. - GeoIpTaskState taskState = task == null || task.getState() == null ? GeoIpTaskState.EMPTY : (GeoIpTaskState) task.getState(); + GeoIpTaskState taskState = getGeoIpTaskState(state); + if (taskState == null) { + // Note: an empty state will purge stale entries in databases map + taskState = GeoIpTaskState.EMPTY; + } taskState.getDatabases().entrySet().stream().filter(e -> e.getValue().isValid(state.getMetadata().settings())).forEach(e -> { String name = e.getKey(); @@ -291,7 +289,7 @@ void checkDatabases(ClusterState state) { try { retrieveAndUpdateDatabase(name, metadata); } catch (Exception ex) { - logger.error(() -> "attempt to download database [" + name + "] failed", ex); + logger.error(() -> "failed to retrieve database [" + name + "]", ex); } }); @@ -511,4 +509,5 @@ public Set getFilesInTemp() { public CacheStats getCacheStats() { return cache.getCacheStats(); } + } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java index 713e5111853a7..13394a2a0c7cc 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java @@ -111,16 +111,25 @@ public class GeoIpDownloader extends AllocatedPersistentTask { Supplier atLeastOneGeoipProcessorSupplier ) { super(id, type, action, description, parentTask, headers); - this.httpClient = httpClient; this.client = client; + this.httpClient = httpClient; this.clusterService = clusterService; this.threadPool = threadPool; - endpoint = ENDPOINT_SETTING.get(settings); + this.endpoint = ENDPOINT_SETTING.get(settings); this.pollIntervalSupplier = pollIntervalSupplier; this.eagerDownloadSupplier = eagerDownloadSupplier; this.atLeastOneGeoipProcessorSupplier = atLeastOneGeoipProcessorSupplier; } + void setState(GeoIpTaskState state) { + // this is for injecting the state in GeoIpDownloaderTaskExecutor#nodeOperation just after the task instance has been created + // by the PersistentTasksNodeService -- since the GeoIpDownloader is newly created, the state will be null, and the passed-in + // state cannot be null + assert this.state == null; + assert state != null; + this.state = state; + } + // visible for testing void updateDatabases() throws IOException { var clusterState = clusterService.state(); @@ -161,23 +170,28 @@ private List fetchDatabasesOverview() throws IOException { } // visible for testing - void processDatabase(Map databaseInfo) { + void processDatabase(final Map databaseInfo) { String name = databaseInfo.get("name").toString().replace(".tgz", "") + ".mmdb"; String md5 = (String) databaseInfo.get("md5_hash"); - if (state.contains(name) && Objects.equals(md5, state.get(name).md5())) { - updateTimestamp(name, state.get(name)); - return; - } - logger.debug("downloading geoip database [{}]", name); String url = databaseInfo.get("url").toString(); if (url.startsWith("http") == false) { - // relative url, add it after last slash (i.e resolve sibling) or at the end if there's no slash after http[s]:// + // relative url, add it after last slash (i.e. resolve sibling) or at the end if there's no slash after http[s]:// int lastSlash = endpoint.substring(8).lastIndexOf('/'); url = (lastSlash != -1 ? 
endpoint.substring(0, lastSlash + 8) : endpoint) + "/" + url; } + processDatabase(name, md5, url); + } + + private void processDatabase(final String name, final String md5, final String url) { + Metadata metadata = state.getDatabases().getOrDefault(name, Metadata.EMPTY); + if (Objects.equals(metadata.md5(), md5)) { + updateTimestamp(name, metadata); + return; + } + logger.debug("downloading geoip database [{}]", name); long start = System.currentTimeMillis(); try (InputStream is = httpClient.get(url)) { - int firstChunk = state.contains(name) ? state.get(name).lastChunk() + 1 : 0; + int firstChunk = metadata.lastChunk() + 1; // if there is no metadata, then Metadata.EMPTY.lastChunk() + 1 = 0 int lastChunk = indexChunks(name, is, firstChunk, md5, start); if (lastChunk > firstChunk) { state = state.put(name, new Metadata(start, firstChunk, lastChunk - 1, md5, start)); @@ -264,14 +278,13 @@ static byte[] getChunk(InputStream is) throws IOException { return buf; } - void setState(GeoIpTaskState state) { - this.state = state; - } - /** * Downloads the geoip databases now, and schedules them to be downloaded again after pollInterval. */ void runDownloader() { + // by the time we reach here, the state will never be null + assert state != null; + if (isCancelled() || isCompleted()) { return; } @@ -305,22 +318,19 @@ public void requestReschedule() { } private void cleanDatabases() { - long expiredDatabases = state.getDatabases() + List> expiredDatabases = state.getDatabases() .entrySet() .stream() .filter(e -> e.getValue().isValid(clusterService.state().metadata().settings()) == false) - .peek(e -> { - String name = e.getKey(); - Metadata meta = e.getValue(); - deleteOldChunks(name, meta.lastChunk() + 1); - state = state.put( - name, - new Metadata(meta.lastUpdate(), meta.firstChunk(), meta.lastChunk(), meta.md5(), meta.lastCheck() - 1) - ); - updateTaskState(); - }) - .count(); - stats = stats.expiredDatabases((int) expiredDatabases); + .toList(); + expiredDatabases.forEach(e -> { + String name = e.getKey(); + Metadata meta = e.getValue(); + deleteOldChunks(name, meta.lastChunk() + 1); + state = state.put(name, new Metadata(meta.lastUpdate(), meta.firstChunk(), meta.lastChunk(), meta.md5(), meta.lastCheck() - 1)); + updateTaskState(); + }); + stats = stats.expiredDatabases(expiredDatabases.size()); } @Override diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java index 0a423cb375e88..09ac488f96e2d 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java @@ -105,7 +105,7 @@ public final class GeoIpDownloaderTaskExecutor extends PersistentTasksExecutor getDatabases() { return databases; } - public boolean contains(String name) { - return databases.containsKey(name); - } - - public Metadata get(String name) { - return databases.get(name); - } - @Override public boolean equals(Object o) { if (this == o) return true; @@ -138,7 +134,13 @@ public void writeTo(StreamOutput out) throws IOException { record Metadata(long lastUpdate, int firstChunk, int lastChunk, String md5, long lastCheck) implements ToXContentObject { - static final String NAME = GEOIP_DOWNLOADER + "-metadata"; + /** + * An empty Metadata object useful for getOrDefault -type calls. 
Crucially, the 'lastChunk' is -1, so it's safe to use + * with logic that says the new firstChunk is the old lastChunk + 1. + */ + static Metadata EMPTY = new Metadata(-1, -1, -1, "", -1); + + private static final String NAME = GEOIP_DOWNLOADER + "-metadata"; private static final ParseField LAST_CHECK = new ParseField("last_check"); private static final ParseField LAST_UPDATE = new ParseField("last_update"); private static final ParseField FIRST_CHUNK = new ParseField("first_chunk"); @@ -200,4 +202,18 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } } + + /** + * Retrieves the geoip downloader's task state from the cluster state. This may return null in some circumstances, + * for example if the geoip downloader task hasn't been created yet (which it wouldn't be if it's disabled). + * + * @param state the cluster state to read the task state from + * @return the geoip downloader's task state or null if there is not a state to read + */ + @Nullable + static GeoIpTaskState getGeoIpTaskState(ClusterState state) { + PersistentTasksCustomMetadata.PersistentTask task = getTaskWithId(state, GeoIpDownloader.GEOIP_DOWNLOADER); + return (task == null) ? null : (GeoIpTaskState) task.getState(); + } + } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/HttpClient.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/HttpClient.java index 188d826b05ff5..8efc4dc2e74bd 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/HttpClient.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/HttpClient.java @@ -15,14 +15,16 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.rest.RestStatus; -import java.io.BufferedInputStream; import java.io.IOException; import java.io.InputStream; +import java.net.Authenticator; import java.net.HttpURLConnection; +import java.net.PasswordAuthentication; import java.net.URL; import java.security.AccessController; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; +import java.util.Objects; import static java.net.HttpURLConnection.HTTP_MOVED_PERM; import static java.net.HttpURLConnection.HTTP_MOVED_TEMP; @@ -32,49 +34,89 @@ class HttpClient { - byte[] getBytes(String url) throws IOException { - return get(url).readAllBytes(); + // a private sentinel value for representing the idea that there's no auth for some request. + // this allows us to have a not-null requirement on the methods that do accept an auth. + // if you don't want auth, then don't use those methods. 
;) + private static final PasswordAuthentication NO_AUTH = new PasswordAuthentication("no_auth", "no_auth_unused".toCharArray()); + + PasswordAuthentication auth(final String username, final String password) { + return new PasswordAuthentication(username, password.toCharArray()); + } + + byte[] getBytes(final String url) throws IOException { + return getBytes(NO_AUTH, url); + } + + byte[] getBytes(final PasswordAuthentication auth, final String url) throws IOException { + return get(auth, url).readAllBytes(); + } + + InputStream get(final String url) throws IOException { + return get(NO_AUTH, url); } - InputStream get(String urlToGet) throws IOException { + InputStream get(final PasswordAuthentication auth, final String url) throws IOException { + Objects.requireNonNull(auth); + Objects.requireNonNull(url); + + final String originalAuthority = new URL(url).getAuthority(); + return doPrivileged(() -> { - String url = urlToGet; - HttpURLConnection conn = createConnection(url); + String innerUrl = url; + HttpURLConnection conn = createConnection(auth, innerUrl); int redirectsCount = 0; while (true) { switch (conn.getResponseCode()) { case HTTP_OK: - return new BufferedInputStream(getInputStream(conn)); + return getInputStream(conn); case HTTP_MOVED_PERM: case HTTP_MOVED_TEMP: case HTTP_SEE_OTHER: if (redirectsCount++ > 50) { - throw new IllegalStateException("too many redirects connection to [" + urlToGet + "]"); + throw new IllegalStateException("too many redirects connection to [" + url + "]"); + } + + // deal with redirections (including relative urls) + final String location = conn.getHeaderField("Location"); + final URL base = new URL(innerUrl); + final URL next = new URL(base, location); + innerUrl = next.toExternalForm(); + + // compare the *original* authority and the next authority to determine whether to include auth details. + // this means that the host and port (if it is provided explicitly) are considered. it also means that if we + // were to ping-pong back to the original authority, then we'd start including the auth details again. 
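// --- Illustrative sketch (not part of this change): the credential-forwarding rule described in the comments above. ---
// Auth is only re-sent when the redirect target has the same authority (host, plus port when given explicitly)
// as the originally requested URL; any cross-authority hop falls back to an unauthenticated request.
// The class and method names below are hypothetical and exist only for this standalone illustration.
import java.net.MalformedURLException;
import java.net.URL;

class RedirectAuthRuleSketch {
    /** Returns true when credentials may be forwarded to {@code nextUrl}. */
    static boolean sameAuthority(String originalUrl, String nextUrl) throws MalformedURLException {
        String originalAuthority = new URL(originalUrl).getAuthority();
        String nextAuthority = new URL(nextUrl).getAuthority();
        // "example.com:8443" does not equal "example.com", so an explicitly stated port is part of the comparison
        return originalAuthority.equals(nextAuthority);
    }

    public static void main(String[] args) throws Exception {
        System.out.println(sameAuthority("http://example.com/db.tgz", "http://example.com/files/db.tgz")); // true  -> keep auth
        System.out.println(sameAuthority("http://example.com/db.tgz", "http://cdn.example.com/db.tgz"));   // false -> drop auth
    }
}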
+ final String nextAuthority = next.getAuthority(); + if (originalAuthority.equals(nextAuthority)) { + conn = createConnection(auth, innerUrl); + } else { + conn = createConnection(NO_AUTH, innerUrl); } - String location = conn.getHeaderField("Location"); - URL base = new URL(url); - URL next = new URL(base, location); // Deal with relative URLs - url = next.toExternalForm(); - conn = createConnection(url); break; case HTTP_NOT_FOUND: - throw new ResourceNotFoundException("{} not found", urlToGet); + throw new ResourceNotFoundException("{} not found", url); default: int responseCode = conn.getResponseCode(); - throw new ElasticsearchStatusException("error during downloading {}", RestStatus.fromCode(responseCode), urlToGet); + throw new ElasticsearchStatusException("error during downloading {}", RestStatus.fromCode(responseCode), url); } } }); } @SuppressForbidden(reason = "we need socket connection to download data from internet") - private static InputStream getInputStream(HttpURLConnection conn) throws IOException { + private static InputStream getInputStream(final HttpURLConnection conn) throws IOException { return conn.getInputStream(); } - private static HttpURLConnection createConnection(String url) throws IOException { - HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection(); + private static HttpURLConnection createConnection(final PasswordAuthentication auth, final String url) throws IOException { + final HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection(); + if (auth != NO_AUTH) { + conn.setAuthenticator(new Authenticator() { + protected PasswordAuthentication getPasswordAuthentication() { + return auth; + } + }); + } conn.setConnectTimeout(10000); conn.setReadTimeout(10000); conn.setDoOutput(false); @@ -82,7 +124,7 @@ private static HttpURLConnection createConnection(String url) throws IOException return conn; } - private static R doPrivileged(CheckedSupplier supplier) throws IOException { + private static R doPrivileged(final CheckedSupplier supplier) throws IOException { SpecialPermission.check(); try { return AccessController.doPrivileged((PrivilegedExceptionAction) supplier::get); diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java index d84e1aac303d9..6a83fe69473f7 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java @@ -30,10 +30,17 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.index.reindex.DeleteByQueryAction; +import org.elasticsearch.index.reindex.DeleteByQueryRequest; +import org.elasticsearch.ingest.geoip.stats.GeoIpDownloaderStats; import org.elasticsearch.node.Node; +import org.elasticsearch.persistent.PersistentTaskResponse; import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksService; +import org.elasticsearch.persistent.UpdatePersistentTaskStatusAction; import org.elasticsearch.telemetry.metric.MeterRegistry; import 
org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; @@ -48,6 +55,9 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.time.temporal.ChronoUnit; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -62,6 +72,8 @@ import static org.elasticsearch.ingest.geoip.GeoIpDownloader.MAX_CHUNK_SIZE; import static org.elasticsearch.tasks.TaskId.EMPTY_TASK_ID; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verifyNoInteractions; import static org.mockito.Mockito.when; @@ -75,8 +87,9 @@ public class GeoIpDownloaderTests extends ESTestCase { private GeoIpDownloader geoIpDownloader; @Before - public void setup() { + public void setup() throws IOException { httpClient = mock(HttpClient.class); + when(httpClient.getBytes(anyString())).thenReturn("[]".getBytes(StandardCharsets.UTF_8)); clusterService = mock(ClusterService.class); threadPool = new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), MeterRegistry.NOOP); when(clusterService.getClusterSettings()).thenReturn( @@ -108,7 +121,13 @@ public void setup() { () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), () -> true - ); + ) { + { + GeoIpTaskParams geoIpTaskParams = mock(GeoIpTaskParams.class); + when(geoIpTaskParams.getWriteableName()).thenReturn(GeoIpDownloader.GEOIP_DOWNLOADER); + init(new PersistentTasksService(clusterService, threadPool, client), null, null, 0); + } + }; } @After @@ -276,9 +295,8 @@ public void testProcessDatabaseNew() throws IOException { () -> true ) { @Override - void updateTaskState() { - assertEquals(0, state.get("test").firstChunk()); - assertEquals(10, state.get("test").lastChunk()); + protected void updateTimestamp(String name, GeoIpTaskState.Metadata metadata) { + fail(); } @Override @@ -289,19 +307,22 @@ int indexChunks(String name, InputStream is, int chunk, String expectedMd5, long } @Override - protected void updateTimestamp(String name, GeoIpTaskState.Metadata metadata) { - fail(); + void updateTaskState() { + assertEquals(0, state.getDatabases().get("test.mmdb").firstChunk()); + assertEquals(10, state.getDatabases().get("test.mmdb").lastChunk()); } @Override void deleteOldChunks(String name, int firstChunk) { - assertEquals("test", name); + assertEquals("test.mmdb", name); assertEquals(0, firstChunk); } }; geoIpDownloader.setState(GeoIpTaskState.EMPTY); geoIpDownloader.processDatabase(Map.of("name", "test.tgz", "url", "http://a.b/t1", "md5_hash", "1")); + GeoIpDownloaderStats stats = geoIpDownloader.getStatus(); + assertEquals(0, stats.getFailedDownloads()); } public void testProcessDatabaseUpdate() throws IOException { @@ -325,9 +346,8 @@ public void testProcessDatabaseUpdate() throws IOException { () -> true ) { @Override - void updateTaskState() { - assertEquals(9, state.get("test.mmdb").firstChunk()); - assertEquals(10, state.get("test.mmdb").lastChunk()); + protected void updateTimestamp(String name, GeoIpTaskState.Metadata metadata) { + fail(); } @Override @@ -338,8 +358,9 @@ int indexChunks(String name, InputStream is, int chunk, String expectedMd5, long } @Override - protected void 
updateTimestamp(String name, GeoIpTaskState.Metadata metadata) { - fail(); + void updateTaskState() { + assertEquals(9, state.getDatabases().get("test.mmdb").firstChunk()); + assertEquals(10, state.getDatabases().get("test.mmdb").lastChunk()); } @Override @@ -351,6 +372,8 @@ void deleteOldChunks(String name, int firstChunk) { geoIpDownloader.setState(GeoIpTaskState.EMPTY.put("test.mmdb", new GeoIpTaskState.Metadata(0, 5, 8, "0", 0))); geoIpDownloader.processDatabase(Map.of("name", "test.tgz", "url", "http://a.b/t1", "md5_hash", "1")); + GeoIpDownloaderStats stats = geoIpDownloader.getStatus(); + assertEquals(0, stats.getFailedDownloads()); } public void testProcessDatabaseSame() throws IOException { @@ -376,8 +399,9 @@ public void testProcessDatabaseSame() throws IOException { () -> true ) { @Override - void updateTaskState() { - fail(); + protected void updateTimestamp(String name, GeoIpTaskState.Metadata newMetadata) { + assertEquals(metadata, newMetadata); + assertEquals("test.mmdb", name); } @Override @@ -387,9 +411,8 @@ int indexChunks(String name, InputStream is, int chunk, String expectedMd5, long } @Override - protected void updateTimestamp(String name, GeoIpTaskState.Metadata newMetadata) { - assertEquals(metadata, newMetadata); - assertEquals("test.mmdb", name); + void updateTaskState() { + fail(); } @Override @@ -399,6 +422,8 @@ void deleteOldChunks(String name, int firstChunk) { }; geoIpDownloader.setState(taskState); geoIpDownloader.processDatabase(Map.of("name", "test.tgz", "url", "http://a.b/t1", "md5_hash", "1")); + GeoIpDownloaderStats stats = geoIpDownloader.getStatus(); + assertEquals(0, stats.getFailedDownloads()); } @SuppressWarnings("unchecked") @@ -534,6 +559,78 @@ public void testUpdateDatabasesIndexNotReady() { verifyNoInteractions(httpClient); } + public void testThatRunDownloaderDeletesExpiredDatabases() { + /* + * This test puts some expired databases and some non-expired ones into the GeoIpTaskState, and then calls runDownloader(), making + * sure that the expired databases have been deleted. 
+ */ + AtomicInteger updatePersistentTaskStateCount = new AtomicInteger(0); + AtomicInteger deleteCount = new AtomicInteger(0); + int expiredDatabasesCount = randomIntBetween(1, 100); + int unexpiredDatabasesCount = randomIntBetween(0, 100); + Map databases = new HashMap<>(); + for (int i = 0; i < expiredDatabasesCount; i++) { + databases.put("expiredDatabase" + i, newGeoIpTaskStateMetadata(true)); + } + for (int i = 0; i < unexpiredDatabasesCount; i++) { + databases.put("unexpiredDatabase" + i, newGeoIpTaskStateMetadata(false)); + } + GeoIpTaskState geoIpTaskState = new GeoIpTaskState(databases); + geoIpDownloader.setState(geoIpTaskState); + client.addHandler( + UpdatePersistentTaskStatusAction.INSTANCE, + (UpdatePersistentTaskStatusAction.Request request, ActionListener taskResponseListener) -> { + PersistentTasksCustomMetadata.Assignment assignment = mock(PersistentTasksCustomMetadata.Assignment.class); + PersistentTasksCustomMetadata.PersistentTask persistentTask = new PersistentTasksCustomMetadata.PersistentTask<>( + GeoIpDownloader.GEOIP_DOWNLOADER, + GeoIpDownloader.GEOIP_DOWNLOADER, + new GeoIpTaskParams(), + request.getAllocationId(), + assignment + ); + updatePersistentTaskStateCount.incrementAndGet(); + taskResponseListener.onResponse(new PersistentTaskResponse(new PersistentTask<>(persistentTask, request.getState()))); + } + ); + client.addHandler( + DeleteByQueryAction.INSTANCE, + (DeleteByQueryRequest request, ActionListener flushResponseActionListener) -> { + deleteCount.incrementAndGet(); + } + ); + geoIpDownloader.runDownloader(); + assertThat(geoIpDownloader.getStatus().getExpiredDatabases(), equalTo(expiredDatabasesCount)); + for (int i = 0; i < expiredDatabasesCount; i++) { + // This currently fails because we subtract one millisecond from the lastChecked time + // assertThat(geoIpDownloader.state.getDatabases().get("expiredDatabase" + i).lastCheck(), equalTo(-1L)); + } + for (int i = 0; i < unexpiredDatabasesCount; i++) { + assertThat( + geoIpDownloader.state.getDatabases().get("unexpiredDatabase" + i).lastCheck(), + greaterThanOrEqualTo(Instant.now().minus(30, ChronoUnit.DAYS).toEpochMilli()) + ); + } + assertThat(deleteCount.get(), equalTo(expiredDatabasesCount)); + assertThat(updatePersistentTaskStateCount.get(), equalTo(expiredDatabasesCount)); + geoIpDownloader.runDownloader(); + /* + * The following two lines assert current behavior that might not be desirable -- we continue to delete expired databases every + * time that runDownloader runs. This seems unnecessary. 
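// --- Illustrative sketch (not part of this change): the expiry rule this test's fixtures assume. ---
// newGeoIpTaskStateMetadata() below builds "expired" entries whose lastCheck is older than 30 days and
// "unexpired" entries newer than that, so cleanDatabases() is expected to delete exactly the expired set.
// isExpired() here is a hypothetical stand-in for Metadata#isValid(settings), which in the production code
// reads the (configurable) validity period from cluster settings rather than hard-coding 30 days.
import java.time.Duration;
import java.time.Instant;

class DatabaseExpirySketch {
    static final Duration ASSUMED_VALIDITY = Duration.ofDays(30); // matches the fixtures in this test

    static boolean isExpired(long lastCheckEpochMillis, Instant now) {
        return Instant.ofEpochMilli(lastCheckEpochMillis).isBefore(now.minus(ASSUMED_VALIDITY));
    }

    public static void main(String[] args) {
        Instant now = Instant.now();
        System.out.println(isExpired(now.minus(Duration.ofDays(45)).toEpochMilli(), now)); // true  -> deleted
        System.out.println(isExpired(now.minus(Duration.ofDays(10)).toEpochMilli(), now)); // false -> kept
    }
}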
+ */ + assertThat(deleteCount.get(), equalTo(expiredDatabasesCount * 2)); + assertThat(updatePersistentTaskStateCount.get(), equalTo(expiredDatabasesCount * 2)); + } + + private GeoIpTaskState.Metadata newGeoIpTaskStateMetadata(boolean expired) { + Instant lastChecked; + if (expired) { + lastChecked = Instant.now().minus(randomIntBetween(31, 100), ChronoUnit.DAYS); + } else { + lastChecked = Instant.now().minus(randomIntBetween(0, 29), ChronoUnit.DAYS); + } + return new GeoIpTaskState.Metadata(0, 0, 0, randomAlphaOfLength(20), lastChecked.toEpochMilli()); + } + private static class MockClient extends NoOpClient { private final Map, BiConsumer>> handlers = new HashMap<>(); diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/HttpClientTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/HttpClientTests.java new file mode 100644 index 0000000000000..a3f4395d74755 --- /dev/null +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/HttpClientTests.java @@ -0,0 +1,208 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest.geoip; + +import com.sun.net.httpserver.BasicAuthenticator; +import com.sun.net.httpserver.HttpServer; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.test.ESTestCase; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.OutputStream; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.PasswordAuthentication; +import java.nio.charset.StandardCharsets; + +import static org.hamcrest.Matchers.equalTo; + +public class HttpClientTests extends ESTestCase { + + private static HttpServer server; + + @BeforeClass + public static void startServer() throws Throwable { + server = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + server.createContext("/hello/", exchange -> { + try { + String response = "hello world"; + exchange.sendResponseHeaders(200, response.length()); + try (OutputStream os = exchange.getResponseBody()) { + os.write(response.getBytes(StandardCharsets.UTF_8)); + } + } catch (Exception e) { + fail(e); + } + }); + server.createContext("/404/", exchange -> { + try { + exchange.sendResponseHeaders(404, 0); + } catch (Exception e) { + fail(e); + } + }); + server.createContext("/auth/", exchange -> { + try { + String response = "super secret hello world"; + exchange.sendResponseHeaders(200, response.length()); + try (OutputStream os = exchange.getResponseBody()) { + os.write(response.getBytes(StandardCharsets.UTF_8)); + } + } catch (Exception e) { + fail(e); + } + }).setAuthenticator(new BasicAuthenticator("some realm") { + @Override + public boolean checkCredentials(String username, String password) { + return "user".equals(username) && "pass".equals(password); + } + }); + server.createContext("/redirect", exchange -> { + // path is either like this: /redirect/count/destination/ + // or just: /redirect + try { + final String path = exchange.getRequestURI().getPath(); + int count; + String destination; + if (path.lastIndexOf("/") > 0) { + // path is /redirect/count/destination/, so pull out 
the bits + String[] bits = path.split("/"); + count = Integer.parseInt(bits[2]); + destination = bits[3]; + } else { + // path is just /redirect + count = -1; + destination = "hello"; + } + + if (count == -1) { + // send a relative redirect, i.e. just "hello/" + exchange.getResponseHeaders().add("Location", destination + "/"); + } else if (count > 0) { + // decrement the count and send a redirect to either a full url ("http://...") + // or to an absolute url on this same server ("/...") + count--; + String location = "/redirect/" + count + "/" + destination + "/"; + if (count % 2 == 0) { + location = url(location); // do the full url + } + exchange.getResponseHeaders().add("Location", location); + } else { + // the count has hit zero, so ship them off to the destination + exchange.getResponseHeaders().add("Location", "/" + destination + "/"); + } + exchange.sendResponseHeaders(302, 0); + } catch (Exception e) { + fail(e); + } + }); + server.start(); + } + + @AfterClass + public static void stopServer() { + server.stop(0); + } + + private static String url(final String path) { + String hostname = server.getAddress().getHostString(); + int port = server.getAddress().getPort(); + return "http://" + hostname + ":" + port + path; + } + + private static String bytesToString(final byte[] bytes) { + return new String(bytes, StandardCharsets.UTF_8); + } + + public void testGetBytes() throws Exception { + HttpClient client = new HttpClient(); + String u = url("/hello/"); + String response = bytesToString(client.getBytes(u)); + assertThat(response, equalTo("hello world")); + } + + public void testGetBytes404() { + HttpClient client = new HttpClient(); + String u = url("/404/"); + Exception e = expectThrows(ResourceNotFoundException.class, () -> client.getBytes(u)); + assertThat(e.getMessage(), equalTo(u + " not found")); + } + + public void testRedirect() throws Exception { + HttpClient client = new HttpClient(); + String u = url("/redirect/3/hello/"); + String response = bytesToString(client.getBytes(u)); + assertThat(response, equalTo("hello world")); + } + + public void testRelativeRedirect() throws Exception { + HttpClient client = new HttpClient(); + String u = url("/redirect"); + String response = bytesToString(client.getBytes(u)); + assertThat(response, equalTo("hello world")); + } + + public void testRedirectTo404() { + HttpClient client = new HttpClient(); + String u = url("/redirect/5/404/"); + Exception e = expectThrows(ResourceNotFoundException.class, () -> client.getBytes(u)); + assertThat(e.getMessage(), equalTo(u + " not found")); + } + + public void testTooManyRedirects() { + HttpClient client = new HttpClient(); + String u = url("/redirect/100/hello/"); + Exception e = expectThrows(IllegalStateException.class, () -> client.getBytes(u)); + assertThat(e.getMessage(), equalTo("too many redirects connection to [" + u + "]")); + } + + public void testGetBytes401() { + HttpClient client = new HttpClient(); + String u = url("/auth/"); + { + Exception e = expectThrows(ElasticsearchStatusException.class, () -> client.getBytes(u)); + assertThat(e.getMessage(), equalTo("error during downloading " + u)); + } + { + PasswordAuthentication auth = client.auth("bad", "credentials"); + Exception e = expectThrows(ElasticsearchStatusException.class, () -> client.getBytes(auth, u)); + assertThat(e.getMessage(), equalTo("error during downloading " + u)); + } + } + + public void testGetBytesWithAuth() throws Exception { + HttpClient client = new HttpClient(); + String u = url("/auth/"); + 
PasswordAuthentication auth = client.auth("user", "pass"); + String response = bytesToString(client.getBytes(auth, u)); + assertThat(response, equalTo("super secret hello world")); + } + + public void testRedirectToAuth() throws Exception { + HttpClient client = new HttpClient(); + String u = url("/redirect/3/auth/"); + { + Exception e = expectThrows(ElasticsearchStatusException.class, () -> client.getBytes(u)); + assertThat(e.getMessage(), equalTo("error during downloading " + u)); + } + { + PasswordAuthentication auth = client.auth("bad", "credentials"); + Exception e = expectThrows(ElasticsearchStatusException.class, () -> client.getBytes(auth, u)); + assertThat(e.getMessage(), equalTo("error during downloading " + u)); + } + { + PasswordAuthentication auth = client.auth("user", "pass"); + String response = bytesToString(client.getBytes(auth, u)); + assertThat(response, equalTo("super secret hello world")); + } + } +} diff --git a/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java b/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java index 48e2b14e63fc7..b4cb4404525f4 100644 --- a/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java +++ b/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java @@ -11,8 +11,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.settings.Settings; @@ -37,7 +35,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.startsWith; /** @@ -108,7 +105,6 @@ public void testKibanaThreadPoolByPassesBlockedThreadPools() throws Exception { }); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/107625") public void testBlockedThreadPoolsRejectUserRequests() throws Exception { assertAcked(client().admin().indices().prepareCreate(USER_INDEX)); @@ -126,15 +122,16 @@ private void assertThreadPoolsBlocked() { assertThat(e1.getMessage(), startsWith("rejected execution of TimedRunnable")); var e2 = expectThrows(EsRejectedExecutionException.class, () -> client().prepareGet(USER_INDEX, "id").get()); assertThat(e2.getMessage(), startsWith("rejected execution of ActionRunnable")); - var e3 = expectThrows( - SearchPhaseExecutionException.class, - () -> client().prepareSearch(USER_INDEX) - .setQuery(QueryBuilders.matchAllQuery()) - // Request times out if max concurrent shard requests is set to 1 - .setMaxConcurrentShardRequests(usually() ? 
SearchRequest.DEFAULT_MAX_CONCURRENT_SHARD_REQUESTS : randomIntBetween(2, 10)) - .get() - ); - assertThat(e3.getMessage(), containsString("all shards failed")); + // intentionally commented out this test until https://github.com/elastic/elasticsearch/issues/97916 is fixed + // var e3 = expectThrows( + // SearchPhaseExecutionException.class, + // () -> client().prepareSearch(USER_INDEX) + // .setQuery(QueryBuilders.matchAllQuery()) + // // Request times out if max concurrent shard requests is set to 1 + // .setMaxConcurrentShardRequests(usually() ? SearchRequest.DEFAULT_MAX_CONCURRENT_SHARD_REQUESTS : randomIntBetween(2, 10)) + // .get() + // ); + // assertThat(e3.getMessage(), containsString("all shards failed")); } protected void runWithBlockedThreadPools(Runnable runnable) throws Exception { diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml index e49dc20e73406..25088f51e2b59 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml @@ -229,6 +229,7 @@ setup: Content-Type: application/json catch: bad_request search: + allow_partial_search_results: false body: query: script_score: @@ -243,6 +244,7 @@ setup: Content-Type: application/json catch: bad_request search: + allow_partial_search_results: false body: query: script_score: diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml new file mode 100644 index 0000000000000..4c195a0e32623 --- /dev/null +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml @@ -0,0 +1,390 @@ +setup: + - requires: + cluster_features: ["mapper.vectors.bit_vectors"] + reason: "support for bit vectors added in 8.15" + test_runner_features: headers + + - do: + indices.create: + index: test-index + body: + settings: + number_of_shards: 1 + mappings: + properties: + vector: + type: dense_vector + index: false + element_type: bit + dims: 40 + indexed_vector: + type: dense_vector + element_type: bit + dims: 40 + index: true + similarity: l2_norm + + - do: + index: + index: test-index + id: "1" + body: + vector: [8, 5, -15, 1, -7] + indexed_vector: [8, 5, -15, 1, -7] + + - do: + index: + index: test-index + id: "2" + body: + vector: [-1, 115, -3, 4, -128] + indexed_vector: [-1, 115, -3, 4, -128] + + - do: + index: + index: test-index + id: "3" + body: + vector: [2, 18, -5, 0, -124] + indexed_vector: [2, 18, -5, 0, -124] + + - do: + indices.refresh: {} + +--- +"Test vector magnitude equality": + - skip: + features: close_to + + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "doc['vector'].magnitude" + + - match: {hits.total: 3} + + - match: {hits.hits.0._id: "2"} + - close_to: {hits.hits.0._score: {value: 4.690416, error: 0.01}} + + - match: {hits.hits.1._id: "1"} + - close_to: {hits.hits.1._score: {value: 3.8729835, error: 0.01}} + + - match: {hits.hits.2._id: "3"} + - close_to: {hits.hits.2._score: {value: 3.4641016, error: 0.01}} + + - do: + headers: + Content-Type: 
application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "doc['indexed_vector'].magnitude" + + - match: {hits.total: 3} + + - match: {hits.hits.0._id: "2"} + - close_to: {hits.hits.0._score: {value: 4.690416, error: 0.01}} + + - match: {hits.hits.1._id: "1"} + - close_to: {hits.hits.1._score: {value: 3.8729835, error: 0.01}} + + - match: {hits.hits.2._id: "3"} + - close_to: {hits.hits.2._score: {value: 3.4641016, error: 0.01}} + +--- +"Dot Product is not supported": + - do: + catch: bad_request + headers: + Content-Type: application/json + search: + body: + query: + script_score: + query: {match_all: {} } + script: + source: "dotProduct(params.query_vector, 'vector')" + params: + query_vector: [0, 111, -13, 14, -124] + - do: + catch: bad_request + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "dotProduct(params.query_vector, 'vector')" + params: + query_vector: "006ff30e84" + +--- +"Cosine Similarity is not supported": + - do: + catch: bad_request + headers: + Content-Type: application/json + search: + body: + query: + script_score: + query: {match_all: {} } + script: + source: "cosineSimilarity(params.query_vector, 'vector')" + params: + query_vector: [0, 111, -13, 14, -124] + - do: + catch: bad_request + headers: + Content-Type: application/json + search: + body: + query: + script_score: + query: {match_all: {} } + script: + source: "cosineSimilarity(params.query_vector, 'vector')" + params: + query_vector: "006ff30e84" + + - do: + catch: bad_request + headers: + Content-Type: application/json + search: + body: + query: + script_score: + query: {match_all: {} } + script: + source: "cosineSimilarity(params.query_vector, 'indexed_vector')" + params: + query_vector: [0, 111, -13, 14, -124] +--- +"L1 norm": + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "l1norm(params.query_vector, 'vector')" + params: + query_vector: [0, 111, -13, 14, -124] + + - match: {hits.total: 3} + + - match: {hits.hits.0._id: "2"} + - match: {hits.hits.0._score: 17.0} + + - match: {hits.hits.1._id: "1"} + - match: {hits.hits.1._score: 16.0} + + - match: {hits.hits.2._id: "3"} + - match: {hits.hits.2._score: 11.0} + +--- +"L1 norm hexidecimal": + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "l1norm(params.query_vector, 'vector')" + params: + query_vector: "006ff30e84" + + - match: {hits.total: 3} + + - match: {hits.hits.0._id: "2"} + - match: {hits.hits.0._score: 17.0} + + - match: {hits.hits.1._id: "1"} + - match: {hits.hits.1._score: 16.0} + + - match: {hits.hits.2._id: "3"} + - match: {hits.hits.2._score: 11.0} +--- +"L2 norm": + - requires: + test_runner_features: close_to + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "l2norm(params.query_vector, 'vector')" + params: + query_vector: [0, 111, -13, 14, -124] + + - match: {hits.total: 3} + + - match: {hits.hits.0._id: "2"} + - close_to: {hits.hits.0._score: {value: 4.123, error: 0.001}} + + - match: {hits.hits.1._id: "1"} + - close_to: {hits.hits.1._score: {value: 4, error: 0.001}} + + - match: 
{hits.hits.2._id: "3"} + - close_to: {hits.hits.2._score: {value: 3.316, error: 0.001}} +--- +"L2 norm hexidecimal": + - requires: + test_runner_features: close_to + + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "l2norm(params.query_vector, 'vector')" + params: + query_vector: "006ff30e84" + + - match: {hits.total: 3} + + - match: {hits.hits.0._id: "2"} + - close_to: {hits.hits.0._score: {value: 4.123, error: 0.001}} + + - match: {hits.hits.1._id: "1"} + - close_to: {hits.hits.1._score: {value: 4, error: 0.001}} + + - match: {hits.hits.2._id: "3"} + - close_to: {hits.hits.2._score: {value: 3.316, error: 0.001}} +--- +"Hamming distance": + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "hamming(params.query_vector, 'vector')" + params: + query_vector: [0, 111, -13, 14, -124] + + - match: {hits.total: 3} + + - match: {hits.hits.0._id: "2"} + - match: {hits.hits.0._score: 17.0} + + - match: {hits.hits.1._id: "1"} + - match: {hits.hits.1._score: 16.0} + + - match: {hits.hits.2._id: "3"} + - match: {hits.hits.2._score: 11.0} + + + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "hamming(params.query_vector, 'indexed_vector')" + params: + query_vector: [0, 111, -13, 14, -124] + + - match: {hits.total: 3} + + - match: {hits.hits.0._id: "2"} + - match: {hits.hits.0._score: 17.0} + + - match: {hits.hits.1._id: "1"} + - match: {hits.hits.1._score: 16.0} + + - match: {hits.hits.2._id: "3"} + - match: {hits.hits.2._score: 11.0} +--- +"Hamming distance hexidecimal": + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "hamming(params.query_vector, 'vector')" + params: + query_vector: "006ff30e84" + + - match: {hits.total: 3} + + - match: {hits.hits.0._id: "2"} + - match: {hits.hits.0._score: 17.0} + + - match: {hits.hits.1._id: "1"} + - match: {hits.hits.1._score: 16.0} + + - match: {hits.hits.2._id: "3"} + - match: {hits.hits.2._score: 11.0} + + + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "hamming(params.query_vector, 'indexed_vector')" + params: + query_vector: "006ff30e84" + + - match: {hits.total: 3} + + - match: {hits.hits.0._id: "2"} + - match: {hits.hits.0._score: 17.0} + + - match: {hits.hits.1._id: "1"} + - match: {hits.hits.1._score: 16.0} + + - match: {hits.hits.2._id: "3"} + - match: {hits.hits.2._score: 11.0} diff --git a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java index 4678215dd5b60..2808dae31239c 100644 --- a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java +++ b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java @@ -81,7 +81,7 @@ *

* "field" : "POLYGON ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0)) * - * @deprecated use {@link org.elasticsearch.index.mapper.GeoShapeFieldMapper} + * @deprecated use the field mapper in the spatial module */ @Deprecated public class LegacyGeoShapeFieldMapper extends AbstractShapeGeometryFieldMapper> { @@ -323,7 +323,7 @@ private static void setupPrefixTrees(GeoShapeFieldType ft) { private GeoShapeFieldType buildFieldType(LegacyGeoShapeParser parser, MapperBuilderContext context) { GeoShapeFieldType ft = new GeoShapeFieldType( - context.buildFullName(name()), + context.buildFullName(leafName()), indexed.get(), orientation.get().value(), parser, @@ -352,7 +352,7 @@ private static int getLevels(int treeLevels, double precisionInMeters, int defau public LegacyGeoShapeFieldMapper build(MapperBuilderContext context) { LegacyGeoShapeParser parser = new LegacyGeoShapeParser(); GeoShapeFieldType ft = buildFieldType(parser, context); - return new LegacyGeoShapeFieldMapper(name(), ft, multiFieldsBuilder.build(this, context), copyTo, parser, this); + return new LegacyGeoShapeFieldMapper(leafName(), ft, multiFieldsBuilder.build(this, context), copyTo, parser, this); } } @@ -610,7 +610,7 @@ protected String contentType() { @Override public FieldMapper.Builder getMergeBuilder() { return new Builder( - simpleName(), + leafName(), indexCreatedVersion, builder.ignoreMalformed.getDefaultValue().value(), builder.coerce.getDefaultValue().value() @@ -621,7 +621,7 @@ public FieldMapper.Builder getMergeBuilder() { protected void checkIncomingMergeType(FieldMapper mergeWith) { if (mergeWith instanceof LegacyGeoShapeFieldMapper == false && CONTENT_TYPE.equals(mergeWith.typeName())) { throw new IllegalArgumentException( - "mapper [" + name() + "] of type [geo_shape] cannot change strategy from [recursive] to [BKD]" + "mapper [" + fullPath() + "] of type [geo_shape] cannot change strategy from [recursive] to [BKD]" ); } super.checkIncomingMergeType(mergeWith); diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java index bf81003f5e1f4..899cc42fea1e0 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java @@ -127,7 +127,7 @@ private MatchOnlyTextFieldType buildFieldType(MapperBuilderContext context) { NamedAnalyzer indexAnalyzer = analyzers.getIndexAnalyzer(); TextSearchInfo tsi = new TextSearchInfo(Defaults.FIELD_TYPE, null, searchAnalyzer, searchQuoteAnalyzer); MatchOnlyTextFieldType ft = new MatchOnlyTextFieldType( - context.buildFullName(name()), + context.buildFullName(leafName()), tsi, indexAnalyzer, context.isSourceSynthetic(), @@ -140,7 +140,15 @@ private MatchOnlyTextFieldType buildFieldType(MapperBuilderContext context) { public MatchOnlyTextFieldMapper build(MapperBuilderContext context) { MatchOnlyTextFieldType tft = buildFieldType(context); MultiFields multiFields = multiFieldsBuilder.build(this, context); - return new MatchOnlyTextFieldMapper(name(), Defaults.FIELD_TYPE, tft, multiFields, copyTo, context.isSourceSynthetic(), this); + return new MatchOnlyTextFieldMapper( + leafName(), + Defaults.FIELD_TYPE, + tft, + multiFields, + copyTo, + context.isSourceSynthetic(), + this + ); } } @@ -397,7 +405,7 @@ public Map indexAnalyzers() { @Override public 
FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), indexCreatedVersion, indexAnalyzers).init(this); + return new Builder(leafName(), indexCreatedVersion, indexAnalyzers).init(this); } @Override @@ -436,10 +444,10 @@ protected SyntheticSourceMode syntheticSourceMode() { public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { if (copyTo.copyToFields().isEmpty() != true) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" + "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); } - return new StringStoredFieldFieldLoader(fieldType().storedFieldNameForSyntheticSource(), simpleName(), null) { + return new StringStoredFieldFieldLoader(fieldType().storedFieldNameForSyntheticSource(), leafName(), null) { @Override protected void write(XContentBuilder b, Object value) throws IOException { b.value((String) value); diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java index c058dddd8f875..bd3845e1ee18a 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java @@ -91,9 +91,9 @@ protected Parameter[] getParameters() { @Override public RankFeatureFieldMapper build(MapperBuilderContext context) { return new RankFeatureFieldMapper( - name(), + leafName(), new RankFeatureFieldType( - context.buildFullName(name()), + context.buildFullName(leafName()), meta.getValue(), positiveScoreImpact.getValue(), nullValue.getValue() @@ -205,9 +205,9 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio value = context.parser().floatValue(); } - if (context.doc().getByKey(name()) != null) { + if (context.doc().getByKey(fullPath()) != null) { throw new IllegalArgumentException( - "[rank_feature] fields do not support indexing multiple values for the same field [" + name() + "] in the same document" + "[rank_feature] fields do not support indexing multiple values for the same field [" + fullPath() + "] in the same document" ); } @@ -215,7 +215,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio value = 1 / value; } - context.doc().addWithKey(name(), new FeatureField(NAME, name(), value)); + context.doc().addWithKey(fullPath(), new FeatureField(NAME, fullPath(), value)); } private static Float objectToFloat(Object value) { @@ -233,6 +233,6 @@ protected String contentType() { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName()).init(this); + return new Builder(leafName()).init(this); } } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java index 5f0d44d1fb796..e6cb3010f9960 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java @@ -64,8 +64,8 @@ protected Parameter[] getParameters() { @Override public RankFeaturesFieldMapper build(MapperBuilderContext context) { return 
new RankFeaturesFieldMapper( - name(), - new RankFeaturesFieldType(context.buildFullName(name()), meta.getValue(), positiveScoreImpact.getValue()), + leafName(), + new RankFeaturesFieldType(context.buildFullName(leafName()), meta.getValue(), positiveScoreImpact.getValue()), multiFieldsBuilder.build(this, context), copyTo, positiveScoreImpact.getValue() @@ -137,7 +137,7 @@ public Map indexAnalyzers() { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName()).init(this); + return new Builder(leafName()).init(this); } @Override @@ -174,7 +174,7 @@ public void parse(DocumentParserContext context) throws IOException { } else if (token == Token.VALUE_NULL) { // ignore feature, this is consistent with numeric fields } else if (token == Token.VALUE_NUMBER || token == Token.VALUE_STRING) { - final String key = name() + "." + feature; + final String key = fullPath() + "." + feature; float value = context.parser().floatValue(true); if (context.doc().getByKey(key) != null) { throw new IllegalArgumentException( @@ -187,7 +187,7 @@ public void parse(DocumentParserContext context) throws IOException { if (positiveScoreImpact == false) { value = 1 / value; } - context.doc().addWithKey(key, new FeatureField(name(), feature, value)); + context.doc().addWithKey(key, new FeatureField(fullPath(), feature, value)); } else { throw new IllegalArgumentException( "[rank_features] fields take hashes that map a feature to a strictly positive " diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java index f472ce0855625..c346a7d669149 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java @@ -187,7 +187,7 @@ protected Parameter[] getParameters() { @Override public ScaledFloatFieldMapper build(MapperBuilderContext context) { ScaledFloatFieldType type = new ScaledFloatFieldType( - context.buildFullName(name()), + context.buildFullName(leafName()), indexed.getValue(), stored.getValue(), hasDocValues.getValue(), @@ -198,7 +198,7 @@ public ScaledFloatFieldMapper build(MapperBuilderContext context) { indexMode ); return new ScaledFloatFieldMapper( - name(), + leafName(), type, multiFieldsBuilder.build(this, context), copyTo, @@ -511,7 +511,7 @@ protected String contentType() { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), ignoreMalformedByDefault, coerceByDefault, indexMode).metric(metricType).init(this); + return new Builder(leafName(), ignoreMalformedByDefault, coerceByDefault, indexMode).metric(metricType).init(this); } @Override @@ -531,7 +531,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio context.addIgnoredField(mappedFieldType.name()); if (isSourceSynthetic) { // Save a copy of the field so synthetic source can load it - context.doc().add(IgnoreMalformedStoredValues.storedField(name(), context.parser())); + context.doc().add(IgnoreMalformedStoredValues.storedField(fullPath(), context.parser())); } return; } else { @@ -559,7 +559,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio context.addIgnoredField(mappedFieldType.name()); if (isSourceSynthetic) { // Save a copy of the field so synthetic source can load it - 
context.doc().add(IgnoreMalformedStoredValues.storedField(name(), context.parser())); + context.doc().add(IgnoreMalformedStoredValues.storedField(fullPath(), context.parser())); } return; } else { @@ -721,15 +721,19 @@ protected SyntheticSourceMode syntheticSourceMode() { public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { if (hasDocValues == false) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it doesn't have doc values" + "field [" + + fullPath() + + "] of type [" + + typeName() + + "] doesn't support synthetic source because it doesn't have doc values" ); } if (copyTo.copyToFields().isEmpty() != true) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" + "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); } - return new SortedNumericDocValuesSyntheticFieldLoader(name(), simpleName(), ignoreMalformed.value()) { + return new SortedNumericDocValuesSyntheticFieldLoader(fullPath(), leafName(), ignoreMalformed.value()) { @Override protected void writeValue(XContentBuilder b, long value) throws IOException { b.value(decodeForSyntheticSource(value, scalingFactor)); diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapper.java index a5e011d5772f0..d521f9b2d2a31 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapper.java @@ -187,7 +187,7 @@ public SearchAsYouTypeFieldMapper build(MapperBuilderContext context) { NamedAnalyzer searchAnalyzer = analyzers.getSearchAnalyzer(); SearchAsYouTypeFieldType ft = new SearchAsYouTypeFieldType( - context.buildFullName(name()), + context.buildFullName(leafName()), fieldType, similarity.getValue(), analyzers.getSearchAnalyzer(), @@ -202,7 +202,7 @@ public SearchAsYouTypeFieldMapper build(MapperBuilderContext context) { prefixft.setIndexOptions(fieldType.indexOptions()); prefixft.setOmitNorms(true); prefixft.setStored(false); - final String fullName = context.buildFullName(name()); + final String fullName = context.buildFullName(leafName()); // wrap the root field's index analyzer with shingles and edge ngrams final Analyzer prefixIndexWrapper = SearchAsYouTypeAnalyzer.withShingleAndPrefix( indexAnalyzer.analyzer(), @@ -228,7 +228,7 @@ public SearchAsYouTypeFieldMapper build(MapperBuilderContext context) { final int shingleSize = i + 2; FieldType shingleft = new FieldType(fieldType); shingleft.setStored(false); - String fieldName = getShingleFieldName(context.buildFullName(name()), shingleSize); + String fieldName = getShingleFieldName(context.buildFullName(leafName()), shingleSize); // wrap the root field's index, search, and search quote analyzers with shingles final SearchAsYouTypeAnalyzer shingleIndexWrapper = SearchAsYouTypeAnalyzer.withShingle( indexAnalyzer.analyzer(), @@ -260,7 +260,7 @@ public SearchAsYouTypeFieldMapper build(MapperBuilderContext context) { ft.setPrefixField(prefixFieldType); ft.setShingleFields(shingleFieldTypes); return new SearchAsYouTypeFieldMapper( - name(), + leafName(), ft, copyTo, indexAnalyzers, @@ -721,7 +721,7 @@ protected 
String contentType() { } public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), builder.indexCreatedVersion, builder.analyzers.indexAnalyzers).init(this); + return new Builder(leafName(), builder.indexCreatedVersion, builder.analyzers.indexAnalyzers).init(this); } public static String getShingleFieldName(String parentField, int shingleSize) { diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java index 831306a8e8594..9db677ddddffa 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java @@ -77,17 +77,17 @@ protected Parameter[] getParameters() { @Override public TokenCountFieldMapper build(MapperBuilderContext context) { if (analyzer.getValue() == null) { - throw new MapperParsingException("Analyzer must be set for field [" + name() + "] but wasn't."); + throw new MapperParsingException("Analyzer must be set for field [" + leafName() + "] but wasn't."); } MappedFieldType ft = new TokenCountFieldType( - context.buildFullName(name()), + context.buildFullName(leafName()), index.getValue(), store.getValue(), hasDocValues.getValue(), nullValue.getValue(), meta.getValue() ); - return new TokenCountFieldMapper(name(), ft, multiFieldsBuilder.build(this, context), copyTo, this); + return new TokenCountFieldMapper(leafName(), ft, multiFieldsBuilder.build(this, context), copyTo, this); } } @@ -163,7 +163,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio if (value == null) { tokenCount = nullValue; } else { - tokenCount = countPositions(analyzer, name(), value, enablePositionIncrements); + tokenCount = countPositions(analyzer, fullPath(), value, enablePositionIncrements); } NumberFieldMapper.NumberType.INTEGER.addFields(context.doc(), fieldType().name(), tokenCount, index, hasDocValues, store); @@ -213,6 +213,6 @@ protected String contentType() { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName()).init(this); + return new Builder(leafName()).init(this); } } diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapperTests.java index 067b341d6394c..a527d341871db 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapperTests.java @@ -690,7 +690,7 @@ private static void assertSearchAsYouTypeFieldMapper(SearchAsYouTypeFieldMapper assertSearchAsYouTypeFieldType(mapper, mapper.fieldType(), maxShingleSize, analyzerName, mapper.prefixField().fieldType()); assertThat(mapper.prefixField(), notNullValue()); - assertThat(mapper.prefixField().fieldType().parentField, equalTo(mapper.name())); + assertThat(mapper.prefixField().fieldType().parentField, equalTo(mapper.fullPath())); assertPrefixFieldType(mapper.prefixField(), mapper.indexAnalyzers(), maxShingleSize, analyzerName); for (int shingleSize = 2; shingleSize <= maxShingleSize; shingleSize++) { @@ -709,7 +709,7 @@ private static void assertSearchAsYouTypeFieldMapper(SearchAsYouTypeFieldMapper 
assertThat(mapper.shingleFields().length, equalTo(numberOfShingleSubfields)); final Set fieldsUsingSourcePath = new HashSet<>(); - mapper.sourcePathUsedBy().forEachRemaining(mapper1 -> fieldsUsingSourcePath.add(mapper1.name())); + mapper.sourcePathUsedBy().forEachRemaining(mapper1 -> fieldsUsingSourcePath.add(mapper1.fullPath())); int multiFields = 0; for (FieldMapper ignored : mapper.multiFields()) { multiFields++; @@ -717,12 +717,12 @@ private static void assertSearchAsYouTypeFieldMapper(SearchAsYouTypeFieldMapper assertThat(fieldsUsingSourcePath.size(), equalTo(numberOfShingleSubfields + 1 + multiFields)); final Set expectedFieldsUsingSourcePath = new HashSet<>(); - expectedFieldsUsingSourcePath.add(mapper.prefixField().name()); + expectedFieldsUsingSourcePath.add(mapper.prefixField().fullPath()); for (ShingleFieldMapper shingleFieldMapper : mapper.shingleFields()) { - expectedFieldsUsingSourcePath.add(shingleFieldMapper.name()); + expectedFieldsUsingSourcePath.add(shingleFieldMapper.fullPath()); } for (FieldMapper multiField : mapper.multiFields()) { - expectedFieldsUsingSourcePath.add(multiField.name()); + expectedFieldsUsingSourcePath.add(multiField.fullPath()); } assertThat(fieldsUsingSourcePath, equalTo(expectedFieldsUsingSourcePath)); } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java index e1df6c130c9fe..dc760c0b07b71 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java @@ -123,16 +123,16 @@ public ParentJoinFieldMapper build(MapperBuilderContext context) { "Adding multifields to [" + CONTENT_TYPE + "] mappers has no effect and will be forbidden in future" ); } - checkObjectOrNested(context, name()); + checkObjectOrNested(context, leafName()); final Map parentIdFields = new HashMap<>(); relations.get() .stream() - .map(relation -> new ParentIdFieldMapper(name() + "#" + relation.parent(), eagerGlobalOrdinals.get())) - .forEach(mapper -> parentIdFields.put(mapper.name(), mapper)); - Joiner joiner = new Joiner(name(), relations.get()); + .map(relation -> new ParentIdFieldMapper(leafName() + "#" + relation.parent(), eagerGlobalOrdinals.get())) + .forEach(mapper -> parentIdFields.put(mapper.fullPath(), mapper)); + Joiner joiner = new Joiner(leafName(), relations.get()); return new ParentJoinFieldMapper( - name(), - new JoinFieldType(context.buildFullName(name()), joiner, meta.get()), + leafName(), + new JoinFieldType(context.buildFullName(leafName()), joiner, meta.get()), Collections.unmodifiableMap(parentIdFields), eagerGlobalOrdinals.get(), relations.get() @@ -249,7 +249,7 @@ protected boolean supportsParsingObject() { @Override public void parse(DocumentParserContext context) throws IOException { - context.path().add(simpleName()); + context.path().add(leafName()); XContentParser.Token token = context.parser().currentToken(); String name = null; String parent = null; @@ -264,13 +264,17 @@ public void parse(DocumentParserContext context) throws IOException { } else if ("parent".equals(currentFieldName)) { parent = context.parser().text(); } else { - throw new IllegalArgumentException("unknown field name [" + currentFieldName + "] in join field [" + name() + "]"); + throw new IllegalArgumentException( + "unknown field name [" + currentFieldName + "] in join field [" + fullPath() + "]" + ); } } 
else if (token == XContentParser.Token.VALUE_NUMBER) { if ("parent".equals(currentFieldName)) { parent = context.parser().numberValue().toString(); } else { - throw new IllegalArgumentException("unknown field name [" + currentFieldName + "] in join field [" + name() + "]"); + throw new IllegalArgumentException( + "unknown field name [" + currentFieldName + "] in join field [" + fullPath() + "]" + ); } } } @@ -278,23 +282,23 @@ public void parse(DocumentParserContext context) throws IOException { name = context.parser().text(); parent = null; } else { - throw new IllegalStateException("[" + name() + "] expected START_OBJECT or VALUE_STRING but was: " + token); + throw new IllegalStateException("[" + fullPath() + "] expected START_OBJECT or VALUE_STRING but was: " + token); } if (name == null) { - throw new IllegalArgumentException("null join name in field [" + name() + "]"); + throw new IllegalArgumentException("null join name in field [" + fullPath() + "]"); } if (fieldType().joiner.knownRelation(name) == false) { - throw new IllegalArgumentException("unknown join name [" + name + "] for field [" + name() + "]"); + throw new IllegalArgumentException("unknown join name [" + name + "] for field [" + fullPath() + "]"); } if (fieldType().joiner.childTypeExists(name)) { // Index the document as a child if (parent == null) { - throw new IllegalArgumentException("[parent] is missing for join field [" + name() + "]"); + throw new IllegalArgumentException("[parent] is missing for join field [" + fullPath() + "]"); } if (context.routing() == null) { - throw new IllegalArgumentException("[routing] is missing for join field [" + name() + "]"); + throw new IllegalArgumentException("[routing] is missing for join field [" + fullPath() + "]"); } String fieldName = fieldType().joiner.parentJoinField(name); parentIdFields.get(fieldName).indexValue(context, parent); @@ -329,7 +333,7 @@ protected void doXContentBody(XContentBuilder builder, Params params) throws IOE @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName()).init(this); + return new Builder(leafName()).init(this); } @Override diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenToParentAggregatorTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenToParentAggregatorTests.java index c82ba3f5f26b5..5a5ccb640f03d 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenToParentAggregatorTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenToParentAggregatorTests.java @@ -68,7 +68,7 @@ public void testNoDocs() throws IOException { assertEquals(0, childrenToParent.getDocCount()); Aggregation parentAggregation = childrenToParent.getAggregations().get("in_parent"); assertEquals(0, childrenToParent.getDocCount()); - assertNotNull("Aggregations: " + childrenToParent.getAggregations().asMap(), parentAggregation); + assertNotNull("Aggregations: " + childrenToParent.getAggregations().asList(), parentAggregation); assertEquals(Double.POSITIVE_INFINITY, ((Min) parentAggregation).value(), Double.MIN_VALUE); assertFalse(JoinAggregationInspectionHelper.hasValue(childrenToParent)); }); diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/mapper/ParentJoinFieldMapperTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/mapper/ParentJoinFieldMapperTests.java index ff910e8876a90..ccbb011d5c755 100644 --- 
a/modules/parent-join/src/test/java/org/elasticsearch/join/mapper/ParentJoinFieldMapperTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/mapper/ParentJoinFieldMapperTests.java @@ -430,13 +430,13 @@ public void testSubFields() throws IOException { Iterator it = mapper.iterator(); FieldMapper next = (FieldMapper) it.next(); - assertThat(next.name(), equalTo("join_field#parent")); + assertThat(next.fullPath(), equalTo("join_field#parent")); assertTrue(next.fieldType().isSearchable()); assertTrue(next.fieldType().isAggregatable()); assertTrue(it.hasNext()); next = (FieldMapper) it.next(); - assertThat(next.name(), equalTo("join_field#child")); + assertThat(next.fullPath(), equalTo("join_field#child")); assertTrue(next.fieldType().isSearchable()); assertTrue(next.fieldType().isAggregatable()); diff --git a/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java b/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java index 88a39fe4aebc8..0fe7de9bbb23c 100644 --- a/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java +++ b/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java @@ -1358,7 +1358,7 @@ public void testKnnQueryNotSupportedInPercolator() throws IOException { """); indicesAdmin().prepareCreate("index1").setMapping(mappings).get(); ensureGreen(); - QueryBuilder knnVectorQueryBuilder = new KnnVectorQueryBuilder("my_vector", new float[] { 1, 1, 1, 1, 1 }, 10, null); + QueryBuilder knnVectorQueryBuilder = new KnnVectorQueryBuilder("my_vector", new float[] { 1, 1, 1, 1, 1 }, 10, 10, null); IndexRequestBuilder indexRequestBuilder = prepareIndex("index1").setId("knn_query1") .setSource(jsonBuilder().startObject().field("my_query", knnVectorQueryBuilder).endObject()); diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index 9a2653a61b60d..27d8695f9ae6a 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -655,6 +655,7 @@ public > IFD getForField( IndexFieldData.Builder builder = fieldType.fielddataBuilder( new FieldDataContext( delegate.getFullyQualifiedIndex().getName(), + delegate.getIndexSettings(), delegate::lookup, this::sourcePath, fielddataOperation diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java index 954dd9db0d376..ad936a5491b69 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java @@ -100,8 +100,9 @@ public class PercolatorFieldMapper extends FieldMapper { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), searchExecutionContext, mapUnmappedFieldsAsText, indexCreatedVersion, clusterTransportVersion) - .init(this); + return new Builder(leafName(), searchExecutionContext, mapUnmappedFieldsAsText, indexCreatedVersion, clusterTransportVersion).init( + this + ); } static class Builder extends FieldMapper.Builder { @@ -135,10 +136,10 @@ protected Parameter[] 
getParameters() { @Override public PercolatorFieldMapper build(MapperBuilderContext context) { - PercolatorFieldType fieldType = new PercolatorFieldType(context.buildFullName(name()), meta.getValue()); + PercolatorFieldType fieldType = new PercolatorFieldType(context.buildFullName(leafName()), meta.getValue()); // TODO should percolator even allow multifields? MultiFields multiFields = multiFieldsBuilder.build(this, context); - context = context.createChildContext(name(), null); + context = context.createChildContext(leafName(), null); KeywordFieldMapper extractedTermsField = createExtractQueryFieldBuilder( EXTRACTED_TERMS_FIELD_NAME, context, @@ -162,7 +163,7 @@ public PercolatorFieldMapper build(MapperBuilderContext context) { fieldType.mapUnmappedFieldsAsText = mapUnmappedFieldsAsText; return new PercolatorFieldMapper( - name(), + leafName(), fieldType, multiFields, copyTo, @@ -406,7 +407,7 @@ protected boolean supportsParsingObject() { @Override public void parse(DocumentParserContext context) throws IOException { SearchExecutionContext executionContext = this.searchExecutionContext.get(); - if (context.doc().getField(queryBuilderField.name()) != null) { + if (context.doc().getField(queryBuilderField.fullPath()) != null) { // If a percolator query has been defined in an array object then multiple percolator queries // could be provided. In order to prevent this we fail if we try to parse more than one query // for the current document. @@ -492,27 +493,27 @@ void processQuery(Query query, DocumentParserContext context) { builder.append(new BytesRef(extraction.field())); builder.append(FIELD_VALUE_SEPARATOR); builder.append(extraction.bytes()); - doc.add(new StringField(queryTermsField.name(), builder.toBytesRef(), Field.Store.NO)); + doc.add(new StringField(queryTermsField.fullPath(), builder.toBytesRef(), Field.Store.NO)); } else if (extraction.range != null) { byte[] min = extraction.range.lowerPoint; byte[] max = extraction.range.upperPoint; - doc.add(new BinaryRange(rangeFieldMapper.name(), encodeRange(extraction.range.fieldName, min, max))); + doc.add(new BinaryRange(rangeFieldMapper.fullPath(), encodeRange(extraction.range.fieldName, min, max))); } } if (result.matchAllDocs) { - doc.add(new StringField(extractionResultField.name(), EXTRACTION_FAILED, Field.Store.NO)); + doc.add(new StringField(extractionResultField.fullPath(), EXTRACTION_FAILED, Field.Store.NO)); if (result.verified) { - doc.add(new StringField(extractionResultField.name(), EXTRACTION_COMPLETE, Field.Store.NO)); + doc.add(new StringField(extractionResultField.fullPath(), EXTRACTION_COMPLETE, Field.Store.NO)); } } else if (result.verified) { - doc.add(new StringField(extractionResultField.name(), EXTRACTION_COMPLETE, Field.Store.NO)); + doc.add(new StringField(extractionResultField.fullPath(), EXTRACTION_COMPLETE, Field.Store.NO)); } else { - doc.add(new StringField(extractionResultField.name(), EXTRACTION_PARTIAL, Field.Store.NO)); + doc.add(new StringField(extractionResultField.fullPath(), EXTRACTION_PARTIAL, Field.Store.NO)); } context.addToFieldNames(fieldType().name()); - doc.add(new NumericDocValuesField(minimumShouldMatchFieldMapper.name(), result.minimumShouldMatch)); + doc.add(new NumericDocValuesField(minimumShouldMatchFieldMapper.fullPath(), result.minimumShouldMatch)); } static SearchExecutionContext configureContext(SearchExecutionContext context, boolean mapUnmappedFieldsAsString) { diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java 
b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java index e3439c821e81f..917c1699cfdb9 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java @@ -87,7 +87,7 @@ public void testStoringQueryBuilders() throws IOException { when(searchExecutionContext.getWriteableRegistry()).thenReturn(writableRegistry()); when(searchExecutionContext.getParserConfig()).thenReturn(parserConfig()); when(searchExecutionContext.getForField(fieldMapper.fieldType(), fielddataOperation)).thenReturn( - new BytesBinaryIndexFieldData(fieldMapper.name(), CoreValuesSourceType.KEYWORD) + new BytesBinaryIndexFieldData(fieldMapper.fullPath(), CoreValuesSourceType.KEYWORD) ); when(searchExecutionContext.getFieldType(Mockito.anyString())).thenAnswer(invocation -> { final String fieldName = (String) invocation.getArguments()[0]; diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryConcurrentTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryConcurrentTests.java index 323b829fe93ff..190616b9980f0 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryConcurrentTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryConcurrentTests.java @@ -11,11 +11,9 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.index.reindex.BulkByScrollResponse; import java.util.ArrayList; import java.util.List; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicLong; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; @@ -26,44 +24,29 @@ public class DeleteByQueryConcurrentTests extends ReindexTestCase { public void testConcurrentDeleteByQueriesOnDifferentDocs() throws Throwable { - final Thread[] threads = new Thread[scaledRandomIntBetween(2, 5)]; + final int threadCount = scaledRandomIntBetween(2, 5); final long docs = randomIntBetween(1, 50); List builders = new ArrayList<>(); for (int i = 0; i < docs; i++) { - for (int t = 0; t < threads.length; t++) { + for (int t = 0; t < threadCount; t++) { builders.add(prepareIndex("test").setSource("field", t)); } } indexRandom(true, true, true, builders); - final CountDownLatch start = new CountDownLatch(1); - for (int t = 0; t < threads.length; t++) { - final int threadNum = t; - assertHitCount(prepareSearch("test").setSize(0).setQuery(QueryBuilders.termQuery("field", threadNum)), docs); - - Runnable r = () -> { - try { - start.await(); - - assertThat( - deleteByQuery().source("_all").filter(termQuery("field", threadNum)).refresh(true).get(), - matcher().deleted(docs) - ); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - }; - threads[t] = new Thread(r); - threads[t].start(); + for (int t = 0; t < threadCount; t++) { + assertHitCount(prepareSearch("test").setSize(0).setQuery(QueryBuilders.termQuery("field", t)), docs); } - - start.countDown(); - for (Thread thread : threads) { - thread.join(); - } - - for (int t = 0; t < threads.length; t++) { + startInParallel( + threadCount, + threadNum -> assertThat( + deleteByQuery().source("_all").filter(termQuery("field", threadNum)).refresh(true).get(), + matcher().deleted(docs) + ) + ); + + for (int t = 0; t < threadCount; t++) { 
assertHitCount(prepareSearch("test").setSize(0).setQuery(QueryBuilders.termQuery("field", t)), 0); } } @@ -77,33 +60,12 @@ public void testConcurrentDeleteByQueriesOnSameDocs() throws Throwable { } indexRandom(true, true, true, builders); - final Thread[] threads = new Thread[scaledRandomIntBetween(2, 9)]; + final int threadCount = scaledRandomIntBetween(2, 9); - final CountDownLatch start = new CountDownLatch(1); final MatchQueryBuilder query = matchQuery("foo", "bar"); final AtomicLong deleted = new AtomicLong(0); - - for (int t = 0; t < threads.length; t++) { - Runnable r = () -> { - try { - start.await(); - - BulkByScrollResponse response = deleteByQuery().source("test").filter(query).refresh(true).get(); - // Some deletions might fail due to version conflict, but - // what matters here is the total of successful deletions - deleted.addAndGet(response.getDeleted()); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - }; - threads[t] = new Thread(r); - threads[t].start(); - } - - start.countDown(); - for (Thread thread : threads) { - thread.join(); - } + // Some deletions might fail due to version conflict, but what matters here is the total of successful deletions + startInParallel(threadCount, i -> deleted.addAndGet(deleteByQuery().source("test").filter(query).refresh(true).get().getDeleted())); assertHitCount(prepareSearch("test").setSize(0), 0L); assertThat(deleted.get(), equalTo(docs)); diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index d53c379a37644..72b48c5903629 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; @@ -443,4 +444,19 @@ protected void doClose() { } super.doClose(); } + + @Override + public String getAnalysisFailureExtraDetail() { + return Strings.format( + """ + Elasticsearch observed the storage system underneath this repository behaved incorrectly which indicates it is not \ + suitable for use with Elasticsearch snapshots. Typically this happens when using storage other than AWS S3 which \ + incorrectly claims to be S3-compatible. If so, please report this incompatibility to your storage supplier. Do not report \ + Elasticsearch issues involving storage systems which claim to be S3-compatible unless you can demonstrate that the same \ + issue exists when using a genuine AWS S3 repository. 
See [%s] for further information about repository analysis, and [%s] \ + for further information about support for S3-compatible repository implementations.""", + ReferenceDocs.SNAPSHOT_REPOSITORY_ANALYSIS, + ReferenceDocs.S3_COMPATIBLE_REPOSITORIES + ); + } } diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index fcb0e82505dac..4bbc791e5fe21 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -11,6 +11,7 @@ import com.amazonaws.services.s3.AbstractAmazonS3; import org.elasticsearch.cluster.metadata.RepositoryMetadata; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -28,6 +29,7 @@ import java.util.Map; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; @@ -152,4 +154,24 @@ private S3Repository createS3Repo(RepositoryMetadata metadata) { ); } + public void testAnalysisFailureDetail() { + try ( + S3Repository s3repo = createS3Repo( + new RepositoryMetadata("dummy-repo", "mock", Settings.builder().put(S3Repository.BUCKET_SETTING.getKey(), "bucket").build()) + ) + ) { + assertThat( + s3repo.getAnalysisFailureExtraDetail(), + allOf( + containsString("storage system underneath this repository behaved incorrectly"), + containsString("incorrectly claims to be S3-compatible"), + containsString("report this incompatibility to your storage supplier"), + containsString("unless you can demonstrate that the same issue exists when using a genuine AWS S3 repository"), + containsString(ReferenceDocs.SNAPSHOT_REPOSITORY_ANALYSIS.toString()), + containsString(ReferenceDocs.S3_COMPATIBLE_REPOSITORIES.toString()) + ) + ); + } + } + } diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4ChunkedContinuationsIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4ChunkedContinuationsIT.java index 5ad1152d65e85..c4c35b410af78 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4ChunkedContinuationsIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4ChunkedContinuationsIT.java @@ -636,9 +636,10 @@ public void close() { @Override public void accept(RestChannel channel) { localRefs.mustIncRef(); - client.execute(TYPE, new Request(), new RestActionListener<>(channel) { + client.execute(TYPE, new Request(), ActionListener.releaseAfter(new RestActionListener<>(channel) { @Override protected void processResponse(Response response) { + localRefs.mustIncRef(); channel.sendResponse(RestResponse.chunked(RestStatus.OK, response.getResponseBodyPart(), () -> { // cancellation notification only happens while processing a continuation, not while computing // the next one; prompt cancellation requires use of something like RestCancellableNodeClient @@ -647,7 +648,10 @@ protected void processResponse(Response response) { localRefs.decRef(); })); } - }); + }, () -> { + assertSame(localRefs, refs); + localRefs.decRef(); + })); } }; } else { diff --git 
a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java index c9beeef246703..b915011514d9a 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java @@ -21,6 +21,7 @@ import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.HttpObject; import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.ssl.SslCloseCompletionEvent; import io.netty.util.ReferenceCountUtil; import io.netty.util.concurrent.Future; import io.netty.util.concurrent.PromiseCombiner; @@ -477,6 +478,16 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { } } + @Override + public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception { + if (evt instanceof SslCloseCompletionEvent closeEvent) { + if (closeEvent.isSuccess() && ctx.channel().isActive()) { + logger.trace("received TLS close_notify, closing connection {}", ctx.channel()); + ctx.channel().close(); + } + } + } + private record WriteOperation(HttpObject msg, ChannelPromise promise) { void failAsClosedChannel() { diff --git a/muted-tests.yml b/muted-tests.yml index a17e95e9a5b3f..61fb77502f7d2 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -1,17 +1,10 @@ tests: -- class: "org.elasticsearch.xpack.inference.action.filter.ShardBulkInferenceActionFilterTests" - issue: "https://github.com/elastic/elasticsearch/issues/108649" - method: "testManyRandomDocs" -- class: "org.elasticsearch.cluster.coordination.CoordinatorVotingConfigurationTests" - issue: "https://github.com/elastic/elasticsearch/issues/108729" - method: "testClusterUUIDLogging" - class: "org.elasticsearch.xpack.textstructure.structurefinder.TimestampFormatFinderTests" issue: "https://github.com/elastic/elasticsearch/issues/108855" method: "testGuessIsDayFirstFromLocale" - class: "org.elasticsearch.test.rest.ClientYamlTestSuiteIT" issue: "https://github.com/elastic/elasticsearch/issues/108857" - method: "test {yaml=search/180_locale_dependent_mapping/Test Index and Search locale\ - \ dependent mappings / dates}" + method: "test {yaml=search/180_locale_dependent_mapping/Test Index and Search locale dependent mappings / dates}" - class: "org.elasticsearch.upgrades.SearchStatesIT" issue: "https://github.com/elastic/elasticsearch/issues/108991" method: "testCanMatch" @@ -20,8 +13,7 @@ tests: method: "testTrainedModelInference" - class: "org.elasticsearch.xpack.security.CoreWithSecurityClientYamlTestSuiteIT" issue: "https://github.com/elastic/elasticsearch/issues/109188" - method: "test {yaml=search/180_locale_dependent_mapping/Test Index and Search locale\ - \ dependent mappings / dates}" + method: "test {yaml=search/180_locale_dependent_mapping/Test Index and Search locale dependent mappings / dates}" - class: "org.elasticsearch.xpack.esql.qa.mixed.EsqlClientYamlIT" issue: "https://github.com/elastic/elasticsearch/issues/109189" method: "test {p0=esql/70_locale/Date format with Italian locale}" @@ -36,8 +28,7 @@ tests: method: "testTimestampFieldTypeExposedByAllIndicesServices" - class: "org.elasticsearch.analysis.common.CommonAnalysisClientYamlTestSuiteIT" issue: "https://github.com/elastic/elasticsearch/issues/109318" - method: "test {yaml=analysis-common/50_char_filters/pattern_replace error handling\ 
- \ (too complex pattern)}" + method: "test {yaml=analysis-common/50_char_filters/pattern_replace error handling (too complex pattern)}" - class: "org.elasticsearch.xpack.ml.integration.ClassificationHousePricingIT" issue: "https://github.com/elastic/elasticsearch/issues/101598" method: "testFeatureImportanceValues" @@ -50,34 +41,56 @@ tests: - class: "org.elasticsearch.xpack.inference.InferenceCrudIT" issue: "https://github.com/elastic/elasticsearch/issues/109391" method: "testDeleteEndpointWhileReferencedByPipeline" -- class: "org.elasticsearch.xpack.rollup.job.RollupIndexerStateTests" - issue: "https://github.com/elastic/elasticsearch/issues/109627" - method: "testMultipleJobTriggering" -- class: "org.elasticsearch.index.store.FsDirectoryFactoryTests" - issue: "https://github.com/elastic/elasticsearch/issues/109681" - class: "org.elasticsearch.xpack.test.rest.XPackRestIT" issue: "https://github.com/elastic/elasticsearch/issues/109687" method: "test {p0=sql/translate/Translate SQL}" -- class: "org.elasticsearch.xpack.esql.EsqlAsyncSecurityIT" - issue: "https://github.com/elastic/elasticsearch/issues/109806" - method: "testInsufficientPrivilege" -- class: org.elasticsearch.action.search.SearchProgressActionListenerIT - method: testSearchProgressWithHits - issue: https://github.com/elastic/elasticsearch/issues/109830 -- class: "org.elasticsearch.xpack.shutdown.NodeShutdownReadinessIT" - issue: "https://github.com/elastic/elasticsearch/issues/109838" - method: "testShutdownReadinessService" -- class: "org.elasticsearch.xpack.security.ScrollHelperIntegTests" - issue: "https://github.com/elastic/elasticsearch/issues/109905" - method: "testFetchAllEntities" -- class: "org.elasticsearch.xpack.ml.integration.AutodetectMemoryLimitIT" - issue: "https://github.com/elastic/elasticsearch/issues/109904" -- class: "org.elasticsearch.xpack.esql.action.AsyncEsqlQueryActionIT" - issue: "https://github.com/elastic/elasticsearch/issues/109944" - method: "testBasicAsyncExecution" -- class: "org.elasticsearch.xpack.security.authz.store.NativePrivilegeStoreCacheTests" - issue: "https://github.com/elastic/elasticsearch/issues/110015" - +- class: "org.elasticsearch.action.admin.indices.rollover.RolloverIT" + issue: "https://github.com/elastic/elasticsearch/issues/110034" + method: "testRolloverWithClosedWriteIndex" +- class: org.elasticsearch.index.store.FsDirectoryFactoryTests + method: testStoreDirectory + issue: https://github.com/elastic/elasticsearch/issues/110210 +- class: org.elasticsearch.index.store.FsDirectoryFactoryTests + method: testPreload + issue: https://github.com/elastic/elasticsearch/issues/110211 +- class: "org.elasticsearch.rest.RestControllerIT" + issue: "https://github.com/elastic/elasticsearch/issues/110225" +- class: org.elasticsearch.upgrades.SecurityIndexRolesMetadataMigrationIT + method: testMetadataMigratedAfterUpgrade + issue: https://github.com/elastic/elasticsearch/issues/110232 +- class: org.elasticsearch.compute.lucene.ValueSourceReaderTypeConversionTests + method: testLoadAll + issue: https://github.com/elastic/elasticsearch/issues/110244 +- class: org.elasticsearch.backwards.SearchWithMinCompatibleSearchNodeIT + method: testMinVersionAsNewVersion + issue: https://github.com/elastic/elasticsearch/issues/95384 +- class: org.elasticsearch.backwards.SearchWithMinCompatibleSearchNodeIT + method: testCcsMinimizeRoundtripsIsFalse + issue: https://github.com/elastic/elasticsearch/issues/101974 +- class: org.elasticsearch.backwards.SearchWithMinCompatibleSearchNodeIT + method: 
testMinVersionAsOldVersion + issue: https://github.com/elastic/elasticsearch/issues/109454 +- class: "org.elasticsearch.xpack.searchablesnapshots.FrozenSearchableSnapshotsIntegTests" + issue: "https://github.com/elastic/elasticsearch/issues/110408" + method: "testCreateAndRestorePartialSearchableSnapshot" +- class: "org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT" + issue: "https://github.com/elastic/elasticsearch/issues/110719" + method: "test {p0=search.vectors/45_knn_search_byte/Test nonexistent field}" +- class: "org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT" + issue: "https://github.com/elastic/elasticsearch/issues/110720" + method: "test {p0=search.vectors/40_knn_search/Test nonexistent field}" +- class: org.elasticsearch.xpack.security.LicenseDLSFLSRoleIT + method: testQueryDLSFLSRolesShowAsDisabled + issue: https://github.com/elastic/elasticsearch/issues/110729 +- class: org.elasticsearch.xpack.security.authz.store.NativePrivilegeStoreCacheTests + method: testPopulationOfCacheWhenLoadingPrivilegesForAllApplications + issue: https://github.com/elastic/elasticsearch/issues/110789 +- class: org.elasticsearch.xpack.security.ScrollHelperIntegTests + method: testFetchAllEntities + issue: https://github.com/elastic/elasticsearch/issues/110786 +- class: org.elasticsearch.xpack.searchablesnapshots.cache.common.CacheFileTests + method: testCacheFileCreatedAsSparseFile + issue: https://github.com/elastic/elasticsearch/issues/110801 # Examples: # diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapper.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapper.java index 1da274ff236da..2d27447b618e9 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapper.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapper.java @@ -327,7 +327,7 @@ public ICUCollationKeywordFieldMapper build(MapperBuilderContext context) { final CollatorParams params = collatorParams(); final Collator collator = params.buildCollator(); CollationFieldType ft = new CollationFieldType( - context.buildFullName(name()), + context.buildFullName(leafName()), indexed.getValue(), stored.getValue(), hasDocValues.getValue(), @@ -337,7 +337,7 @@ public ICUCollationKeywordFieldMapper build(MapperBuilderContext context) { meta.getValue() ); return new ICUCollationKeywordFieldMapper( - name(), + leafName(), buildFieldType(), ft, multiFieldsBuilder.build(this, context), @@ -508,7 +508,7 @@ protected String contentType() { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName()).init(this); + return new Builder(leafName()).init(this); } @Override @@ -526,7 +526,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio } if (value.length() > ignoreAbove) { - context.addIgnoredField(name()); + context.addIgnoredField(fullPath()); return; } diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java index db817917ff29e..dac8e051f25f8 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java +++ 
b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java @@ -139,7 +139,7 @@ private AnnotatedTextFieldType buildFieldType(FieldType fieldType, MapperBuilder wrapAnalyzer(analyzers.getSearchQuoteAnalyzer()) ); return new AnnotatedTextFieldType( - context.buildFullName(name()), + context.buildFullName(leafName()), store.getValue(), tsi, context.isSourceSynthetic(), @@ -158,12 +158,12 @@ public AnnotatedTextFieldMapper build(MapperBuilderContext context) { if (analyzers.positionIncrementGap.isConfigured()) { if (fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) { throw new IllegalArgumentException( - "Cannot set position_increment_gap on field [" + name() + "] without positions enabled" + "Cannot set position_increment_gap on field [" + leafName() + "] without positions enabled" ); } } return new AnnotatedTextFieldMapper( - name(), + leafName(), fieldType, buildFieldType(fieldType, context, multiFields), multiFields, @@ -564,7 +564,7 @@ protected String contentType() { @Override public FieldMapper.Builder getMergeBuilder() { return new Builder( - simpleName(), + leafName(), builder.indexCreatedVersion, builder.analyzers.indexAnalyzers, builder.isSyntheticSourceEnabledViaIndexMode @@ -580,11 +580,11 @@ protected SyntheticSourceMode syntheticSourceMode() { public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { if (copyTo.copyToFields().isEmpty() != true) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" + "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); } if (fieldType.stored()) { - return new StringStoredFieldFieldLoader(name(), simpleName(), null) { + return new StringStoredFieldFieldLoader(fullPath(), leafName(), null) { @Override protected void write(XContentBuilder b, Object value) throws IOException { b.value((String) value); @@ -594,7 +594,7 @@ protected void write(XContentBuilder b, Object value) throws IOException { var kwd = TextFieldMapper.SyntheticSourceHelper.getKeywordFieldMapperForSyntheticSource(this); if (kwd != null) { - return kwd.syntheticFieldLoader(simpleName()); + return kwd.syntheticFieldLoader(leafName()); } throw new IllegalArgumentException( @@ -602,7 +602,7 @@ protected void write(XContentBuilder b, Object value) throws IOException { Locale.ROOT, "field [%s] of type [%s] doesn't support synthetic source unless it is stored or has a sub-field of" + " type [keyword] with doc values or stored and without a normalizer", - name(), + fullPath(), typeName() ) ); diff --git a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapperTests.java b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapperTests.java index 4e3a53d64a841..a5319387a2b68 100644 --- a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapperTests.java +++ b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapperTests.java @@ -335,7 +335,7 @@ public void testStoreParameterDefaults() throws IOException { var source = source(TimeSeriesRoutingHashFieldMapper.DUMMY_ENCODED_VALUE, b -> { b.field("field", "1234"); if (timeSeriesIndexMode) { - b.field("@timestamp", randomMillisUpToYear9999()); + 
b.field("@timestamp", "2000-10-10T23:40:53.384Z"); b.field("dimension", "dimension1"); } }, null); diff --git a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java index 44f52105f64c9..0b29bc9062917 100644 --- a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java +++ b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java @@ -55,8 +55,8 @@ protected Parameter[] getParameters() { @Override public Murmur3FieldMapper build(MapperBuilderContext context) { return new Murmur3FieldMapper( - name(), - new Murmur3FieldType(context.buildFullName(name()), stored.getValue(), meta.getValue()), + leafName(), + new Murmur3FieldType(context.buildFullName(leafName()), stored.getValue(), meta.getValue()), multiFieldsBuilder.build(this, context), copyTo ); @@ -100,7 +100,7 @@ protected Murmur3FieldMapper(String simpleName, MappedFieldType mappedFieldType, @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName()).init(this); + return new Builder(leafName()).init(this); } @Override @@ -116,7 +116,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio final long hash = MurmurHash3.hash128(bytes.bytes, bytes.offset, bytes.length, 0, new MurmurHash3.Hash128()).h1; context.doc().add(new SortedNumericDocValuesField(fieldType().name(), hash)); if (fieldType().isStored()) { - context.doc().add(new StoredField(name(), hash)); + context.doc().add(new StoredField(fullPath(), hash)); } } } diff --git a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java index dd6e7035a1c17..b188f4b148590 100644 --- a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java +++ b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java @@ -90,7 +90,7 @@ public void postParse(DocumentParserContext context) { return; } final int value = context.sourceToParse().source().length(); - NumberType.INTEGER.addFields(context.doc(), name(), value, true, true, true); + NumberType.INTEGER.addFields(context.doc(), fullPath(), value, true, true, true); } @Override diff --git a/qa/ccs-common-rest/build.gradle b/qa/ccs-common-rest/build.gradle index 82fe7c48d87f8..e5e8c5a489d5b 100644 --- a/qa/ccs-common-rest/build.gradle +++ b/qa/ccs-common-rest/build.gradle @@ -11,8 +11,7 @@ apply plugin: 'elasticsearch.internal-yaml-rest-test' restResources { restApi { include '_common', 'bulk', 'count', 'cluster', 'field_caps', 'get', 'knn_search', 'index', 'indices', 'msearch', - 'search', 'async_search', 'graph', '*_point_in_time', 'info', 'scroll', 'clear_scroll', 'search_mvt', 'eql', 'sql', - 'put_script' + 'search', 'async_search', 'graph', '*_point_in_time', 'info', 'scroll', 'clear_scroll', 'search_mvt', 'eql', 'sql' } restTests { includeCore 'field_caps', 'msearch', 'search', 'suggest', 'scroll', "indices.resolve_index" diff --git a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle index c48674831c422..b63522daa4b4c 100644 --- a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle +++ b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle @@ -58,7 +58,11 @@ BuildParams.bwcVersions.withWireCompatible { 
bwcVersion, baseName -> dependsOn "processTestResources" mustRunAfter("precommit") doFirst { - localCluster.get().nextNodeToNextVersion() + def cluster = localCluster.get() + cluster.nodes.forEach { node -> + node.getAllTransportPortURI() + } + cluster.nextNodeToNextVersion() } } diff --git a/qa/custom-rest-controller/src/javaRestTest/java/co/elastic/elasticsearch/test/CustomRestPlugin.java b/qa/custom-rest-controller/src/javaRestTest/java/co/elastic/elasticsearch/test/CustomRestPlugin.java index 4fbdfa65d40ba..e978e7f2a5c11 100644 --- a/qa/custom-rest-controller/src/javaRestTest/java/co/elastic/elasticsearch/test/CustomRestPlugin.java +++ b/qa/custom-rest-controller/src/javaRestTest/java/co/elastic/elasticsearch/test/CustomRestPlugin.java @@ -21,7 +21,7 @@ import org.elasticsearch.rest.RestHandler; import org.elasticsearch.rest.RestInterceptor; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.telemetry.tracing.Tracer; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.usage.UsageService; public class CustomRestPlugin extends Plugin implements RestServerActionPlugin { @@ -59,9 +59,9 @@ public CustomController( NodeClient client, CircuitBreakerService circuitBreakerService, UsageService usageService, - Tracer tracer + TelemetryProvider telemetryProvider ) { - super(interceptor, client, circuitBreakerService, usageService, tracer); + super(interceptor, client, circuitBreakerService, usageService, telemetryProvider); } @Override @@ -83,9 +83,9 @@ public RestController getRestController( NodeClient client, CircuitBreakerService circuitBreakerService, UsageService usageService, - Tracer tracer + TelemetryProvider telemetryProvider ) { - return new CustomController(interceptor, client, circuitBreakerService, usageService, tracer); + return new CustomController(interceptor, client, circuitBreakerService, usageService, telemetryProvider); } } diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java index aac4b6a020d4b..6c924fe8e429a 100644 --- a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java @@ -59,20 +59,13 @@ private int indexDocs(String index, final int idStart, final int numDocs) throws */ private int indexDocWithConcurrentUpdates(String index, final int docId, int nUpdates) throws IOException, InterruptedException { indexDocs(index, docId, 1); - Thread[] indexThreads = new Thread[nUpdates]; - for (int i = 0; i < nUpdates; i++) { - indexThreads[i] = new Thread(() -> { - try { - indexDocs(index, docId, 1); - } catch (IOException e) { - throw new AssertionError("failed while indexing [" + e.getMessage() + "]"); - } - }); - indexThreads[i].start(); - } - for (Thread indexThread : indexThreads) { - indexThread.join(); - } + runInParallel(nUpdates, i -> { + try { + indexDocs(index, docId, 1); + } catch (IOException e) { + throw new AssertionError("failed while indexing [" + e.getMessage() + "]"); + } + }); return nUpdates + 1; } diff --git a/qa/packaging/build.gradle b/qa/packaging/build.gradle index 758dfe6661766..d1890e8c49fcf 100644 --- a/qa/packaging/build.gradle +++ b/qa/packaging/build.gradle @@ -36,3 +36,8 @@ tasks.named("test").configure { enabled = false } tasks.register('destructivePackagingTest') { dependsOn 'destructiveDistroTest' } + +tasks.named('resolveAllDependencies') { + // avoid resolving all elasticsearch distros + 
enabled = false +} diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsUpgradeIT.java new file mode 100644 index 0000000000000..c80911fe5fbcf --- /dev/null +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsUpgradeIT.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.upgrades; + +import com.carrotsearch.randomizedtesting.annotations.Name; + +import org.elasticsearch.client.Request; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.FeatureFlag; +import org.elasticsearch.test.cluster.local.DefaultLocalClusterSpecBuilder; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.Version; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TemporaryFolder; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.util.Map; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; + +public class FileSettingsUpgradeIT extends ParameterizedRollingUpgradeTestCase { + + @BeforeClass + public static void checkVersion() { + assumeTrue("Only valid when upgrading from pre-file settings", getOldClusterTestVersion().before(new Version(8, 4, 0))); + } + + private static final String settingsJSON = """ + { + "metadata": { + "version": "1", + "compatibility": "8.4.0" + }, + "state": { + "cluster_settings": { + "indices.recovery.max_bytes_per_sec": "50mb" + } + } + }"""; + + private static final TemporaryFolder repoDirectory = new TemporaryFolder(); + + private static final ElasticsearchCluster cluster = new DefaultLocalClusterSpecBuilder().distribution(DistributionType.DEFAULT) + .version(getOldClusterTestVersion()) + .nodes(NODE_NUM) + .setting("path.repo", new Supplier<>() { + @Override + @SuppressForbidden(reason = "TemporaryFolder only has io.File methods, not nio.File") + public String get() { + return repoDirectory.getRoot().getPath(); + } + }) + .setting("xpack.security.enabled", "false") + .feature(FeatureFlag.TIME_SERIES_MODE) + .configFile("operator/settings.json", Resource.fromString(settingsJSON)) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(repoDirectory).around(cluster); + + public FileSettingsUpgradeIT(@Name("upgradedNodes") int upgradedNodes) { + super(upgradedNodes); + } + + @Override + protected ElasticsearchCluster getUpgradeCluster() { + return cluster; + } + + public void testFileSettingsApplied() throws IOException { + if (isUpgradedCluster()) { + // the nodes have all been upgraded. 
Check they read the file settings ok + Map response = responseAsMap(adminClient().performRequest(new Request("GET", "/_cluster/settings"))); + assertThat(XContentMapValues.extractValue(response, "persistent", "indices", "recovery", "max_bytes_per_sec"), equalTo("50mb")); + } + } +} diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BlockedSearcherRestCancellationTestCase.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BlockedSearcherRestCancellationTestCase.java index 8803ad4af7348..250fe5a3a79fb 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BlockedSearcherRestCancellationTestCase.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BlockedSearcherRestCancellationTestCase.java @@ -30,6 +30,7 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.EnginePlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.tasks.Task; import java.util.ArrayList; import java.util.Collection; @@ -76,6 +77,10 @@ void runTest(Request request, String actionPrefix) throws Exception { createIndex("test", Settings.builder().put(BLOCK_SEARCHER_SETTING.getKey(), true).build()); ensureGreen("test"); + assert request.getOptions().containsHeader(Task.X_OPAQUE_ID_HTTP_HEADER) == false; + final var opaqueId = getTestClass().getSimpleName() + "-" + getTestName() + "-" + randomUUID(); + request.setOptions(request.getOptions().toBuilder().addHeader(Task.X_OPAQUE_ID_HTTP_HEADER, opaqueId)); + final List searcherBlocks = new ArrayList<>(); for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { for (final IndexService indexService : indicesService) { @@ -96,7 +101,8 @@ void runTest(Request request, String actionPrefix) throws Exception { } final PlainActionFuture future = new PlainActionFuture<>(); - logger.info("--> sending request"); + logger.info("--> sending request, opaque id={}", opaqueId); + final Cancellable cancellable = getRestClient().performRequestAsync(request, wrapAsRestResponseListener(future)); awaitTaskWithPrefix(actionPrefix); @@ -108,7 +114,7 @@ void runTest(Request request, String actionPrefix) throws Exception { cancellable.cancel(); expectThrows(CancellationException.class, future::actionGet); - assertAllCancellableTasksAreCancelled(actionPrefix); + assertAllCancellableTasksAreCancelled(actionPrefix, opaqueId); } finally { Releasables.close(releasables); } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/capabilities.json b/rest-api-spec/src/main/resources/rest-api-spec/api/capabilities.json index 28c341d9983cc..a96be0d63834e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/capabilities.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/capabilities.json @@ -1,7 +1,7 @@ { "capabilities": { "documentation": { - "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/capabilities.html", + "url": "https://github.com/elastic/elasticsearch/blob/main/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc#require-or-skip-api-capabilities", "description": "Checks if the specified combination of method, API, parameters, and arbitrary capabilities are supported" }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.get_categories.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.get_categories.json index 6dfa2e64dd293..69f8dd74e3d55 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.get_categories.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.get_categories.json @@ -30,7 +30,7 @@ } }, { - "path":"/_ml/anomaly_detectors/{job_id}/results/categories/", + "path":"/_ml/anomaly_detectors/{job_id}/results/categories", "methods":[ "GET", "POST" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/query_rule.delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/query_rules.delete_rule.json similarity index 93% rename from rest-api-spec/src/main/resources/rest-api-spec/api/query_rule.delete.json rename to rest-api-spec/src/main/resources/rest-api-spec/api/query_rules.delete_rule.json index 35f46132ae47f..8a97dcd311237 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/query_rule.delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/query_rules.delete_rule.json @@ -1,10 +1,10 @@ { - "query_rule.delete": { + "query_rules.delete_rule": { "documentation": { "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-query-rule.html", "description": "Deletes an individual query rule within a ruleset." }, - "stability": "experimental", + "stability": "stable", "visibility": "public", "headers": { "accept": [ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/query_ruleset.delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/query_rules.delete_ruleset.json similarity index 91% rename from rest-api-spec/src/main/resources/rest-api-spec/api/query_ruleset.delete.json rename to rest-api-spec/src/main/resources/rest-api-spec/api/query_rules.delete_ruleset.json index b8378ca9fc4e1..90144ca9f3cf5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/query_ruleset.delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/query_rules.delete_ruleset.json @@ -1,10 +1,10 @@ { - "query_ruleset.delete": { + "query_rules.delete_ruleset": { "documentation": { "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-query-ruleset.html", "description": "Deletes a query ruleset." }, - "stability": "experimental", + "stability": "stable", "visibility": "public", "headers": { "accept": [ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/query_rule.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/query_rules.get_rule.json similarity index 94% rename from rest-api-spec/src/main/resources/rest-api-spec/api/query_rule.get.json rename to rest-api-spec/src/main/resources/rest-api-spec/api/query_rules.get_rule.json index ac7b97eca5fb5..681b68ab583d8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/query_rule.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/query_rules.get_rule.json @@ -1,10 +1,10 @@ { - "query_rule.get": { + "query_rules.get_rule": { "documentation": { "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-query-rule.html", "description": "Returns the details about an individual query rule within a ruleset." 
}, - "stability": "experimental", + "stability": "stable", "visibility": "public", "headers": { "accept": [ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/query_ruleset.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/query_rules.get_ruleset.json similarity index 91% rename from rest-api-spec/src/main/resources/rest-api-spec/api/query_ruleset.get.json rename to rest-api-spec/src/main/resources/rest-api-spec/api/query_rules.get_ruleset.json index 74f822a56a6bf..28268ea667b8c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/query_ruleset.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/query_rules.get_ruleset.json @@ -1,10 +1,10 @@ { - "query_ruleset.get": { + "query_rules.get_ruleset": { "documentation": { "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-query-ruleset.html", "description": "Returns the details about a query ruleset." }, - "stability": "experimental", + "stability": "stable", "visibility": "public", "headers": { "accept": [ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/query_ruleset.list.json b/rest-api-spec/src/main/resources/rest-api-spec/api/query_rules.list_rulesets.json similarity index 92% rename from rest-api-spec/src/main/resources/rest-api-spec/api/query_ruleset.list.json rename to rest-api-spec/src/main/resources/rest-api-spec/api/query_rules.list_rulesets.json index 81138b3823f78..e3e98adedb147 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/query_ruleset.list.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/query_rules.list_rulesets.json @@ -1,10 +1,10 @@ { - "query_ruleset.list": { + "query_rules.list_rulesets": { "documentation": { "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/list-query-rulesets.html", "description": "Lists query rulesets." }, - "stability": "experimental", + "stability": "stable", "visibility": "public", "headers": { "accept": [ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/query_rule.put.json b/rest-api-spec/src/main/resources/rest-api-spec/api/query_rules.put_rule.json similarity index 95% rename from rest-api-spec/src/main/resources/rest-api-spec/api/query_rule.put.json rename to rest-api-spec/src/main/resources/rest-api-spec/api/query_rules.put_rule.json index 4a2fee52d2805..5cc21b4f3249c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/query_rule.put.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/query_rules.put_rule.json @@ -1,10 +1,10 @@ { - "query_rule.put": { + "query_rules.put_rule": { "documentation": { "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/put-query-rule.html", "description": "Creates or updates a query rule within a ruleset." 
}, - "stability": "experimental", + "stability": "stable", "visibility": "public", "headers": { "accept": [ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/query_ruleset.put.json b/rest-api-spec/src/main/resources/rest-api-spec/api/query_rules.put_ruleset.json similarity index 93% rename from rest-api-spec/src/main/resources/rest-api-spec/api/query_ruleset.put.json rename to rest-api-spec/src/main/resources/rest-api-spec/api/query_rules.put_ruleset.json index 2964da5f8a16f..12cbccc6b7651 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/query_ruleset.put.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/query_rules.put_ruleset.json @@ -1,10 +1,10 @@ { - "query_ruleset.put": { + "query_rules.put_ruleset": { "documentation": { "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/put-query-ruleset.html", "description": "Creates or updates a query ruleset." }, - "stability": "experimental", + "stability": "stable", "visibility": "public", "headers": { "accept": [ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/rollup.get_jobs.json b/rest-api-spec/src/main/resources/rest-api-spec/api/rollup.get_jobs.json index 46ac1c4d304d1..e373c9f08bfd5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/rollup.get_jobs.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/rollup.get_jobs.json @@ -24,7 +24,7 @@ } }, { - "path":"/_rollup/job/", + "path":"/_rollup/job", "methods":[ "GET" ] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/rollup.get_rollup_caps.json b/rest-api-spec/src/main/resources/rest-api-spec/api/rollup.get_rollup_caps.json index 7dcc83ee0cd47..a72187f9ca926 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/rollup.get_rollup_caps.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/rollup.get_rollup_caps.json @@ -24,7 +24,7 @@ } }, { - "path":"/_rollup/data/", + "path":"/_rollup/data", "methods":[ "GET" ] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/security.bulk_delete_role.json b/rest-api-spec/src/main/resources/rest-api-spec/api/security.bulk_delete_role.json new file mode 100644 index 0000000000000..8810602aa2c18 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/security.bulk_delete_role.json @@ -0,0 +1,43 @@ +{ + "security.bulk_delete_role": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-bulk-delete-role.html", + "description": "Bulk delete roles in the native realm." + }, + "stability": "stable", + "visibility": "public", + "headers": { + "accept": [ + "application/json" + ], + "content_type": [ + "application/json" + ] + }, + "url": { + "paths": [ + { + "path": "/_security/role", + "methods": [ + "DELETE" + ] + } + ] + }, + "params": { + "refresh": { + "type": "enum", + "options": [ + "true", + "false", + "wait_for" + ], + "description": "If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes." 
+ } + }, + "body": { + "description": "The roles to delete", + "required": true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/security.bulk_put_role.json b/rest-api-spec/src/main/resources/rest-api-spec/api/security.bulk_put_role.json new file mode 100644 index 0000000000000..f8916a48b31e6 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/security.bulk_put_role.json @@ -0,0 +1,43 @@ +{ + "security.bulk_put_role": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-bulk-put-role.html", + "description": "Bulk adds and updates roles in the native realm." + }, + "stability": "stable", + "visibility": "public", + "headers": { + "accept": [ + "application/json" + ], + "content_type": [ + "application/json" + ] + }, + "url": { + "paths": [ + { + "path": "/_security/role", + "methods": [ + "POST" + ] + } + ] + }, + "params": { + "refresh": { + "type": "enum", + "options": [ + "true", + "false", + "wait_for" + ], + "description": "If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes." + } + }, + "body": { + "description": "The roles to add", + "required": true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/security.put_privileges.json b/rest-api-spec/src/main/resources/rest-api-spec/api/security.put_privileges.json index da63002b49485..8c920e10f285b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/security.put_privileges.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/security.put_privileges.json @@ -13,7 +13,7 @@ "url":{ "paths":[ { - "path":"/_security/privilege/", + "path":"/_security/privilege", "methods":[ "PUT", "POST" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/security.query_role.json b/rest-api-spec/src/main/resources/rest-api-spec/api/security.query_role.json new file mode 100644 index 0000000000000..d9f9d9f45ff69 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/security.query_role.json @@ -0,0 +1,33 @@ +{ + "security.query_role": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-query-role.html", + "description": "Retrieves information for Roles using a subset of query DSL" + }, + "stability": "stable", + "visibility": "public", + "headers": { + "accept": [ + "application/json" + ], + "content_type": [ + "application/json" + ] + }, + "url": { + "paths": [ + { + "path": "/_security/_query/role", + "methods": [ + "GET", + "POST" + ] + } + ] + }, + "body": { + "description": "From, size, query, sort and search_after", + "required": false + } + } +} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/70_index_mode.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/70_index_mode.yml new file mode 100644 index 0000000000000..9da6d2c5f086e --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/70_index_mode.yml @@ -0,0 +1,59 @@ +--- +setup: + - requires: + cluster_features: "mapper.query_index_mode" + reason: "require index_mode" + + - do: + indices.create: + index: test_metrics + body: + settings: + index: + mode: time_series + routing_path: [container] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + mappings: + properties: + 
"@timestamp": + type: date + container: + type: keyword + time_series_dimension: true + + - do: + indices.create: + index: test + body: + mappings: + properties: + "@timestamp": + type: date + +--- +Field-caps: + - do: + field_caps: + index: "test*" + fields: "*" + body: { index_filter: { term: { _index_mode: "time_series" } } } + - match: { indices: [ "test_metrics" ] } + - do: + field_caps: + index: "test*" + fields: "*" + body: { index_filter: { term: { _index_mode: "logs" } } } + - match: { indices: [ ] } + - do: + field_caps: + index: "test*" + fields: "*" + body: { index_filter: { term: { _index_mode: "standard" } } } + - match: { indices: [ "test" ] } + - do: + field_caps: + index: "test*" + fields: "*" + - match: { indices: [ "test" , "test_metrics" ] } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml index b2b9e1b90cb3b..55605849de69c 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml @@ -1062,8 +1062,8 @@ flattened field no doc values: --- flattened field with ignore_above: - requires: - cluster_features: ["gte_v8.8.0"] - reason: support for synthetic source on flattened fields added in 8.8.0 + cluster_features: ["mapper.track_ignored_source"] + reason: requires tracking ignored source - do: indices.create: @@ -1105,6 +1105,10 @@ flattened field with ignore_above: field: key1: key2: "key2" + key3: "key3_ignored" + key4: "key4_ignored" + key5: + key6: "key6_ignored" key7: "key7" - is_false: fields diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml index 9fc82eb125def..22deb7012c4ed 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml @@ -13,7 +13,7 @@ invalid: mode: synthetic properties: kwd: - type: keyword + type: boolean doc_values: false @@ -1155,3 +1155,52 @@ doubly nested object: - match: { hits.hits.2._source.nested_field.1.sub_nested_field.1.number: 42 } - length: { hits.hits.3._source: 1 } - match: { hits.hits.3._source.id: 3 } + + +--- +nested object with stored array: + - requires: + cluster_features: ["mapper.track_ignored_source"] + reason: requires tracking ignored source + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + nested_array_regular: + type: nested + nested_array_stored: + type: nested + store_array_source: true + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "name": "A", "nested_array_regular": [ { "b": [ { "c": 10 }, { "c": 100 } ] }, { "b": [ { "c": 20 }, { "c": 200 } ] } ] }' + - '{ "create": { } }' + - '{ "name": "B", "nested_array_stored": [ { "b": [ { "c": 10 }, { "c": 100 } ] }, { "b": [ { "c": 20 }, { "c": 200 } ] } ] }' + + - match: { errors: false } + + - do: + search: + index: test + sort: name + - match: { hits.total.value: 2 } + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.nested_array_regular.0.b.c: [ 10, 100] } + - match: { hits.hits.0._source.nested_array_regular.1.b.c: [ 20, 200] } + - 
match: { hits.hits.1._source.name: B } + - match: { hits.hits.1._source.nested_array_stored.0.b.0.c: 10 } + - match: { hits.hits.1._source.nested_array_stored.0.b.1.c: 100 } + - match: { hits.hits.1._source.nested_array_stored.1.b.0.c: 20 } + - match: { hits.hits.1._source.nested_array_stored.1.b.1.c: 200 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.sort/20_nested.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.sort/20_nested.yml new file mode 100644 index 0000000000000..c88d638199dba --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.sort/20_nested.yml @@ -0,0 +1,90 @@ +--- +sort doc with nested object: + - requires: + cluster_features: ["mapper.index_sorting_on_nested"] + reason: uses index sorting on nested fields + - do: + indices.create: + index: test + body: + settings: + index.sort.field: name + mappings: + properties: + name: + type: keyword + nested_field: + type: nested + nested_array: + type: nested + other: + type: object + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "name": "aaaa", "nested_field": {"a": 1, "b": 2}, "nested_array": [{ "a": 10, "b": 20 }, { "a": 100, "b": 200 }], "other": { "value": "A" } }' + - '{ "create": { } }' + - '{ "name": "cccc", "nested_field": {"a": 3, "b": 4}, "nested_array": [{ "a": 30, "b": 40 }, { "a": 300, "b": 400 }], "other": { "value": "C"} }' + - '{ "create": { } }' + - '{ "name": "bbbb", "nested_field": {"a": 5, "b": 6}, "nested_array": [{ "a": 50, "b": 60 }, { "a": 500, "b": 600 }], "other": { "value": "B"} }' + + - do: + search: + index: test + sort: name + + - match: { hits.total.value: 3 } + - match: { hits.hits.0._source.name: aaaa } + - match: { hits.hits.0._source.nested_field.a: 1 } + - match: { hits.hits.0._source.nested_field.b: 2 } + - match: { hits.hits.0._source.nested_array.0.a: 10 } + - match: { hits.hits.0._source.nested_array.0.b: 20 } + - match: { hits.hits.0._source.nested_array.1.a: 100 } + - match: { hits.hits.0._source.nested_array.1.b: 200 } + - match: { hits.hits.0._source.other.value: A } + - match: { hits.hits.1._source.name: bbbb } + - match: { hits.hits.1._source.nested_field.a: 5 } + - match: { hits.hits.1._source.nested_field.b: 6 } + - match: { hits.hits.1._source.nested_array.0.a: 50 } + - match: { hits.hits.1._source.nested_array.0.b: 60 } + - match: { hits.hits.1._source.nested_array.1.a: 500 } + - match: { hits.hits.1._source.nested_array.1.b: 600 } + - match: { hits.hits.1._source.other.value: B } + - match: { hits.hits.2._source.name: cccc } + - match: { hits.hits.2._source.nested_field.a: 3 } + - match: { hits.hits.2._source.nested_field.b: 4 } + - match: { hits.hits.2._source.nested_array.0.a: 30 } + - match: { hits.hits.2._source.nested_array.0.b: 40 } + - match: { hits.hits.2._source.nested_array.1.a: 300 } + - match: { hits.hits.2._source.nested_array.1.b: 400 } + - match: { hits.hits.2._source.other.value: C } + + +--- +sort doc on nested field: + - requires: + cluster_features: [ "mapper.index_sorting_on_nested" ] + reason: uses index sorting on nested fields + - do: + catch: /cannot apply index sort to field \[nested_field\.foo\] under nested object \[nested_field\]/ + indices.create: + index: test + body: + settings: + index.sort.field: nested_field.foo + index.sort.mode: min + mappings: + properties: + name: + type: keyword + nested_field: + type: nested + properties: + foo: + type: keyword + bar: + type: keyword diff --git 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml index 5e8948b7fdea3..4976e5e15adbe 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml @@ -392,6 +392,7 @@ override sort mode settings: --- override sort field using nested field type in sorting: - requires: + cluster_features: ["mapper.index_sorting_on_nested"] test_runner_features: [ capabilities ] capabilities: - method: PUT @@ -433,11 +434,12 @@ override sort field using nested field type in sorting: - match: { error.root_cause.0.type: "illegal_argument_exception" } - match: { error.type: "illegal_argument_exception" } - - match: { error.reason: "cannot have nested fields when index sort is activated" } + - match: { error.reason: "cannot apply index sort to field [nested] under nested object [nested]" } --- override sort field using nested field type: - requires: + cluster_features: ["mapper.index_sorting_on_nested"] test_runner_features: [ capabilities ] capabilities: - method: PUT @@ -446,7 +448,6 @@ override sort field using nested field type: reason: "Support for 'logs' index mode capability required" - do: - catch: bad_request indices.create: index: test-nested body: @@ -474,10 +475,7 @@ override sort field using nested field type: properties: keywords: type: keyword - - - match: { error.root_cause.0.type: "illegal_argument_exception" } - - match: { error.type: "illegal_argument_exception" } - - match: { error.reason: "cannot have nested fields when index sort is activated" } + - is_false: error --- routing path not allowed in logs mode: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml index ac0f8aec4f3d0..35089cc4c85a7 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml @@ -417,7 +417,7 @@ - requires: test_runner_features: [arbitrary_key] - cluster_features: ["mapper.track_ignored_source"] + cluster_features: ["mapper.query_index_mode"] reason: "_ignored_source added to mappings" - do: @@ -478,34 +478,35 @@ # 6. _ignored # 7. _ignored_source # 8. _index - # 9. _nested_path - # 10. _routing - # 11. _seq_no - # 12. _source - # 13. _tier - # 14. _version - # 15. @timestamp - # 16. authors.age - # 17. authors.company - # 18. authors.company.keyword - # 19. authors.name.last_name - # 20. authors.name.first_name - # 21. authors.name.full_name - # 22. link - # 23. title - # 24. url + # 9. _index_mode + # 10. _nested_path + # 11. _routing + # 12. _seq_no + # 13. _source + # 14. _tier + # 15. _version + # 16. @timestamp + # 17. authors.age + # 18. authors.company + # 19. authors.company.keyword + # 20. authors.name.last_name + # 21. authors.name.first_name + # 22. authors.name.full_name + # 23. link + # 24. title + # 25. url # Object mappers: - # 25. authors - # 26. authors.name + # 26. authors + # 27. authors.name # Runtime field mappers: - # 27. a_source_field + # 28. 
a_source_field - - gte: { nodes.$node_id.indices.mappings.total_count: 27 } + - gte: { nodes.$node_id.indices.mappings.total_count: 28 } - is_true: nodes.$node_id.indices.mappings.total_estimated_overhead - gte: { nodes.$node_id.indices.mappings.total_estimated_overhead_in_bytes: 26624 } - - match: { nodes.$node_id.indices.indices.index1.mappings.total_count: 27 } + - match: { nodes.$node_id.indices.indices.index1.mappings.total_count: 28 } - is_true: nodes.$node_id.indices.indices.index1.mappings.total_estimated_overhead - - match: { nodes.$node_id.indices.indices.index1.mappings.total_estimated_overhead_in_bytes: 27648 } + - match: { nodes.$node_id.indices.indices.index1.mappings.total_estimated_overhead_in_bytes: 28672 } --- "indices mappings does not exist in shards level": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/101_knn_nested_search_bits.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/101_knn_nested_search_bits.yml new file mode 100644 index 0000000000000..a3d920d903ae8 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/101_knn_nested_search_bits.yml @@ -0,0 +1,301 @@ +setup: + - requires: + cluster_features: "mapper.vectors.bit_vectors" + test_runner_features: close_to + reason: 'bit vectors added in 8.15' + - do: + indices.create: + index: test + body: + settings: + index: + number_of_shards: 2 + mappings: + properties: + name: + type: keyword + nested: + type: nested + properties: + paragraph_id: + type: keyword + vector: + type: dense_vector + dims: 40 + index: true + element_type: bit + similarity: l2_norm + + - do: + index: + index: test + id: "1" + body: + name: cow.jpg + nested: + - paragraph_id: 0 + vector: [100, 20, -34, 15, -100] + - paragraph_id: 1 + vector: [40, 30, -3, 1, -20] + + - do: + index: + index: test + id: "2" + body: + name: moose.jpg + nested: + - paragraph_id: 0 + vector: [-1, 100, -13, 14, -127] + - paragraph_id: 2 + vector: [0, 100, 0, 15, -127] + - paragraph_id: 3 + vector: [0, 1, 0, 2, -15] + + - do: + index: + index: test + id: "3" + body: + name: rabbit.jpg + nested: + - paragraph_id: 0 + vector: [1, 111, -13, 14, -1] + + - do: + indices.refresh: {} + +--- +"nested kNN search only": + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: nested.vector + query_vector: [-1, 90, -10, 14, -127] + k: 2 + num_candidates: 3 + + - match: {hits.hits.0._id: "2"} + - match: {hits.hits.0.fields.name.0: "moose.jpg"} + + - match: {hits.hits.1._id: "1"} + - match: {hits.hits.1.fields.name.0: "cow.jpg"} + + + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: nested.vector + query_vector: [-1, 90, -10, 14, -127] + k: 2 + num_candidates: 3 + inner_hits: {size: 1, "fields": ["nested.paragraph_id"], _source: false} + + + - match: {hits.hits.0._id: "2"} + - match: {hits.hits.0.fields.name.0: "moose.jpg"} + - match: {hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0"} + + - match: {hits.hits.1._id: "1"} + - match: {hits.hits.1.fields.name.0: "cow.jpg"} + - match: {hits.hits.1.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0"} + +--- +"nested kNN search filtered": + + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: nested.vector + query_vector: [-1, 90, -10, 14, -127] + k: 2 + num_candidates: 3 + filter: {term: {name: "rabbit.jpg"}} + + - match: {hits.total.value: 1} + - match: {hits.hits.0._id: "3"} + - match: {hits.hits.0.fields.name.0: 
"rabbit.jpg"} + + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: nested.vector + query_vector: [-1, 90, -10, 14, -127] + k: 3 + num_candidates: 3 + filter: {term: {name: "rabbit.jpg"}} + inner_hits: {size: 1, fields: ["nested.paragraph_id"], _source: false} + + - match: {hits.total.value: 1} + - match: {hits.hits.0._id: "3"} + - match: {hits.hits.0.fields.name.0: "rabbit.jpg"} + - match: {hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0"} +--- +"nested kNN search inner_hits size > 1": + - do: + index: + index: test + id: "4" + body: + name: moose.jpg + nested: + - paragraph_id: 0 + vector: [-1, 90, -10, 14, -127] + - paragraph_id: 2 + vector: [ 0, 100.0, 0, 14, -127 ] + - paragraph_id: 3 + vector: [ 0, 1.0, 0, 2, -15 ] + + - do: + index: + index: test + id: "5" + body: + name: moose.jpg + nested: + - paragraph_id: 0 + vector: [ -1, 100, -13, 14, -127 ] + - paragraph_id: 2 + vector: [ 0, 100, 0, 15, -127 ] + - paragraph_id: 3 + vector: [ 0, 1, 0, 2, -15 ] + + - do: + index: + index: test + id: "6" + body: + name: moose.jpg + nested: + - paragraph_id: 0 + vector: [ -1, 100, -13, 15, -127 ] + - paragraph_id: 2 + vector: [ 0, 100, 0, 15, -127 ] + - paragraph_id: 3 + vector: [ 0, 1, 0, 2, -15 ] + - do: + indices.refresh: { } + + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: nested.vector + query_vector: [-1, 90, -10, 15, -127] + k: 3 + num_candidates: 5 + inner_hits: {size: 2, fields: ["nested.paragraph_id"], _source: false} + + - match: {hits.total.value: 3} + - length: { hits.hits.0.inner_hits.nested.hits.hits: 2 } + - length: { hits.hits.1.inner_hits.nested.hits.hits: 2 } + - length: { hits.hits.2.inner_hits.nested.hits.hits: 2 } + + - match: { hits.hits.0.fields.name.0: "moose.jpg" } + - match: { hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: nested.vector + query_vector: [-1, 90, -10, 15, -127] + k: 5 + num_candidates: 5 + inner_hits: {size: 2, fields: ["nested.paragraph_id"], _source: false} + + - match: {hits.total.value: 5} + # All these initial matches are "moose.jpg", which has 3 nested vectors, but two are closest + - match: {hits.hits.0.fields.name.0: "moose.jpg"} + - length: { hits.hits.0.inner_hits.nested.hits.hits: 2 } + - match: { hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + - match: { hits.hits.0.inner_hits.nested.hits.hits.1.fields.nested.0.paragraph_id.0: "2" } + - match: {hits.hits.1.fields.name.0: "moose.jpg"} + - length: { hits.hits.1.inner_hits.nested.hits.hits: 2 } + - match: { hits.hits.1.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + - match: { hits.hits.1.inner_hits.nested.hits.hits.1.fields.nested.0.paragraph_id.0: "2" } + - match: {hits.hits.2.fields.name.0: "moose.jpg"} + - length: { hits.hits.2.inner_hits.nested.hits.hits: 2 } + - match: { hits.hits.2.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + - match: { hits.hits.2.inner_hits.nested.hits.hits.1.fields.nested.0.paragraph_id.0: "2" } + - match: {hits.hits.3.fields.name.0: "moose.jpg"} + - length: { hits.hits.3.inner_hits.nested.hits.hits: 2 } + - match: { hits.hits.3.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + - match: { hits.hits.3.inner_hits.nested.hits.hits.1.fields.nested.0.paragraph_id.0: "2" } + # Rabbit only has one passage vector + - match: {hits.hits.4.fields.name.0: "cow.jpg"} + - length: { 
hits.hits.4.inner_hits.nested.hits.hits: 2 } + + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: nested.vector + query_vector: [ -1, 90, -10, 15, -127 ] + k: 3 + num_candidates: 3 + filter: {term: {name: "cow.jpg"}} + inner_hits: {size: 3, fields: ["nested.paragraph_id"], _source: false} + + - match: {hits.total.value: 1} + - match: { hits.hits.0._id: "1" } + - length: { hits.hits.0.inner_hits.nested.hits.hits: 2 } + - match: { hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + - match: { hits.hits.0.inner_hits.nested.hits.hits.1.fields.nested.0.paragraph_id.0: "1" } +--- +"nested kNN search inner_hits & boosting": + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: nested.vector + query_vector: [-1, 90, -10, 15, -127] + k: 3 + num_candidates: 5 + inner_hits: {size: 2, fields: ["nested.paragraph_id"], _source: false} + + - close_to: { hits.hits.0._score: {value: 0.8, error: 0.00001} } + - close_to: { hits.hits.0.inner_hits.nested.hits.hits.0._score: {value: 0.8, error: 0.00001} } + - close_to: { hits.hits.1._score: {value: 0.625, error: 0.00001} } + - close_to: { hits.hits.1.inner_hits.nested.hits.hits.0._score: {value: 0.625, error: 0.00001} } + - close_to: { hits.hits.2._score: {value: 0.5, error: 0.00001} } + - close_to: { hits.hits.2.inner_hits.nested.hits.hits.0._score: {value: 0.5, error: 0.00001} } + + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: nested.vector + query_vector: [-1, 90, -10, 15, -127] + k: 3 + num_candidates: 5 + boost: 2 + inner_hits: {size: 2, fields: ["nested.paragraph_id"], _source: false} + - close_to: { hits.hits.0._score: {value: 1.6, error: 0.00001} } + - close_to: { hits.hits.0.inner_hits.nested.hits.hits.0._score: {value: 1.6, error: 0.00001} } + - close_to: { hits.hits.1._score: {value: 1.25, error: 0.00001} } + - close_to: { hits.hits.1.inner_hits.nested.hits.hits.0._score: {value: 1.25, error: 0.00001} } + - close_to: { hits.hits.2._score: {value: 1, error: 0.00001} } + - close_to: { hits.hits.2.inner_hits.nested.hits.hits.0._score: {value: 1.0, error: 0.00001} } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/140_knn_query_with_other_queries.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/140_knn_query_with_other_queries.yml index 28ecd8ef59c02..d52a5daf22344 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/140_knn_query_with_other_queries.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/140_knn_query_with_other_queries.yml @@ -26,15 +26,6 @@ setup: my_name: type: keyword store: true - aliases: - my_alias: - filter: - term: - my_name: v2 - my_alias1: - filter: - term: - my_name: v1 - do: bulk: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/170_knn_search_hex_encoded_byte_vectors.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/170_knn_search_hex_encoded_byte_vectors.yml index 74fbe221c0fe7..f989e17e6ec30 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/170_knn_search_hex_encoded_byte_vectors.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/170_knn_search_hex_encoded_byte_vectors.yml @@ -116,8 +116,9 @@ setup: --- "Knn search with hex string for byte field - dimensions mismatch" : # [64, 10, -30, 10] - is encoded as '400ae20a' + # the error message has 
been adjusted in later versions - do: - catch: /the query vector has a different dimension \[4\] than the index vectors \[3\]/ + catch: /dimension|dimensions \[4\] than the document|index vectors \[3\]/ search: index: knn_hex_vector_index body: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/175_knn_query_hex_encoded_byte_vectors.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/175_knn_query_hex_encoded_byte_vectors.yml index e01f3ec18b8c3..cd94275234661 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/175_knn_query_hex_encoded_byte_vectors.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/175_knn_query_hex_encoded_byte_vectors.yml @@ -116,8 +116,9 @@ setup: --- "Knn query with hex string for byte field - dimensions mismatch" : # [64, 10, -30, 10] - is encoded as '400ae20a' + # the error message has been adjusted in later versions - do: - catch: /the query vector has a different dimension \[4\] than the index vectors \[3\]/ + catch: /dimension|dimensions \[4\] than the document|index vectors \[3\]/ search: index: knn_hex_vector_index body: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/190_knn_query-with-k-param.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/190_knn_query-with-k-param.yml new file mode 100644 index 0000000000000..f6538b573809a --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/190_knn_query-with-k-param.yml @@ -0,0 +1,262 @@ +# test how knn query interact with other queries +setup: + - requires: + cluster_features: "search.vectors.k_param_supported" + reason: 'k param for knn as query is required' + test_runner_features: close_to + + - do: + indices.create: + index: my_index + body: + settings: + number_of_shards: 1 + mappings: + dynamic: false + properties: + my_vector: + type: dense_vector + dims: 4 + index : true + similarity : l2_norm + index_options: + type: hnsw + m: 16 + ef_construction: 200 + my_name: + type: keyword + store: true + + - do: + bulk: + refresh: true + index: my_index + body: + - '{"index": {"_id": "1"}}' + - '{"my_vector": [1, 1, 1, 1], "my_name": "v1"}' + - '{"index": {"_id": "2"}}' + - '{"my_vector": [1, 1, 1, 2], "my_name": "v2"}' + - '{"index": {"_id": "3"}}' + - '{"my_vector": [1, 1, 1, 3], "my_name": "v1"}' + - '{"index": {"_id": "4"}}' + - '{"my_vector": [1, 1, 1, 4], "my_name": "v2"}' + - '{"index": {"_id": "5"}}' + - '{"my_vector": [1, 1, 1, 5], "my_name": "v1"}' + - '{"index": {"_id": "6"}}' + - '{"my_vector": [1, 1, 1, 6], "my_name": "v2"}' + - '{"index": {"_id": "7"}}' + - '{"my_vector": [1, 1, 1, 7], "my_name": "v1"}' + - '{"index": {"_id": "8"}}' + - '{"my_vector": [1, 1, 1, 8], "my_name": "v2"}' + - '{"index": {"_id": "9"}}' + - '{"my_vector": [1, 1, 1, 9], "my_name": "v1"}' + - '{"index": {"_id": "10"}}' + - '{"my_vector": [1, 1, 1, 10], "my_name": "v2"}' + +--- +"Simple knn query with k param": + - do: + search: + index: my_index + body: + query: + knn: + field: my_vector + query_vector: [1, 1, 1, 1] + k: 5 + + - match: { hits.total.value: 5 } # collector sees k docs + - length: {hits.hits: 5} # k docs retrieved + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.2._id: "3" } + - match: { hits.hits.3._id: "4" } + - match: { hits.hits.4._id: "5" } + + - do: + search: + index: my_index + body: + size: 3 + query: + knn: + field: 
my_vector + query_vector: [ 1, 1, 1, 1 ] + k: 5 + + - match: { hits.total.value: 5 } # collector sees k docs + - length: { hits.hits: 3 } # size docs retrieved + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.2._id: "3" } + + - do: + search: + index: my_index + body: + size: 3 + query: + knn: + field: my_vector + query_vector: [ 1, 1, 1, 1 ] + k: 5 + num_candidates: 10 + + - match: { hits.total.value: 5 } # collector sees k docs + - length: { hits.hits: 3 } # size docs retrieved + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.2._id: "3" } + +--- +"Knn query within the standard retriever": + - do: + search: + index: my_index + body: + retriever: + standard: + filter: + bool: + must: + term: + my_name: "v1" + query: + knn: + field: my_vector + query_vector: [ 1, 1, 1, 1 ] + k: 10 + - match: { hits.total.value: 5 } # docs that pass post-filter + - length: { hits.hits: 5 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.1._id: "3" } + - match: { hits.hits.2._id: "5" } + - match: { hits.hits.3._id: "7" } + - match: { hits.hits.4._id: "9" } + +--- +"Incorrect k param": + - do: + catch: bad_request + search: + index: my_index + body: + query: + knn: + field: my_vector + query_vector: [ 1, 1, 1, 1 ] + k: 5 + num_candidates: 3 + - match: { status: 400 } + - match: { error.type: "x_content_parse_exception" } + - match: { error.caused_by.type: "illegal_argument_exception" } + - match: { error.caused_by.reason: "[num_candidates] cannot be less than [k]" } + + - do: + catch: bad_request + search: + index: my_index + body: + query: + knn: + field: my_vector + query_vector: [ 1, 1, 1, 1 ] + k: 0 + - match: { status: 400 } + - match: { error.type: "x_content_parse_exception" } + - match: { error.caused_by.type: "illegal_argument_exception" } + - match: { error.caused_by.reason: "[k] must be greater than 0" } + +--- +"Function score query with knn query with k param": + # find top 5 knn docs, then boost docs with name v1 by 10 and docs with name v2 by 100 + - do: + search: + index: my_index + body: + size: 3 + fields: [ my_name ] + query: + function_score: + query: + knn: + field: my_vector + query_vector: [ 1, 1, 1, 1 ] + k : 5 + functions: + - filter: { match: { my_name: v1 } } + weight: 10 + - filter: { match: { my_name: v2 } } + weight: 100 + boost_mode: multiply + + - match: { hits.total.value: 5 } # collector sees k docs + - length: { hits.hits: 3 } + - match: { hits.hits.0._id: "2" } + - match: { hits.hits.0.fields.my_name.0: v2 } + - close_to: { hits.hits.0._score: { value: 50.0, error: 0.001 } } + - match: { hits.hits.1._id: "1" } + - match: { hits.hits.1.fields.my_name.0: v1 } + - close_to: { hits.hits.1._score: { value: 10.0, error: 0.001 } } + - match: { hits.hits.2._id: "4" } + - match: { hits.hits.2.fields.my_name.0: v2 } + - close_to: { hits.hits.2._score: { value: 10.0, error: 0.001 } } + +--- +"dis_max query with knn query": + - do: + search: + index: my_index + body: + size: 10 + fields: [ my_name ] + query: + dis_max: + queries: + - knn: { field: my_vector, query_vector: [ 1, 1, 1, 1 ], k: 5, num_candidates: 10 } + - match: { my_name: v2 } + tie_breaker: 0.8 + + - match: { hits.total.value: 8 } # 5 knn results + extra results from match query + - match: { hits.hits.0._id: "2" } + - match: { hits.hits.0.fields.my_name.0: v2 } + - match: { hits.hits.1._id: "1" } + - match: { hits.hits.1.fields.my_name.0: v1 } + - match: { hits.hits.2._id: "4" } + - match: { hits.hits.2.fields.my_name.0: 
v2 } + - match: { hits.hits.3._id: "6" } + - match: { hits.hits.3.fields.my_name.0: v2 } + - match: { hits.hits.4._id: "8" } + - match: { hits.hits.4.fields.my_name.0: v2 } + - match: { hits.hits.5._id: "10" } + - match: { hits.hits.5.fields.my_name.0: v2 } + - match: { hits.hits.6._id: "3" } + - match: { hits.hits.6.fields.my_name.0: v1 } + - match: { hits.hits.7._id: "5" } + - match: { hits.hits.7.fields.my_name.0: v1 } + +--- +"Aggregations with collected number of docs depends on k param": + - do: + search: + index: my_index + body: + size: 2 + query: + knn: + field: my_vector + query_vector: [1, 1, 1, 1] + k: 5 # collect 5 results from each shard + aggs: + my_agg: + terms: + field: my_name + order: + _key: asc + + - length: {hits.hits: 2} + - match: {hits.total.value: 5} + - match: {aggregations.my_agg.buckets.0.key: 'v1'} + - match: {aggregations.my_agg.buckets.1.key: 'v2'} + - match: {aggregations.my_agg.buckets.0.doc_count: 3} + - match: {aggregations.my_agg.buckets.1.doc_count: 2} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml index 7f0c24e217d14..825bcecf33fce 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml @@ -287,6 +287,9 @@ setup: - requires: cluster_features: "gte_v8.4.0" reason: 'kNN added to search endpoint in 8.4' + - skip: + cluster_features: "gte_v8.16.0" + reason: 'non-existent field handling improved in 8.16' - do: catch: bad_request search: @@ -298,9 +301,28 @@ setup: query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] k: 2 num_candidates: 3 + - match: { error.root_cause.0.type: "query_shard_exception" } - match: { error.root_cause.0.reason: "failed to create query: field [nonexistent] does not exist in the mapping" } +--- +"Test nonexistent field is match none": + - requires: + cluster_features: "gte_v8.16.0" + reason: 'non-existent field handling improved in 8.16' + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: nonexistent + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] + k: 2 + num_candidates: 3 + + - length: {hits.hits: 0} + --- "KNN Vector similarity search only": - requires: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml index 24437e3db1379..5f1af2ca5c52f 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml @@ -204,7 +204,7 @@ setup: num_candidates: 3 k: 3 field: vector - similarity: 10.3 + similarity: 17 query_vector: [-0.5, 90.0, -10, 14.8] - length: {hits.hits: 1} @@ -222,7 +222,7 @@ setup: num_candidates: 3 k: 3 field: vector - similarity: 11 + similarity: 17 query_vector: [-0.5, 90.0, -10, 14.8] filter: {"term": {"name": "moose.jpg"}} @@ -428,7 +428,7 @@ setup: index: hnsw_byte_quantized_merge_cosine id: "1" body: - embedding: [1.0, 1.0, 1.0, 1.0] + embedding: [0.5, 0.5, 0.5, 0.5, 0.5, 1.0] # Flush in order to provoke a merge later - do: @@ -439,7 +439,7 @@ setup: index: hnsw_byte_quantized_merge_cosine id: "2" body: - embedding: [1.0, 1.0, 1.0, 
2.0] + embedding: [0.0, 0.0, 0.0, 1.0, 1.0, 0.5] # Flush in order to provoke a merge later - do: @@ -450,7 +450,7 @@ setup: index: hnsw_byte_quantized_merge_cosine id: "3" body: - embedding: [1.0, 1.0, 1.0, 3.0] + embedding: [0.0, 0.0, 0.0, 0.0, 0.0, 10.5] - do: indices.forcemerge: @@ -468,7 +468,7 @@ setup: query: knn: field: embedding - query_vector: [1.0, 1.0, 1.0, 1.0] + query_vector: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] num_candidates: 10 - length: { hits.hits: 3 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit.yml new file mode 100644 index 0000000000000..ed469ffd7ff16 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit.yml @@ -0,0 +1,356 @@ +setup: + - requires: + cluster_features: "mapper.vectors.bit_vectors" + reason: 'mapper.vectors.bit_vectors' + + - do: + indices.create: + index: test + body: + mappings: + properties: + name: + type: keyword + vector: + type: dense_vector + element_type: bit + dims: 40 + index: true + similarity: l2_norm + + - do: + index: + index: test + id: "1" + body: + name: cow.jpg + vector: [2, -1, 1, 4, -3] + + - do: + index: + index: test + id: "2" + body: + name: moose.jpg + vector: [127.0, -128.0, 0.0, 1.0, -1.0] + + - do: + index: + index: test + id: "3" + body: + name: rabbit.jpg + vector: [5, 4.0, 3, 2.0, 127] + + - do: + indices.refresh: {} + +--- +"kNN search only": + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [127, 127, -128, -128, 127] + k: 2 + num_candidates: 3 + + - match: {hits.hits.0._id: "2"} + - match: {hits.hits.0.fields.name.0: "moose.jpg"} + + - match: {hits.hits.1._id: "1"} + - match: {hits.hits.1.fields.name.0: "cow.jpg"} + +--- +"kNN search plus query": + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [127.0, -128.0, 0.0, 1.0, -1.0] + k: 2 + num_candidates: 3 + query: + term: + name: rabbit.jpg + + - match: {hits.hits.0._id: "3"} + - match: {hits.hits.0.fields.name.0: "rabbit.jpg"} + + - match: {hits.hits.1._id: "2"} + - match: {hits.hits.1.fields.name.0: "moose.jpg"} + +--- +"kNN search with filter": + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [5.0, 4, 3.0, 2, 127.0] + k: 2 + num_candidates: 3 + filter: + term: + name: "rabbit.jpg" + + - match: {hits.total.value: 1} + - match: {hits.hits.0._id: "3"} + - match: {hits.hits.0.fields.name.0: "rabbit.jpg"} + + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [2, -1, 1, 4, -3] + k: 2 + num_candidates: 3 + filter: + - term: + name: "rabbit.jpg" + - term: + _id: 2 + + - match: {hits.total.value: 0} + +--- +"Vector similarity search only": + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + num_candidates: 3 + k: 3 + field: vector + similarity: 0.98 + query_vector: [5, 4.0, 3, 2.0, 127] + + - length: {hits.hits: 1} + + - match: {hits.hits.0._id: "3"} + - match: {hits.hits.0.fields.name.0: "rabbit.jpg"} +--- +"Vector similarity with filter only": + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + num_candidates: 3 + k: 3 + field: vector + similarity: 0.98 + query_vector: [5, 4.0, 3, 2.0, 127] + filter: {"term": {"name": "rabbit.jpg"}} + + - length: {hits.hits: 1} + + - match: {hits.hits.0._id: "3"} + - match: {hits.hits.0.fields.name.0: 
"rabbit.jpg"} + + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + num_candidates: 3 + k: 3 + field: vector + similarity: 0.98 + query_vector: [5, 4.0, 3, 2.0, 127] + filter: {"term": {"name": "cow.jpg"}} + + - length: {hits.hits: 0} +--- +"dim mismatch": + - do: + catch: bad_request + search: + index: test + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [1, 2, 3, 4, 5, 6] + k: 2 + num_candidates: 3 +--- +"disallow quantized vector types": + - do: + catch: bad_request + indices.create: + index: test + body: + mappings: + properties: + name: + type: keyword + vector: + type: dense_vector + element_type: bit + dims: 32 + index: true + similarity: l2_norm + index_options: + type: int8_flat + + - do: + catch: bad_request + indices.create: + index: test + body: + mappings: + properties: + name: + type: keyword + vector: + type: dense_vector + element_type: bit + dims: 32 + index: true + similarity: l2_norm + index_options: + type: int4_flat + + - do: + catch: bad_request + indices.create: + index: test + body: + mappings: + properties: + name: + type: keyword + vector: + type: dense_vector + element_type: bit + dims: 32 + index: true + similarity: l2_norm + index_options: + type: int8_hnsw + + - do: + catch: bad_request + indices.create: + index: test + body: + mappings: + properties: + name: + type: keyword + vector: + type: dense_vector + element_type: bit + dims: 32 + index: true + similarity: l2_norm + index_options: + type: int4_hnsw +--- +"disallow vector index type change to quantized type": + - do: + catch: bad_request + indices.put_mapping: + index: test + body: + properties: + vector: + type: dense_vector + element_type: bit + dims: 32 + index: true + similarity: l2_norm + index_options: + type: int4_hnsw + - do: + catch: bad_request + indices.put_mapping: + index: test + body: + properties: + vector: + type: dense_vector + element_type: bit + dims: 32 + index: true + similarity: l2_norm + index_options: + type: int8_hnsw +--- +"Defaults to l2_norm with bit vectors": + - do: + indices.create: + index: default_to_l2_norm_bit + body: + mappings: + properties: + vector: + type: dense_vector + element_type: bit + dims: 40 + index: true + + - do: + indices.get_mapping: + index: default_to_l2_norm_bit + + - match: { default_to_l2_norm_bit.mappings.properties.vector.similarity: l2_norm } + +--- +"Only allow l2_norm with bit vectors": + - do: + catch: bad_request + indices.create: + index: dot_product_fails_for_bits + body: + mappings: + properties: + vector: + type: dense_vector + element_type: bit + dims: 40 + index: true + similarity: dot_product + + - do: + catch: bad_request + indices.create: + index: cosine_product_fails_for_bits + body: + mappings: + properties: + vector: + type: dense_vector + element_type: bit + dims: 40 + index: true + similarity: cosine + + - do: + catch: bad_request + indices.create: + index: cosine_product_fails_for_bits + body: + mappings: + properties: + type: dense_vector + element_type: bit + dims: 40 + index: true + similarity: max_inner_product diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit_flat.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit_flat.yml new file mode 100644 index 0000000000000..ec7bde4de8435 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit_flat.yml @@ -0,0 +1,223 @@ +setup: + - requires: + cluster_features: 
"mapper.vectors.bit_vectors" + reason: 'mapper.vectors.bit_vectors' + + - do: + indices.create: + index: test + body: + mappings: + properties: + name: + type: keyword + vector: + type: dense_vector + element_type: bit + dims: 40 + index: true + similarity: l2_norm + index_options: + type: flat + + - do: + index: + index: test + id: "1" + body: + name: cow.jpg + vector: [2, -1, 1, 4, -3] + + - do: + index: + index: test + id: "2" + body: + name: moose.jpg + vector: [127.0, -128.0, 0.0, 1.0, -1.0] + + - do: + index: + index: test + id: "3" + body: + name: rabbit.jpg + vector: [5, 4.0, 3, 2.0, 127] + + - do: + indices.refresh: {} + +--- +"kNN search only": + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [127, 127, -128, -128, 127] + k: 2 + num_candidates: 3 + + - match: {hits.hits.0._id: "2"} + - match: {hits.hits.0.fields.name.0: "moose.jpg"} + + - match: {hits.hits.1._id: "1"} + - match: {hits.hits.1.fields.name.0: "cow.jpg"} + +--- +"kNN search plus query": + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [127.0, -128.0, 0.0, 1.0, -1.0] + k: 2 + num_candidates: 3 + query: + term: + name: rabbit.jpg + + - match: {hits.hits.0._id: "3"} + - match: {hits.hits.0.fields.name.0: "rabbit.jpg"} + + - match: {hits.hits.1._id: "2"} + - match: {hits.hits.1.fields.name.0: "moose.jpg"} + +--- +"kNN search with filter": + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [5.0, 4, 3.0, 2, 127.0] + k: 2 + num_candidates: 3 + filter: + term: + name: "rabbit.jpg" + + - match: {hits.total.value: 1} + - match: {hits.hits.0._id: "3"} + - match: {hits.hits.0.fields.name.0: "rabbit.jpg"} + + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [2, -1, 1, 4, -3] + k: 2 + num_candidates: 3 + filter: + - term: + name: "rabbit.jpg" + - term: + _id: 2 + + - match: {hits.total.value: 0} + +--- +"Vector similarity search only": + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + num_candidates: 3 + k: 3 + field: vector + similarity: 0.98 + query_vector: [5, 4.0, 3, 2.0, 127] + + - length: {hits.hits: 1} + + - match: {hits.hits.0._id: "3"} + - match: {hits.hits.0.fields.name.0: "rabbit.jpg"} +--- +"Vector similarity with filter only": + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + num_candidates: 3 + k: 3 + field: vector + similarity: 0.98 + query_vector: [5, 4.0, 3, 2.0, 127] + filter: {"term": {"name": "rabbit.jpg"}} + + - length: {hits.hits: 1} + + - match: {hits.hits.0._id: "3"} + - match: {hits.hits.0.fields.name.0: "rabbit.jpg"} + + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + num_candidates: 3 + k: 3 + field: vector + similarity: 0.98 + query_vector: [5, 4.0, 3, 2.0, 127] + filter: {"term": {"name": "cow.jpg"}} + + - length: {hits.hits: 0} +--- +"dim mismatch": + - do: + catch: bad_request + search: + index: test + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [1, 2, 3, 4, 5, 6] + k: 2 + num_candidates: 3 +--- +"disallow vector index type change to quantized type": + - do: + catch: bad_request + indices.put_mapping: + index: test + body: + properties: + vector: + type: dense_vector + element_type: bit + dims: 32 + index: true + similarity: l2_norm + index_options: + type: int4_hnsw + - do: + catch: bad_request + indices.put_mapping: + index: test + body: + properties: + vector: + type: dense_vector + element_type: bit + dims: 32 + index: 
true + similarity: l2_norm + index_options: + type: int8_hnsw diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml index 983ac2719e71b..806e5ff73b355 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml @@ -148,6 +148,9 @@ setup: --- "Test nonexistent field": + - skip: + cluster_features: 'gte_v8.16.0' + reason: 'non-existent field handling improved in 8.16' - do: catch: bad_request search: @@ -159,8 +162,26 @@ setup: query_vector: [ 1, 0, 0, 0, -1 ] k: 2 num_candidates: 3 + - match: { error.root_cause.0.type: "query_shard_exception" } - match: { error.root_cause.0.reason: "failed to create query: field [nonexistent] does not exist in the mapping" } +--- +"Test nonexistent field is match none": + - requires: + cluster_features: 'gte_v8.16.0' + reason: 'non-existent field handling improved in 8.16' + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: nonexistent + query_vector: [ 1, 0, 0, 0, -1 ] + k: 2 + num_candidates: 3 + + - length: {hits.hits: 0} --- "Vector similarity search only": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/20_synonyms_get.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/20_synonyms_get.yml index 0ed97d1b0f308..3494f33466ce4 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/20_synonyms_get.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/20_synonyms_get.yml @@ -55,6 +55,38 @@ setup: - synonyms: "bye => goodbye" id: "test-id-2" +--- +"Pagination - invalid size and from": + - do: + catch: bad_request + synonyms.get_synonym: + id: test-get-synonyms + size: -1 + + - do: + catch: bad_request + synonyms.get_synonym: + id: test-get-synonyms + from: -1 + +--- +"Pagination - synonym max size exceeded": + - requires: + cluster_features: ["gte_v8.15.0"] + reason: Synonym size checks introduced in 8.15.0 + + - do: + catch: bad_request + synonyms.get_synonym: + id: test-get-synonyms + size: 100001 + + - do: + catch: bad_request + synonyms.get_synonym: + id: test-get-synonyms + from: 100001 + --- "Pagination - from": - do: @@ -71,7 +103,6 @@ setup: - synonyms: "test => check" id: "test-id-3" - --- "Synonyms set with same IDs": - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml index 485b5b1796ec4..46476fd071b30 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml @@ -336,6 +336,67 @@ set start_time and end_time without timeseries mode: time_series: end_time: 1632625782000 +--- +set start_time, end_time and routing_path via put settings api without time_series mode: + - requires: + cluster_features: [ "gte_v8.15.0" ] + reason: bug fixed in 8.15.0 + + - do: + indices.create: + index: test-index + - match: { acknowledged: true } + + - do: + catch: /\[index.time_series.end_time\] requires \[index.mode=time_series\]/ + indices.put_settings: + index: test-index + body: + index.time_series.end_time: 1632625782000 + + - do: + catch: /Can't update non dynamic settings 
\[\[index.time_series.start_time\]\] for open indices/ + indices.put_settings: + index: test-index + body: + index.time_series.start_time: 1632625782000 + + - do: + catch: /Can't update non dynamic settings \[\[index.routing_path\]\] for open indices/ + indices.put_settings: + index: test-index + body: + settings: + index: + routing_path: foo + + - do: + indices.close: + index: test-index + + - do: + catch: /\[index.time_series.end_time\] requires \[index.mode=time_series\]/ + indices.put_settings: + index: test-index + body: + index.time_series.end_time: 1632625782000 + + - do: + catch: /final test-index setting \[index.time_series.start_time\], not updateable/ + indices.put_settings: + index: test-index + body: + index.time_series.start_time: 1632625782000 + + - do: + catch: /final test-index setting \[index.routing_path\], not updateable/ + indices.put_settings: + index: test-index + body: + settings: + index: + routing_path: foo + --- set bad start_time and end_time: - requires: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml index 3c76653960386..dae50704dd0d0 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml @@ -337,3 +337,20 @@ sort by tsid: - match: {hits.hits.7.sort: ["KCjEJ9R_BgO8TRX2QOd6dpR12oDh--qoyNZRQPy43y34Qdy2dpsyG0o", 1619635864467]} - match: {hits.hits.7.fields._tsid: [ "KCjEJ9R_BgO8TRX2QOd6dpR12oDh--qoyNZRQPy43y34Qdy2dpsyG0o"]} + +--- +aggs by index_mode: + - requires: + cluster_features: ["mapper.query_index_mode"] + reason: require _index_mode metadata field + - do: + search: + index: test + body: + aggs: + modes: + terms: + field: "_index_mode" + - match: {aggregations.modes.buckets.0.key: "time_series"} + - match: {aggregations.modes.buckets.0.doc_count: 8} + diff --git a/server/build.gradle b/server/build.gradle index 09753cfc32c74..40cd9d31dd6c8 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -40,8 +40,6 @@ dependencies { implementation project(":libs:elasticsearch-simdvec") implementation project(':libs:elasticsearch-plugin-classloader') - // no compile dependency by server, but server defines security policy for this codebase so it i> - runtimeOnly project(":libs:elasticsearch-preallocate") // lucene api "org.apache.lucene:lucene-core:${versions.lucene}" @@ -82,7 +80,8 @@ dependencies { internalClusterTestImplementation(project(":test:framework")) { exclude group: 'org.elasticsearch', module: 'server' } - + internalClusterTestImplementation(project(':modules:reindex')) + internalClusterTestImplementation(project(':modules:mapper-extras')) } spotless { @@ -147,6 +146,18 @@ if (BuildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.insecure_network_trace_enabled', 'true' + excludes << '**/IndexSettingsOverrideTests.class' +} + +TaskProvider indexSettingsOverrideTest = tasks.register("indexSettingsOverrideTest", Test) { + include '**/IndexSettingsOverrideTests.class' + systemProperty 'es.stateless.allow.index.refresh_interval.override', 'true' + classpath = sourceSets.test.runtimeClasspath + testClassesDirs = sourceSets.test.output.classesDirs +} + +tasks.named("check").configure { + dependsOn(indexSettingsOverrideTest) } tasks.named("thirdPartyAudit").configure { diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index 4ad2a56d2e979..32d8be475dbbe 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -546,13 +546,7 @@ public void testListTasksWaitForCompletion() throws Exception { // This ensures that a task has progressed to the point of listing all running tasks and subscribing to their updates for (var threadPool : internalCluster().getInstances(ThreadPool.class)) { - var max = threadPool.info(ThreadPool.Names.MANAGEMENT).getMax(); - var executor = threadPool.executor(ThreadPool.Names.MANAGEMENT); - var waitForManagementToCompleteAllTasks = new CyclicBarrier(max + 1); - for (int i = 0; i < max; i++) { - executor.submit(() -> safeAwait(waitForManagementToCompleteAllTasks)); - } - safeAwait(waitForManagementToCompleteAllTasks); + flushThreadPoolExecutor(threadPool, ThreadPool.Names.MANAGEMENT); } return future; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java index 4a076cb3b6e66..60462863dd09a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java @@ -29,10 +29,8 @@ import java.util.Collection; import java.util.List; -import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; @@ -102,7 +100,7 @@ public void testWaitForCompletion() throws Exception { })); // briefly fill up the management pool so that (a) we know the wait has started and (b) we know it's not blocking - flushThreadPool(threadPool, ThreadPool.Names.MANAGEMENT); + flushThreadPoolExecutor(threadPool, ThreadPool.Names.MANAGEMENT); final var getWaitFuture = new PlainActionFuture(); clusterAdmin().prepareGetTask(task.taskId()).setWaitForCompletion(true).execute(getWaitFuture.delegateFailure((l, getResult) -> { @@ -127,16 +125,6 @@ public void testWaitForCompletion() throws Exception { getWaitFuture.get(10, TimeUnit.SECONDS); } - private void flushThreadPool(ThreadPool threadPool, String executor) throws InterruptedException, BrokenBarrierException, - TimeoutException { - var maxThreads = threadPool.info(executor).getMax(); - var barrier = new CyclicBarrier(maxThreads + 1); - for (int i = 0; i < maxThreads; i++) { - threadPool.executor(executor).execute(() -> safeAwait(barrier)); - } - barrier.await(10, TimeUnit.SECONDS); - } - @Override protected Collection> getPlugins() { return List.of(TestPlugin.class); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java index 48f1ecb072314..4d52383bfc4e1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -832,30 +832,22 @@ public void testRolloverConcurrently() throws Exception { assertAcked(client().execute(TransportPutComposableIndexTemplateAction.TYPE, putTemplateRequest).actionGet()); final CyclicBarrier barrier = new CyclicBarrier(numOfThreads); - final Thread[] threads = new Thread[numOfThreads]; - for (int i = 0; i < numOfThreads; i++) { + runInParallel(numOfThreads, i -> { var aliasName = "test-" + i; - threads[i] = new Thread(() -> { - assertAcked(prepareCreate(aliasName + "-000001").addAlias(new Alias(aliasName).writeIndex(true)).get()); - for (int j = 1; j <= numberOfRolloversPerThread; j++) { - try { - barrier.await(); - } catch (Exception e) { - throw new RuntimeException(e); - } - var response = indicesAdmin().prepareRolloverIndex(aliasName).waitForActiveShards(ActiveShardCount.NONE).get(); - assertThat(response.getOldIndex(), equalTo(aliasName + Strings.format("-%06d", j))); - assertThat(response.getNewIndex(), equalTo(aliasName + Strings.format("-%06d", j + 1))); - assertThat(response.isDryRun(), equalTo(false)); - assertThat(response.isRolledOver(), equalTo(true)); + assertAcked(prepareCreate(aliasName + "-000001").addAlias(new Alias(aliasName).writeIndex(true)).get()); + for (int j = 1; j <= numberOfRolloversPerThread; j++) { + try { + barrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); } - }); - threads[i].start(); - } - - for (Thread thread : threads) { - thread.join(); - } + var response = indicesAdmin().prepareRolloverIndex(aliasName).waitForActiveShards(ActiveShardCount.NONE).get(); + assertThat(response.getOldIndex(), equalTo(aliasName + Strings.format("-%06d", j))); + assertThat(response.getNewIndex(), equalTo(aliasName + Strings.format("-%06d", j + 1))); + assertThat(response.isDryRun(), equalTo(false)); + assertThat(response.isRolledOver(), equalTo(true)); + } + }); for (int i = 0; i < numOfThreads; i++) { var aliasName = "test-" + i; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkAfterWriteFsyncFailureIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkAfterWriteFsyncFailureIT.java new file mode 100644 index 0000000000000..6a4e973d8fcc5 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkAfterWriteFsyncFailureIT.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.bulk; + +import org.apache.lucene.tests.mockfile.FilterFileChannel; +import org.apache.lucene.tests.mockfile.FilterFileSystemProvider; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.PathUtils; +import org.elasticsearch.core.PathUtilsForTesting; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.nio.channels.FileChannel; +import java.nio.file.FileSystem; +import java.nio.file.OpenOption; +import java.nio.file.Path; +import java.nio.file.attribute.FileAttribute; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; + +public class BulkAfterWriteFsyncFailureIT extends ESSingleNodeTestCase { + private static FSyncFailureFileSystemProvider fsyncFailureFileSystemProvider; + + @BeforeClass + public static void installDisruptFSyncFS() { + FileSystem current = PathUtils.getDefaultFileSystem(); + fsyncFailureFileSystemProvider = new FSyncFailureFileSystemProvider(current); + PathUtilsForTesting.installMock(fsyncFailureFileSystemProvider.getFileSystem(null)); + } + + @AfterClass + public static void removeDisruptFSyncFS() { + PathUtilsForTesting.teardown(); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/110551") + public void testFsyncFailureDoesNotAdvanceLocalCheckpoints() { + String indexName = randomIdentifier(); + client().admin() + .indices() + .prepareCreate(indexName) + .setSettings( + Settings.builder() + .put(INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ) + .setMapping("key", "type=keyword", "val", "type=long") + .get(); + ensureGreen(indexName); + + var localCheckpointBeforeBulk = getLocalCheckpointForShard(indexName, 0); + fsyncFailureFileSystemProvider.failFSyncOnce(true); + var bulkResponse = client().prepareBulk().add(prepareIndex(indexName).setId("1").setSource("key", "foo", "val", 10)).get(); + assertTrue(bulkResponse.hasFailures()); + var localCheckpointAfterFailedBulk = getLocalCheckpointForShard(indexName, 0); + // fsync for the translog failed, hence the checkpoint doesn't advance + assertThat(localCheckpointBeforeBulk, equalTo(localCheckpointAfterFailedBulk)); + + // Since background refreshes are disabled, the shard is considered green until the next operation is appended into the translog + ensureGreen(indexName); + + // If the after write fsync fails, it'll fail the TranslogWriter but not the Engine, we'll need to try to append a new operation + // into the translog so the exception bubbles up and fails the engine. On the other hand, the TranslogReplicationAction will retry + // this action on AlreadyClosedExceptions, that's why the operation ends up succeeding even after the engine failed. 
+ var bulkResponse2 = client().prepareBulk().add(prepareIndex(indexName).setId("2").setSource("key", "bar", "val", 20)).get(); + assertFalse(bulkResponse2.hasFailures()); + + var localCheckpointAfterSuccessfulBulk = getLocalCheckpointForShard(indexName, 0); + assertThat(localCheckpointAfterSuccessfulBulk, is(greaterThan(localCheckpointAfterFailedBulk))); + } + + long getLocalCheckpointForShard(String index, int shardId) { + var indicesService = getInstanceFromNode(IndicesService.class); + var indexShard = indicesService.indexServiceSafe(resolveIndex(index)).getShard(shardId); + return indexShard.getLocalCheckpoint(); + } + + public static class FSyncFailureFileSystemProvider extends FilterFileSystemProvider { + private final AtomicBoolean failFSyncs = new AtomicBoolean(); + + public FSyncFailureFileSystemProvider(FileSystem delegate) { + super("fsyncfailure://", delegate); + } + + public void failFSyncOnce(boolean shouldFail) { + failFSyncs.set(shouldFail); + } + + @Override + public FileChannel newFileChannel(Path path, Set options, FileAttribute... attrs) throws IOException { + return new FilterFileChannel(super.newFileChannel(path, options, attrs)) { + + @Override + public void force(boolean metaData) throws IOException { + if (failFSyncs.compareAndSet(true, false)) { + throw new IOException("simulated"); + } + super.force(metaData); + } + }; + } + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java index 00bd6ee7ee891..5251f171150b7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java @@ -39,7 +39,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; -import java.util.concurrent.CyclicBarrier; import java.util.function.Function; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -518,34 +517,17 @@ public void testFailingVersionedUpdatedOnBulk() throws Exception { createIndex("test"); indexDoc("test", "1", "field", "1"); final BulkResponse[] responses = new BulkResponse[30]; - final CyclicBarrier cyclicBarrier = new CyclicBarrier(responses.length); - Thread[] threads = new Thread[responses.length]; - - for (int i = 0; i < responses.length; i++) { - final int threadID = i; - threads[threadID] = new Thread(() -> { - try { - cyclicBarrier.await(); - } catch (Exception e) { - return; - } - BulkRequestBuilder requestBuilder = client().prepareBulk(); - requestBuilder.add( - client().prepareUpdate("test", "1") - .setIfSeqNo(0L) - .setIfPrimaryTerm(1) - .setDoc(Requests.INDEX_CONTENT_TYPE, "field", threadID) - ); - responses[threadID] = requestBuilder.get(); - }); - threads[threadID].start(); - - } - - for (int i = 0; i < threads.length; i++) { - threads[i].join(); - } + startInParallel(responses.length, threadID -> { + BulkRequestBuilder requestBuilder = client().prepareBulk(); + requestBuilder.add( + client().prepareUpdate("test", "1") + .setIfSeqNo(0L) + .setIfPrimaryTerm(1) + .setDoc(Requests.INDEX_CONTENT_TYPE, "field", threadID) + ); + responses[threadID] = requestBuilder.get(); + }); int successes = 0; for (BulkResponse response : responses) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java index 227a3b8612331..e5dca62a97494 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.junit.annotations.TestIssueLogging; import java.util.ArrayList; import java.util.Arrays; @@ -38,6 +39,10 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThan; +@TestIssueLogging( + issueUrl = "https://github.com/elastic/elasticsearch/issues/109830", + value = "org.elasticsearch.action.search:TRACE," + "org.elasticsearch.search.SearchService:TRACE" +) public class SearchProgressActionListenerIT extends ESSingleNodeTestCase { private List shards; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/support/master/TransportMasterNodeActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/support/master/TransportMasterNodeActionIT.java new file mode 100644 index 0000000000000..7211585d766f4 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/support/master/TransportMasterNodeActionIT.java @@ -0,0 +1,279 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.support.master; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateApplier; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.coordination.LeaderChecker; +import org.elasticsearch.cluster.coordination.PublicationTransportHandler; +import org.elasticsearch.cluster.coordination.StatefulPreVoteCollector; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.test.ClusterServiceUtils; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +public class TransportMasterNodeActionIT extends ESIntegTestCase { + + @SuppressWarnings("unchecked") + @Override + protected Collection> nodePlugins() { + return CollectionUtils.appendToCopyNoNullElements( + super.nodePlugins(), + MockTransportService.TestPlugin.class, + TestActionPlugin.class + ); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal, otherSettings)) + // detect leader failover quickly + .put(LeaderChecker.LEADER_CHECK_RETRY_COUNT_SETTING.getKey(), 1) + .put(LeaderChecker.LEADER_CHECK_INTERVAL_SETTING.getKey(), "100ms") + .build(); + } + + public void testRoutingLoopProtection() { + + final var cleanupTasks = new ArrayList(); + + try { + final var newMaster = ensureSufficientMasterEligibleNodes(); + final long originalTerm = internalCluster().masterClient().admin().cluster().prepareState().get().getState().term(); + final var previousMasterKnowsNewMasterIsElectedLatch = configureElectionLatch(newMaster, cleanupTasks); + + final var newMasterReceivedReroutedMessageFuture = new PlainActionFuture<>(); + final var newMasterReceivedReroutedMessageListener = ActionListener.assertOnce(newMasterReceivedReroutedMessageFuture); + final var reroutedMessageReceived = ActionListener.assertOnce(ActionListener.noop()); + for (final var transportService : internalCluster().getInstances(TransportService.class)) { + final var 
mockTransportService = asInstanceOf(MockTransportService.class, transportService); + cleanupTasks.add(mockTransportService::clearAllRules); + + if (mockTransportService.getLocalNode().getName().equals(newMaster)) { + // Complete listener when the new master receives the re-routed message, ensure it only receives it once, and only from + // a node in the newMaster term. + mockTransportService.addRequestHandlingBehavior(TEST_ACTION_TYPE.name(), (handler, request, channel, task) -> { + assertThat(asInstanceOf(MasterNodeRequest.class, request).masterTerm(), greaterThan(originalTerm)); + newMasterReceivedReroutedMessageListener.onResponse(null); + handler.messageReceived(request, channel, task); + }); + } else { + // Disable every other node's ability to send pre-vote and publish requests + mockTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + if (action.equals(StatefulPreVoteCollector.REQUEST_PRE_VOTE_ACTION_NAME) + || action.equals(PublicationTransportHandler.PUBLISH_STATE_ACTION_NAME)) { + throw new ElasticsearchException("[{}] for [{}] denied", action, connection.getNode()); + } else { + connection.sendRequest(requestId, action, request, options); + } + }); + + // Assert that no other node receives the re-routed message more than once, and only from a node in the original term. + mockTransportService.addRequestHandlingBehavior(TEST_ACTION_TYPE.name(), (handler, request, channel, task) -> { + assertThat(asInstanceOf(MasterNodeRequest.class, request).masterTerm(), equalTo(originalTerm)); + reroutedMessageReceived.onResponse(null); + handler.messageReceived(request, channel, task); + }); + } + } + + final var newMasterStateApplierBlock = blockClusterStateApplier(newMaster, cleanupTasks); + + // trigger a cluster state update, which fails, causing a master failover + internalCluster().getCurrentMasterNodeInstance(ClusterService.class) + .submitUnbatchedStateUpdateTask("failover", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return ClusterState.builder(currentState).build(); + } + + @Override + public void onFailure(Exception e) { + // expected + } + }); + + // Wait until the old master has acknowledged the new master's election + safeAwait(previousMasterKnowsNewMasterIsElectedLatch); + logger.info("New master is elected"); + + // perform a TransportMasterNodeAction on the new master, which doesn't know it's the master yet + final var testActionFuture = client(newMaster).execute(TEST_ACTION_TYPE, new TestRequest()); + + // wait for the request to come back to the new master + safeGet(newMasterReceivedReroutedMessageFuture); + + // Unblock state application on new master, allow it to know of its election win + safeAwait(newMasterStateApplierBlock); + + safeGet(testActionFuture); + } finally { + Releasables.closeExpectNoException(Releasables.wrap(cleanupTasks)); + } + } + + /** + * Block the cluster state applier on a node. Returns only when applier is blocked. 
+ * + * @param nodeName The name of the node on which to block the applier + * @param cleanupTasks The list of clean up tasks + * @return A cyclic barrier which when awaited on will un-block the applier + */ + private static CyclicBarrier blockClusterStateApplier(String nodeName, ArrayList cleanupTasks) { + final var stateApplierBarrier = new CyclicBarrier(2); + internalCluster().getInstance(ClusterService.class, nodeName).getClusterApplierService().onNewClusterState("test", () -> { + // Meet to signify application is blocked + safeAwait(stateApplierBarrier); + // Wait for the signal to unblock + safeAwait(stateApplierBarrier); + return null; + }, ActionListener.noop()); + cleanupTasks.add(stateApplierBarrier::reset); + + // Wait until state application is blocked + safeAwait(stateApplierBarrier); + return stateApplierBarrier; + } + + /** + * Configure a latch that will be released when the existing master knows of the new master's election + * + * @param newMaster The name of the newMaster node + * @param cleanupTasks The list of cleanup tasks + * @return A latch that will be released when the old master acknowledges the new master's election + */ + private CountDownLatch configureElectionLatch(String newMaster, List cleanupTasks) { + final String originalMasterName = internalCluster().getMasterName(); + logger.info("Original master was {}, new master will be {}", originalMasterName, newMaster); + final var previousMasterKnowsNewMasterIsElectedLatch = new CountDownLatch(1); + ClusterStateApplier newMasterMonitor = event -> { + DiscoveryNode masterNode = event.state().nodes().getMasterNode(); + if (masterNode != null && masterNode.getName().equals(newMaster)) { + previousMasterKnowsNewMasterIsElectedLatch.countDown(); + } + }; + ClusterService originalMasterClusterService = internalCluster().getInstance(ClusterService.class, originalMasterName); + originalMasterClusterService.addStateApplier(newMasterMonitor); + cleanupTasks.add(() -> originalMasterClusterService.removeApplier(newMasterMonitor)); + return previousMasterKnowsNewMasterIsElectedLatch; + } + + /** + * Add some master-only nodes and block until they've joined the cluster + *

+ * Ensure that we've got 5 voting nodes in the cluster, this means even if the original + * master accepts its own failed state update before standing down, we can still + * establish a quorum without its (or our own) join. + */ + private static String ensureSufficientMasterEligibleNodes() { + final var votingConfigSizeListener = ClusterServiceUtils.addTemporaryStateListener( + internalCluster().getAnyMasterNodeInstance(ClusterService.class), + cs -> 5 <= cs.coordinationMetadata().getLastCommittedConfiguration().getNodeIds().size() + ); + + try { + final var newNodeNames = internalCluster().startMasterOnlyNodes(Math.max(1, 5 - internalCluster().numMasterNodes())); + safeAwait(votingConfigSizeListener); + return newNodeNames.get(0); + } finally { + votingConfigSizeListener.onResponse(null); + } + } + + private static final ActionType TEST_ACTION_TYPE = new ActionType<>("internal:test"); + + public static final class TestActionPlugin extends Plugin implements ActionPlugin { + @Override + public Collection> getActions() { + return List.of(new ActionHandler<>(TEST_ACTION_TYPE, TestTransportAction.class)); + } + } + + public static final class TestRequest extends MasterNodeRequest { + TestRequest() { + super(TEST_REQUEST_TIMEOUT); + } + + TestRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + } + + public static final class TestTransportAction extends TransportMasterNodeAction { + @Inject + public TestTransportAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + TEST_ACTION_TYPE.name(), + transportService, + clusterService, + threadPool, + actionFilters, + TestRequest::new, + indexNameExpressionResolver, + in -> ActionResponse.Empty.INSTANCE, + threadPool.generic() + ); + } + + @Override + protected void masterOperation(Task task, TestRequest request, ClusterState state, ActionListener listener) { + listener.onResponse(ActionResponse.Empty.INSTANCE); + } + + @Override + protected ClusterBlockException checkBlock(TestRequest request, ClusterState state) { + return null; + } + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java index 136db24767d22..c5c3e441363da 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java @@ -32,6 +32,7 @@ import java.util.List; import java.util.Locale; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; import java.util.function.Consumer; import java.util.stream.IntStream; @@ -310,7 +311,7 @@ public void testAddBlockToUnassignedIndex() throws Exception { } } - public void testConcurrentAddBlock() throws InterruptedException { + public void testConcurrentAddBlock() throws InterruptedException, ExecutionException { final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); createIndex(indexName); @@ -322,31 +323,19 @@ public void testConcurrentAddBlock() throws InterruptedException { IntStream.range(0, nbDocs).mapToObj(i -> prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)).collect(toList()) ); ensureYellowAndNoInitializingShards(indexName); - - final CountDownLatch startClosing = new 
CountDownLatch(1); - final Thread[] threads = new Thread[randomIntBetween(2, 5)]; - final APIBlock block = randomAddableBlock(); + final int threadCount = randomIntBetween(2, 5); try { - for (int i = 0; i < threads.length; i++) { - threads[i] = new Thread(() -> { - safeAwait(startClosing); - try { - indicesAdmin().prepareAddBlock(block, indexName).get(); - assertIndexHasBlock(block, indexName); - } catch (final ClusterBlockException e) { - assertThat(e.blocks(), hasSize(1)); - assertTrue(e.blocks().stream().allMatch(b -> b.id() == block.getBlock().id())); - } - }); - threads[i].start(); - } - - startClosing.countDown(); - for (Thread thread : threads) { - thread.join(); - } + startInParallel(threadCount, i -> { + try { + indicesAdmin().prepareAddBlock(block, indexName).get(); + assertIndexHasBlock(block, indexName); + } catch (final ClusterBlockException e) { + assertThat(e.blocks(), hasSize(1)); + assertTrue(e.blocks().stream().allMatch(b -> b.id() == block.getBlock().id())); + } + }); assertIndexHasBlock(block, indexName); } finally { disableIndexBlock(indexName, block); @@ -422,34 +411,17 @@ public void testAddBlockWhileDeletingIndices() throws Exception { }; try { - for (final String indexToDelete : indices) { - threads.add(new Thread(() -> { - safeAwait(latch); - try { - assertAcked(indicesAdmin().prepareDelete(indexToDelete)); - } catch (final Exception e) { - exceptionConsumer.accept(e); - } - })); - } - for (final String indexToBlock : indices) { - threads.add(new Thread(() -> { - safeAwait(latch); - try { - indicesAdmin().prepareAddBlock(block, indexToBlock).get(); - } catch (final Exception e) { - exceptionConsumer.accept(e); + startInParallel(indices.length * 2, i -> { + try { + if (i < indices.length) { + assertAcked(indicesAdmin().prepareDelete(indices[i])); + } else { + indicesAdmin().prepareAddBlock(block, indices[i - indices.length]).get(); } - })); - } - - for (Thread thread : threads) { - thread.start(); - } - latch.countDown(); - for (Thread thread : threads) { - thread.join(); - } + } catch (final Exception e) { + exceptionConsumer.accept(e); + } + }); } finally { for (final String indexToBlock : indices) { try { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index e0dbc74567053..ffa2cd29778ad 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.cluster; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -44,6 +45,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; @@ -562,6 +564,7 @@ public IndexMetadata randomCreate(String name) { settingsBuilder.put(randomSettings(Settings.EMPTY)).put(IndexMetadata.SETTING_VERSION_CREATED, randomVersion(random())); builder.settings(settingsBuilder); builder.numberOfShards(randomIntBetween(1, 10)).numberOfReplicas(randomInt(10)); + builder.eventIngestedRange(IndexLongFieldRange.UNKNOWN, 
TransportVersion.current()); int aliasCount = randomInt(10); for (int i = 0; i < aliasCount; i++) { builder.putAlias(randomAlias()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateShardPathIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateShardPathIT.java index 3ff7e66d25639..c7d1d49f6e451 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateShardPathIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateShardPathIT.java @@ -22,11 +22,12 @@ import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.test.junit.annotations.TestIssueLogging; import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import static org.hamcrest.Matchers.equalTo; @@ -40,9 +41,12 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class PrevalidateShardPathIT extends ESIntegTestCase { - @TestLogging( - value = "org.elasticsearch.cluster.service.MasterService:DEBUG", - reason = "https://github.com/elastic/elasticsearch/issues/104807" + @TestIssueLogging( + value = "org.elasticsearch.cluster.service.MasterService:DEBUG, " + + "org.elasticsearch.indices.store.IndicesStore:TRACE," + + "org.elasticsearch.indices.cluster.IndicesClusterStateService:DEBUG," + + "org.elasticsearch.indices.IndicesService:TRACE", + issueUrl = "https://github.com/elastic/elasticsearch/issues/104807" ) public void testCheckShards() throws Exception { internalCluster().startMasterOnlyNode(); @@ -130,6 +134,6 @@ public void testCheckShards() throws Exception { ); throw e; } - }); + }, 30, TimeUnit.SECONDS); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java index 27e63e5614744..13886cba9084c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java @@ -100,8 +100,8 @@ void syncFlush(String syncId) throws IOException { assertThat(getTranslogStats().getUncommittedOperations(), equalTo(0)); Map userData = new HashMap<>(getLastCommittedSegmentInfos().userData); SequenceNumbers.CommitInfo commitInfo = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(userData.entrySet()); - assertThat(commitInfo.localCheckpoint, equalTo(getLastSyncedGlobalCheckpoint())); - assertThat(commitInfo.maxSeqNo, equalTo(getLastSyncedGlobalCheckpoint())); + assertThat(commitInfo.localCheckpoint(), equalTo(getLastSyncedGlobalCheckpoint())); + assertThat(commitInfo.maxSeqNo(), equalTo(getLastSyncedGlobalCheckpoint())); userData.put(Engine.SYNC_COMMIT_ID, syncId); indexWriter.setLiveCommitData(userData.entrySet()); indexWriter.commit(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java index acfc38ca12f89..be7610e55b8e6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java @@ -26,7 +26,6 @@ import java.util.Collection; import java.util.Optional; -import java.util.concurrent.Phaser; import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -103,7 +102,7 @@ public void testMaxDocsLimit() throws Exception { assertThat(indexingResult.numFailures, equalTo(rejectedRequests)); assertThat(indexingResult.numSuccess, equalTo(0)); final IllegalArgumentException deleteError = expectThrows(IllegalArgumentException.class, client().prepareDelete("test", "any-id")); - assertThat(deleteError.getMessage(), containsString("Number of documents in the index can't exceed [" + maxDocs.get() + "]")); + assertThat(deleteError.getMessage(), containsString("Number of documents in the shard cannot exceed [" + maxDocs.get() + "]")); indicesAdmin().prepareRefresh("test").get(); assertNoFailuresAndResponse( prepareSearch("test").setQuery(new MatchAllQueryBuilder()).setTrackTotalHitsUpTo(Integer.MAX_VALUE).setSize(0), @@ -155,27 +154,18 @@ static IndexingResult indexDocs(int numRequests, int numThreads) throws Exceptio final AtomicInteger completedRequests = new AtomicInteger(); final AtomicInteger numSuccess = new AtomicInteger(); final AtomicInteger numFailure = new AtomicInteger(); - Thread[] indexers = new Thread[numThreads]; - Phaser phaser = new Phaser(indexers.length); - for (int i = 0; i < indexers.length; i++) { - indexers[i] = new Thread(() -> { - phaser.arriveAndAwaitAdvance(); - while (completedRequests.incrementAndGet() <= numRequests) { - try { - final DocWriteResponse resp = prepareIndex("test").setSource("{}", XContentType.JSON).get(); - numSuccess.incrementAndGet(); - assertThat(resp.status(), equalTo(RestStatus.CREATED)); - } catch (IllegalArgumentException e) { - numFailure.incrementAndGet(); - assertThat(e.getMessage(), containsString("Number of documents in the index can't exceed [" + maxDocs.get() + "]")); - } + startInParallel(numThreads, i -> { + while (completedRequests.incrementAndGet() <= numRequests) { + try { + final DocWriteResponse resp = prepareIndex("test").setSource("{}", XContentType.JSON).get(); + numSuccess.incrementAndGet(); + assertThat(resp.status(), equalTo(RestStatus.CREATED)); + } catch (IllegalArgumentException e) { + numFailure.incrementAndGet(); + assertThat(e.getMessage(), containsString("Number of documents in the shard cannot exceed [" + maxDocs.get() + "]")); } - }); - indexers[i].start(); - } - for (Thread indexer : indexers) { - indexer.join(); - } + } + }); internalCluster().assertNoInFlightDocsInEngine(); return new IndexingResult(numSuccess.get(), numFailure.get()); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java index 76d305ce8ea4b..3f79d7723beb3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java @@ -161,31 +161,18 @@ public void testConcurrentDynamicIgnoreBeyondLimitUpdates() throws Throwable { private Map indexConcurrently(int numberOfFieldsToCreate, Settings.Builder settings) throws Throwable { indicesAdmin().prepareCreate("index").setSettings(settings).get(); ensureGreen("index"); - final Thread[] indexThreads = new Thread[numberOfFieldsToCreate]; - final CountDownLatch 
startLatch = new CountDownLatch(1); final AtomicReference error = new AtomicReference<>(); - for (int i = 0; i < indexThreads.length; ++i) { + startInParallel(numberOfFieldsToCreate, i -> { final String id = Integer.toString(i); - indexThreads[i] = new Thread(new Runnable() { - @Override - public void run() { - try { - startLatch.await(); - assertEquals( - DocWriteResponse.Result.CREATED, - prepareIndex("index").setId(id).setSource("field" + id, "bar").get().getResult() - ); - } catch (Exception e) { - error.compareAndSet(null, e); - } - } - }); - indexThreads[i].start(); - } - startLatch.countDown(); - for (Thread thread : indexThreads) { - thread.join(); - } + try { + assertEquals( + DocWriteResponse.Result.CREATED, + prepareIndex("index").setId(id).setSource("field" + id, "bar").get().getResult() + ); + } catch (Exception e) { + error.compareAndSet(null, e); + } + }); if (error.get() != null) { throw error.get(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java index c60b6bb72e8ed..53f632f6ba8d5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java @@ -25,11 +25,7 @@ import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.xcontent.XContentType; -import java.util.ArrayList; import java.util.Collection; -import java.util.List; -import java.util.concurrent.BrokenBarrierException; -import java.util.concurrent.CyclicBarrier; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.stream.Stream; @@ -143,37 +139,14 @@ private void runGlobalCheckpointSyncTest( final int numberOfDocuments = randomIntBetween(0, 256); final int numberOfThreads = randomIntBetween(1, 4); - final CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads); // start concurrent indexing threads - final List threads = new ArrayList<>(numberOfThreads); - for (int i = 0; i < numberOfThreads; i++) { - final int index = i; - final Thread thread = new Thread(() -> { - try { - barrier.await(); - } catch (BrokenBarrierException | InterruptedException e) { - throw new RuntimeException(e); - } - for (int j = 0; j < numberOfDocuments; j++) { - final String id = Integer.toString(index * numberOfDocuments + j); - prepareIndex("test").setId(id).setSource("{\"foo\": " + id + "}", XContentType.JSON).get(); - } - try { - barrier.await(); - } catch (BrokenBarrierException | InterruptedException e) { - throw new RuntimeException(e); - } - }); - threads.add(thread); - thread.start(); - } - - // synchronize the start of the threads - barrier.await(); - - // wait for the threads to finish - barrier.await(); + startInParallel(numberOfThreads, index -> { + for (int j = 0; j < numberOfDocuments; j++) { + final String id = Integer.toString(index * numberOfDocuments + j); + prepareIndex("test").setId(id).setSource("{\"foo\": " + id + "}", XContentType.JSON).get(); + } + }); afterIndexing.accept(client()); @@ -203,9 +176,6 @@ private void runGlobalCheckpointSyncTest( } }, 60, TimeUnit.SECONDS); ensureGreen("test"); - for (final Thread thread : threads) { - thread.join(); - } } public void testPersistGlobalCheckpoint() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java index b9850bc95275c..5d996e44c6868 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -164,7 +164,7 @@ public void testDurableFlagHasEffect() { try { // the lastWriteLocaltion has a Integer.MAX_VALUE size so we have to create a new one return tlog.ensureSynced( - new Translog.Location(lastWriteLocation.generation, lastWriteLocation.translogLocation, 0), + new Translog.Location(lastWriteLocation.generation(), lastWriteLocation.translogLocation(), 0), SequenceNumbers.UNASSIGNED_SEQ_NO ); } catch (IOException e) { @@ -389,7 +389,7 @@ public void testMaybeFlush() throws Exception { logger.info( "--> translog stats [{}] gen [{}] commit_stats [{}] flush_stats [{}/{}]", Strings.toString(translogStats), - translog.getGeneration().translogFileGeneration, + translog.getGeneration().translogFileGeneration(), commitStats.getUserData(), flushStats.getPeriodic(), flushStats.getTotal() @@ -428,7 +428,7 @@ public void testMaybeRollTranslogGeneration() throws Exception { ); final Translog.Location location = result.getTranslogLocation(); shard.afterWriteOperation(); - if (location.translogLocation + location.size > generationThreshold) { + if (location.translogLocation() + location.size() > generationThreshold) { // wait until the roll completes assertBusy(() -> assertFalse(shard.shouldRollTranslogGeneration())); rolls++; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java index b224d70eed8f8..e9e88a2d6b76c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedRunnable; @@ -127,7 +128,7 @@ public void beforeIndexCreated(Index index, Settings indexSettings) { assertThat(state.nodes().get(shard.currentNodeId()).getName(), equalTo(node1)); } - public void testRelocationFailureNotRetriedForever() { + public void testRelocationFailureNotRetriedForever() throws Exception { String node1 = internalCluster().startNode(); createIndex("index1", 1, 0); ensureGreen("index1"); @@ -143,6 +144,16 @@ public void beforeIndexCreated(Index index, Settings indexSettings) { updateIndexSettings(Settings.builder().put(INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + "._name", node1), "index1"); ensureGreen("index1"); + var maxAttempts = MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY.get(Settings.EMPTY); + + // await all relocation attempts are exhausted + assertBusy(() -> { + var state = clusterAdmin().prepareState().get().getState(); + var shard = state.routingTable().index("index1").shard(0).primaryShard(); + assertThat(shard, notNullValue()); + assertThat(shard.relocationFailureInfo().failedRelocations(), equalTo(maxAttempts)); + }); + // ensure the shard 
remain started var state = clusterAdmin().prepareState().get().getState(); logger.info("Final routing is {}", state.getRoutingNodes().toString()); var shard = state.routingTable().index("index1").shard(0).primaryShard(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java index 62e6cb59994b2..08ce9af14ab13 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java @@ -16,10 +16,10 @@ import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.index.cache.request.RequestCacheStats; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java index 70cd143686dc8..0008ec1f9cbd2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java @@ -37,7 +37,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; @@ -179,66 +178,53 @@ public void testUpdateMappingConcurrently() throws Throwable { final AtomicReference threadException = new AtomicReference<>(); final AtomicBoolean stop = new AtomicBoolean(false); - Thread[] threads = new Thread[3]; - final CyclicBarrier barrier = new CyclicBarrier(threads.length); final ArrayList clientArray = new ArrayList<>(); for (Client c : clients()) { clientArray.add(c); } - for (int j = 0; j < threads.length; j++) { - threads[j] = new Thread(() -> { - try { - barrier.await(); - - for (int i = 0; i < 100; i++) { - if (stop.get()) { - return; - } - - Client client1 = clientArray.get(i % clientArray.size()); - Client client2 = clientArray.get((i + 1) % clientArray.size()); - String indexName = i % 2 == 0 ? 
"test2" : "test1"; - String fieldName = Thread.currentThread().getName() + "_" + i; - - AcknowledgedResponse response = client1.admin() - .indices() - .preparePutMapping(indexName) - .setSource( - JsonXContent.contentBuilder() - .startObject() - .startObject("_doc") - .startObject("properties") - .startObject(fieldName) - .field("type", "text") - .endObject() - .endObject() - .endObject() - .endObject() - ) - .setMasterNodeTimeout(TimeValue.timeValueMinutes(5)) - .get(); - - assertThat(response.isAcknowledged(), equalTo(true)); - GetMappingsResponse getMappingResponse = client2.admin().indices().prepareGetMappings(indexName).get(); - MappingMetadata mappings = getMappingResponse.getMappings().get(indexName); - @SuppressWarnings("unchecked") - Map properties = (Map) mappings.getSourceAsMap().get("properties"); - assertThat(properties.keySet(), Matchers.hasItem(fieldName)); + startInParallel(3, j -> { + try { + for (int i = 0; i < 100; i++) { + if (stop.get()) { + return; } - } catch (Exception e) { - threadException.set(e); - stop.set(true); - } - }); - - threads[j].setName("t_" + j); - threads[j].start(); - } - for (Thread t : threads) { - t.join(); - } + Client client1 = clientArray.get(i % clientArray.size()); + Client client2 = clientArray.get((i + 1) % clientArray.size()); + String indexName = i % 2 == 0 ? "test2" : "test1"; + String fieldName = "t_" + j + "_" + i; + + AcknowledgedResponse response = client1.admin() + .indices() + .preparePutMapping(indexName) + .setSource( + JsonXContent.contentBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject(fieldName) + .field("type", "text") + .endObject() + .endObject() + .endObject() + .endObject() + ) + .setMasterNodeTimeout(TimeValue.timeValueMinutes(5)) + .get(); + + assertThat(response.isAcknowledged(), equalTo(true)); + GetMappingsResponse getMappingResponse = client2.admin().indices().prepareGetMappings(indexName).get(); + MappingMetadata mappings = getMappingResponse.getMappings().get(indexName); + @SuppressWarnings("unchecked") + Map properties = (Map) mappings.getSourceAsMap().get("properties"); + assertThat(properties.keySet(), Matchers.hasItem(fieldName)); + } + } catch (Exception e) { + threadException.set(e); + stop.set(true); + } + }); if (threadException.get() != null) { throw threadException.get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 676f8185ecb84..d56e4a372c17c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -545,7 +545,10 @@ public void testRerouteRecovery() throws Exception { .size(); logger.info("--> start node B"); - final String nodeB = internalCluster().startNode(); + final String nodeB = internalCluster().startNode( + // Ensure that the target node has a high enough recovery max bytes per second to avoid any throttling + Settings.builder().put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "200mb") + ); ensureGreen(); @@ -1038,7 +1041,6 @@ public void testHistoryRetention() throws Exception { assertThat(recoveryState.getTranslog().recoveredOperations(), greaterThan(0)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105122") public void testDoNotInfinitelyWaitForMapping() { 
internalCluster().ensureAtLeastNumDataNodes(3); createIndex( @@ -1208,8 +1210,8 @@ public void testRecoverLocallyUpToGlobalCheckpoint() throws Exception { SequenceNumbers.CommitInfo commitInfoAfterLocalRecovery = SequenceNumbers.loadSeqNoInfoFromLuceneCommit( startRecoveryRequest.metadataSnapshot().commitUserData().entrySet() ); - assertThat(commitInfoAfterLocalRecovery.localCheckpoint, equalTo(lastSyncedGlobalCheckpoint)); - assertThat(commitInfoAfterLocalRecovery.maxSeqNo, equalTo(lastSyncedGlobalCheckpoint)); + assertThat(commitInfoAfterLocalRecovery.localCheckpoint(), equalTo(lastSyncedGlobalCheckpoint)); + assertThat(commitInfoAfterLocalRecovery.maxSeqNo(), equalTo(lastSyncedGlobalCheckpoint)); assertThat(startRecoveryRequest.startingSeqNo(), equalTo(lastSyncedGlobalCheckpoint + 1)); ensureGreen(indexName); assertThat((long) localRecoveredOps.get(), equalTo(lastSyncedGlobalCheckpoint - localCheckpointOfSafeCommit)); @@ -2009,8 +2011,8 @@ private long getLocalCheckpointOfSafeCommit(IndexCommit safeIndexCommit) throws final SequenceNumbers.CommitInfo commitInfo = SequenceNumbers.loadSeqNoInfoFromLuceneCommit( safeIndexCommit.getUserData().entrySet() ); - final long commitLocalCheckpoint = commitInfo.localCheckpoint; - final long maxSeqNo = commitInfo.maxSeqNo; + final long commitLocalCheckpoint = commitInfo.localCheckpoint(); + final long maxSeqNo = commitInfo.maxSeqNo(); final LocalCheckpointTracker localCheckpointTracker = new LocalCheckpointTracker(maxSeqNo, commitLocalCheckpoint); // In certain scenarios it is possible that the local checkpoint captured during commit lags behind, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java index 77cdc2e99977d..d52294d7584b8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java @@ -38,12 +38,12 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; -import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Locale; import java.util.Set; -import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -170,7 +170,7 @@ public void testCloseUnassignedIndex() throws Exception { assertIndexIsClosed(indexName); } - public void testConcurrentClose() throws InterruptedException { + public void testConcurrentClose() throws InterruptedException, ExecutionException { final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); createIndex(indexName); @@ -196,25 +196,14 @@ public void testConcurrentClose() throws InterruptedException { assertThat(healthResponse.isTimedOut(), equalTo(false)); assertThat(healthResponse.getIndices().get(indexName).getStatus().value(), lessThanOrEqualTo(ClusterHealthStatus.YELLOW.value())); - final CountDownLatch startClosing = new CountDownLatch(1); - final Thread[] threads = new Thread[randomIntBetween(2, 5)]; - - for (int i = 0; i < threads.length; i++) { - threads[i] = new Thread(() -> { - safeAwait(startClosing); - try { - indicesAdmin().prepareClose(indexName).get(); - } catch (final Exception e) { - assertException(e, indexName); - } - }); - threads[i].start(); - } - - startClosing.countDown(); - for (Thread thread 
: threads) { - thread.join(); - } + final int tasks = randomIntBetween(2, 5); + startInParallel(tasks, i -> { + try { + indicesAdmin().prepareClose(indexName).get(); + } catch (final Exception e) { + assertException(e, indexName); + } + }); assertIndexIsClosed(indexName); } @@ -256,37 +245,18 @@ public void testCloseWhileDeletingIndices() throws Exception { } assertThat(clusterAdmin().prepareState().get().getState().metadata().indices().size(), equalTo(indices.length)); - final List<Thread> threads = new ArrayList<>(); - final CountDownLatch latch = new CountDownLatch(1); - - for (final String indexToDelete : indices) { - threads.add(new Thread(() -> { - safeAwait(latch); - try { - assertAcked(indicesAdmin().prepareDelete(indexToDelete)); - } catch (final Exception e) { - assertException(e, indexToDelete); - } - })); - } - for (final String indexToClose : indices) { - threads.add(new Thread(() -> { - safeAwait(latch); - try { - indicesAdmin().prepareClose(indexToClose).get(); - } catch (final Exception e) { - assertException(e, indexToClose); + startInParallel(indices.length * 2, i -> { + final String index = indices[i % indices.length]; + try { + if (i < indices.length) { + assertAcked(indicesAdmin().prepareDelete(index)); + } else { + indicesAdmin().prepareClose(index).get(); } - })); - } - - for (Thread thread : threads) { - thread.start(); - } - latch.countDown(); - for (Thread thread : threads) { - thread.join(); - } + } catch (final Exception e) { + assertException(e, index); + } + }); } public void testConcurrentClosesAndOpens() throws Exception { @@ -297,37 +267,21 @@ public void testConcurrentClosesAndOpens() throws Exception { indexer.setFailureAssertion(e -> {}); waitForDocs(1, indexer); - final CountDownLatch latch = new CountDownLatch(1); + final int closes = randomIntBetween(1, 3); + final int opens = randomIntBetween(1, 3); + final CyclicBarrier barrier = new CyclicBarrier(opens + closes); - final List<Thread> threads = new ArrayList<>(); - for (int i = 0; i < randomIntBetween(1, 3); i++) { - threads.add(new Thread(() -> { - try { - safeAwait(latch); + startInParallel(opens + closes, i -> { + try { + if (i < closes) { indicesAdmin().prepareClose(indexName).get(); - } catch (final Exception e) { - throw new AssertionError(e); - } - })); - } - for (int i = 0; i < randomIntBetween(1, 3); i++) { - threads.add(new Thread(() -> { - try { - safeAwait(latch); + } else { assertAcked(indicesAdmin().prepareOpen(indexName).get()); - } catch (final Exception e) { - throw new AssertionError(e); } - })); - } - - for (Thread thread : threads) { - thread.start(); - } - latch.countDown(); - for (Thread thread : threads) { - thread.join(); - } + } catch (final Exception e) { + throw new AssertionError(e); + } + }); indexer.stopAndAwaitStopped(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java index b160834d675d9..6647356f070ae 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java @@ -35,7 +35,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; @@ -187,30 +186,17 @@ public void testCloseWhileRelocatingShards() throws Exception {
ClusterRerouteUtils.reroute(client(), commands.toArray(AllocationCommand[]::new)); // start index closing threads - final List<Thread> threads = new ArrayList<>(); - for (final String indexToClose : indices) { - final Thread thread = new Thread(() -> { - try { - safeAwait(latch); - } finally { - release.countDown(); - } - // Closing is not always acknowledged when shards are relocating: this is the case when the target shard is initializing - // or is catching up operations. In these cases the TransportVerifyShardBeforeCloseAction will detect that the global - // and max sequence number don't match and will not ack the close. - AcknowledgedResponse closeResponse = indicesAdmin().prepareClose(indexToClose).get(); - if (closeResponse.isAcknowledged()) { - assertTrue("Index closing should not be acknowledged twice", acknowledgedCloses.add(indexToClose)); - } - }); - threads.add(thread); - thread.start(); - } - - latch.countDown(); - for (Thread thread : threads) { - thread.join(); - } + startInParallel(indices.length, i -> { + release.countDown(); + // Closing is not always acknowledged when shards are relocating: this is the case when the target shard is initializing + // or is catching up operations. In these cases the TransportVerifyShardBeforeCloseAction will detect that the global + // and max sequence number don't match and will not ack the close. + final String indexToClose = indices[i]; + AcknowledgedResponse closeResponse = indicesAdmin().prepareClose(indexToClose).get(); + if (closeResponse.isAcknowledged()) { + assertTrue("Index closing should not be acknowledged twice", acknowledgedCloses.add(indexToClose)); + } + }); // stop indexers first without waiting for stop to not redundantly index on some while waiting for another one to stop for (BackgroundIndexer indexer : indexers.values()) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/DocumentSizeObserverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/DocumentSizeObserverIT.java index 58d1d7d88ec55..7797371a2823b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/DocumentSizeObserverIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/DocumentSizeObserverIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.plugins.internal; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.EngineFactory; @@ -107,7 +108,8 @@ public IndexResult index(Index index) throws IOException { config().getMapperService(), DocumentSizeAccumulator.EMPTY_INSTANCE ); - documentParsingReporter.onIndexingCompleted(index.parsedDoc()); + ParsedDocument parsedDocument = index.parsedDoc(); + documentParsingReporter.onIndexingCompleted(parsedDocument); return result; } @@ -122,15 +124,9 @@ public TestDocumentParsingProviderPlugin() {} @Override public DocumentParsingProvider getDocumentParsingProvider() { return new DocumentParsingProvider() { - - @Override - public DocumentSizeObserver newFixedSizeDocumentObserver(long normalisedBytesParsed) { - return new TestDocumentSizeObserver(); - } - @Override - public DocumentSizeObserver newDocumentSizeObserver() { - return new TestDocumentSizeObserver(); + public DocumentSizeObserver newDocumentSizeObserver(DocWriteRequest request) { + return new TestDocumentSizeObserver(0L); } @Override @@ -155,8 +151,7 @@ public TestDocumentSizeReporter(String indexName) { @Override
public void onIndexingCompleted(ParsedDocument parsedDocument) { - DocumentSizeObserver documentSizeObserver = parsedDocument.getDocumentSizeObserver(); - COUNTER.addAndGet(documentSizeObserver.normalisedBytesParsed()); + COUNTER.addAndGet(parsedDocument.getDocumentSizeObserver().normalisedBytesParsed()); assertThat(indexName, equalTo(TEST_INDEX_NAME)); } } @@ -164,10 +159,15 @@ public void onIndexingCompleted(ParsedDocument parsedDocument) { public static class TestDocumentSizeObserver implements DocumentSizeObserver { long counter = 0; + public TestDocumentSizeObserver(long counter) { + this.counter = counter; + } + @Override public XContentParser wrapParser(XContentParser xContentParser) { hasWrappedParser = true; return new FilterXContentParserWrapper(xContentParser) { + @Override public Token nextToken() throws IOException { counter++; @@ -180,5 +180,6 @@ public Token nextToken() throws IOException { public long normalisedBytesParsed() { return counter; } + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java index 6e89c1447edb6..2fe808d813ccc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java @@ -42,15 +42,16 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) public class FileSettingsServiceIT extends ESIntegTestCase { - private static AtomicLong versionCounter = new AtomicLong(1); + private static final AtomicLong versionCounter = new AtomicLong(1); - private static String testJSON = """ + private static final String testJSON = """ { "metadata": { "version": "%s", @@ -63,7 +64,7 @@ public class FileSettingsServiceIT extends ESIntegTestCase { } }"""; - private static String testJSON43mb = """ + private static final String testJSON43mb = """ { "metadata": { "version": "%s", @@ -76,7 +77,7 @@ public class FileSettingsServiceIT extends ESIntegTestCase { } }"""; - private static String testCleanupJSON = """ + private static final String testCleanupJSON = """ { "metadata": { "version": "%s", @@ -87,7 +88,7 @@ public class FileSettingsServiceIT extends ESIntegTestCase { } }"""; - private static String testErrorJSON = """ + private static final String testErrorJSON = """ { "metadata": { "version": "%s", @@ -165,8 +166,7 @@ public void clusterChanged(ClusterChangedEvent event) { private void assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLong metadataVersion, String expectedBytesPerSec) throws Exception { - boolean awaitSuccessful = savedClusterState.await(20, TimeUnit.SECONDS); - assertTrue(awaitSuccessful); + assertTrue(savedClusterState.await(20, TimeUnit.SECONDS)); final ClusterStateResponse clusterStateResponse = clusterAdmin().state( new ClusterStateRequest().waitForMetadataVersion(metadataVersion.get()) @@ -180,11 +180,13 @@ private void assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLo ClusterUpdateSettingsRequest req = new 
ClusterUpdateSettingsRequest().persistentSettings( Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "1234kb") ); - assertEquals( - "java.lang.IllegalArgumentException: Failed to process request " - + "[org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest/unset] " - + "with errors: [[indices.recovery.max_bytes_per_sec] set as read-only by [file_settings]]", - expectThrows(ExecutionException.class, () -> clusterAdmin().updateSettings(req).get()).getMessage() + assertThat( + expectThrows(ExecutionException.class, () -> clusterAdmin().updateSettings(req).get()).getMessage(), + is( + "java.lang.IllegalArgumentException: Failed to process request " + + "[org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest/unset] " + + "with errors: [[indices.recovery.max_bytes_per_sec] set as read-only by [file_settings]]" + ) ); } @@ -256,16 +258,15 @@ public void testReservedStatePersistsOnRestart() throws Exception { internalCluster().restartNode(masterNode); final ClusterStateResponse clusterStateResponse = clusterAdmin().state(new ClusterStateRequest()).actionGet(); - assertEquals( - 1, + assertThat( clusterStateResponse.getState() .metadata() .reservedStateMetadata() .get(FileSettingsService.NAMESPACE) .handlers() .get(ReservedClusterSettingsAction.NAME) - .keys() - .size() + .keys(), + hasSize(1) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/rest/RestControllerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/rest/RestControllerIT.java index b76bec0652732..7ad464fee92ba 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/rest/RestControllerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/rest/RestControllerIT.java @@ -9,6 +9,7 @@ package org.elasticsearch.rest; import org.elasticsearch.client.Request; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -18,18 +19,28 @@ import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; -import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.TestTelemetryPlugin; import org.elasticsearch.test.ESIntegTestCase; import java.io.IOException; +import java.util.ArrayList; import java.util.Collection; import java.util.List; +import java.util.function.Consumer; import java.util.function.Predicate; import java.util.function.Supplier; +import java.util.stream.Stream; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numClientNodes = 1, numDataNodes = 0) public class RestControllerIT extends ESIntegTestCase { @Override protected boolean addMockHttpTransport() { @@ -43,9 +54,117 @@ public void testHeadersEmittedWithChunkedResponses() throws IOException { assertEquals(ChunkedResponseWithHeadersPlugin.HEADER_VALUE, response.getHeader(ChunkedResponseWithHeadersPlugin.HEADER_NAME)); } + public void testMetricsEmittedOnSuccess() 
throws IOException { + final var client = getRestClient(); + final var request = new Request("GET", TestEchoStatusCodePlugin.ROUTE); + request.addParameter("status_code", "200"); + final var response = client.performRequest(request); + + assertEquals(200, response.getStatusLine().getStatusCode()); + + assertMeasurement(metric -> { + assertThat(metric.getLong(), is(1L)); + assertThat(metric.attributes(), hasEntry(RestController.HANDLER_NAME_KEY, TestEchoStatusCodePlugin.NAME)); + assertThat(metric.attributes(), hasEntry(RestController.REQUEST_METHOD_KEY, "GET")); + assertThat(metric.attributes(), hasEntry(RestController.STATUS_CODE_KEY, 200)); + }); + } + + public void testMetricsEmittedOnRestError() throws IOException { + final var client = getRestClient(); + final var request = new Request("GET", TestEchoStatusCodePlugin.ROUTE); + request.addParameter("status_code", "503"); + final var response = expectThrows(ResponseException.class, () -> client.performRequest(request)); + + assertEquals(503, response.getResponse().getStatusLine().getStatusCode()); + assertMeasurement(metric -> { + assertThat(metric.getLong(), is(1L)); + assertThat(metric.attributes(), hasEntry(RestController.HANDLER_NAME_KEY, TestEchoStatusCodePlugin.NAME)); + assertThat(metric.attributes(), hasEntry(RestController.REQUEST_METHOD_KEY, "GET")); + assertThat(metric.attributes(), hasEntry(RestController.STATUS_CODE_KEY, 503)); + }); + } + + public void testMetricsEmittedOnWrongMethod() throws IOException { + final var client = getRestClient(); + final var request = new Request("DELETE", TestEchoStatusCodePlugin.ROUTE); + final var response = expectThrows(ResponseException.class, () -> client.performRequest(request)); + + assertEquals(405, response.getResponse().getStatusLine().getStatusCode()); + assertMeasurement(metric -> { + assertThat(metric.getLong(), is(1L)); + assertThat(metric.attributes(), hasEntry(RestController.STATUS_CODE_KEY, RestStatus.METHOD_NOT_ALLOWED.getStatus())); + }); + } + + private static void assertMeasurement(Consumer measurementConsumer) { + var measurements = new ArrayList(); + for (PluginsService pluginsService : internalCluster().getInstances(PluginsService.class)) { + final TestTelemetryPlugin telemetryPlugin = pluginsService.filterPlugins(TestTelemetryPlugin.class).findFirst().orElseThrow(); + telemetryPlugin.collect(); + + final var metrics = telemetryPlugin.getLongCounterMeasurement(RestController.METRIC_REQUESTS_TOTAL); + measurements.addAll(metrics); + } + assertThat(measurements, hasSize(1)); + measurementConsumer.accept(measurements.get(0)); + } + @Override protected Collection> nodePlugins() { - return CollectionUtils.appendToCopy(super.nodePlugins(), ChunkedResponseWithHeadersPlugin.class); + return Stream.concat( + super.nodePlugins().stream(), + Stream.of(ChunkedResponseWithHeadersPlugin.class, TestEchoStatusCodePlugin.class, TestTelemetryPlugin.class) + ).toList(); + } + + public static class TestEchoStatusCodePlugin extends Plugin implements ActionPlugin { + static final String ROUTE = "/_test/echo_status_code"; + static final String NAME = "test_echo_status_code"; + + @Override + public Collection getRestHandlers( + Settings settings, + NamedWriteableRegistry namedWriteableRegistry, + RestController restController, + ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, + SettingsFilter settingsFilter, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster, + Predicate clusterSupportsFeature + ) { + return List.of(new 
BaseRestHandler() { + @Override + public String getName() { + return NAME; + } + + @Override + public List routes() { + return List.of(new Route(RestRequest.Method.GET, ROUTE), new Route(RestRequest.Method.POST, ROUTE)); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { + var statusCode = request.param("status_code"); + client.getLocalNodeId(); + var restStatus = RestStatus.fromCode(Integer.parseInt(statusCode)); + return channel -> { + final var response = RestResponse.chunked( + restStatus, + ChunkedRestResponseBodyPart.fromXContent( + params -> Iterators.single((b, p) -> b.startObject().endObject()), + request, + channel + ), + null + ); + channel.sendResponse(response); + }; + } + }); + } } public static class ChunkedResponseWithHeadersPlugin extends Plugin implements ActionPlugin { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/CollapseSearchResultsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/CollapseSearchResultsIT.java index a12a26d69c5ff..f5fdd752a6f57 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/CollapseSearchResultsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/CollapseSearchResultsIT.java @@ -39,4 +39,26 @@ public void testCollapse() { } ); } + + public void testCollapseWithDocValueFields() { + final String indexName = "test_collapse"; + createIndex(indexName); + final String collapseField = "collapse_field"; + final String otherField = "other_field"; + assertAcked(indicesAdmin().preparePutMapping(indexName).setSource(collapseField, "type=keyword", otherField, "type=keyword")); + index(indexName, "id_1_0", Map.of(collapseField, "value1", otherField, "other_value1")); + index(indexName, "id_1_1", Map.of(collapseField, "value1", otherField, "other_value2")); + index(indexName, "id_2_0", Map.of(collapseField, "value2", otherField, "other_value3")); + refresh(indexName); + + assertNoFailuresAndResponse( + prepareSearch(indexName).setQuery(new MatchAllQueryBuilder()) + .addDocValueField(otherField) + .setCollapse(new CollapseBuilder(collapseField).setInnerHits(new InnerHitBuilder("ih").setSize(2))), + searchResponse -> { + assertEquals(collapseField, searchResponse.getHits().getCollapseField()); + assertEquals(Set.of(new BytesRef("value1"), new BytesRef("value2")), Set.of(searchResponse.getHits().getCollapseValues())); + } + ); + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java index a9ff9f15a7e92..efb283f047bb2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java @@ -24,9 +24,9 @@ import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram; 
import org.elasticsearch.search.aggregations.bucket.histogram.LongBounds; import org.elasticsearch.search.aggregations.metrics.Avg; @@ -241,7 +241,7 @@ public void testSingleValuedField() throws Exception { assertThat(buckets.size(), equalTo(3)); ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); + Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); @@ -283,7 +283,7 @@ public void testSingleValuedFieldWithTimeZone() throws Exception { assertThat(buckets.size(), equalTo(6)); ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 23, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); + Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); @@ -354,7 +354,7 @@ public void testSingleValued_timeZone_epoch() throws Exception { expectedKeys.add(ZonedDateTime.of(2012, 3, 22, 23, 0, 0, 0, ZoneOffset.UTC)); Iterator keyIterator = expectedKeys.iterator(); - for (Histogram.Bucket bucket : buckets) { + for (Bucket bucket : buckets) { assertThat(bucket, notNullValue()); ZonedDateTime expectedKey = keyIterator.next(); String bucketKey = bucket.getKeyAsString(); @@ -380,7 +380,7 @@ public void testSingleValuedFieldOrderedByKeyAsc() throws Exception { assertThat(buckets.size(), equalTo(3)); int i = 0; - for (Histogram.Bucket bucket : buckets) { + for (Bucket bucket : buckets) { assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); i++; } @@ -400,7 +400,7 @@ public void testSingleValuedFieldOrderedByKeyDesc() throws Exception { assertThat(histo.getBuckets().size(), equalTo(3)); int i = 2; - for (Histogram.Bucket bucket : histo.getBuckets()) { + for (Bucket bucket : histo.getBuckets()) { assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); i--; } @@ -420,7 +420,7 @@ public void testSingleValuedFieldOrderedByCountAsc() throws Exception { assertThat(histo.getBuckets().size(), equalTo(3)); int i = 0; - for (Histogram.Bucket bucket : histo.getBuckets()) { + for (Bucket bucket : histo.getBuckets()) { assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); i++; } @@ -440,7 +440,7 @@ public void testSingleValuedFieldOrderedByCountDesc() throws Exception { assertThat(histo.getBuckets().size(), equalTo(3)); int i = 2; - for (Histogram.Bucket bucket : histo.getBuckets()) { + for (Bucket bucket : histo.getBuckets()) { assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); i--; } @@ -465,7 +465,7 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { Object[] propertiesCounts = (Object[]) ((InternalAggregation) histo).getProperty("sum.value"); ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); + Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); @@ -521,7 +521,7 @@ public void testSingleValuedFieldOrderedBySubAggregationAsc() throws Exception { 
assertThat(histo.getBuckets().size(), equalTo(3)); int i = 0; - for (Histogram.Bucket bucket : histo.getBuckets()) { + for (Bucket bucket : histo.getBuckets()) { assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); i++; } @@ -544,7 +544,7 @@ public void testSingleValuedFieldOrderedBySubAggregationDesc() throws Exception assertThat(histo.getBuckets().size(), equalTo(3)); int i = 2; - for (Histogram.Bucket bucket : histo.getBuckets()) { + for (Bucket bucket : histo.getBuckets()) { assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); i--; } @@ -567,7 +567,7 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationDesc() throws assertThat(histo.getBuckets().size(), equalTo(3)); int i = 2; - for (Histogram.Bucket bucket : histo.getBuckets()) { + for (Bucket bucket : histo.getBuckets()) { assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); i--; } @@ -590,7 +590,7 @@ public void testSingleValuedFieldOrderedByTieBreaker() throws Exception { assertThat(histo.getBuckets().size(), equalTo(3)); int i = 1; - for (Histogram.Bucket bucket : histo.getBuckets()) { + for (Bucket bucket : histo.getBuckets()) { assertThat(bucket.getKey(), equalTo(date(i, 1))); i++; } @@ -645,7 +645,7 @@ public void testSingleValuedFieldWithValueScript() throws Exception { assertThat(buckets.size(), equalTo(3)); ZonedDateTime key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); + Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); @@ -688,7 +688,7 @@ public void testMultiValuedField() throws Exception { assertThat(buckets.size(), equalTo(4)); ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); + Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); @@ -729,9 +729,9 @@ public void testMultiValuedFieldOrderedByCountDesc() throws Exception { assertThat(histo.getName(), equalTo("histo")); assertThat(histo.getBuckets().size(), equalTo(4)); - List buckets = new ArrayList<>(histo.getBuckets()); + List buckets = new ArrayList<>(histo.getBuckets()); - Histogram.Bucket bucket = buckets.get(0); + Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKey(), equalTo(date(3, 1))); assertThat(bucket.getDocCount(), equalTo(5L)); @@ -781,7 +781,7 @@ public void testMultiValuedFieldWithValueScript() throws Exception { assertThat(buckets.size(), equalTo(4)); ZonedDateTime key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); + Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); @@ -835,7 +835,7 @@ public void testScriptSingleValue() throws Exception { assertThat(buckets.size(), equalTo(3)); ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); + Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); 
assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); @@ -874,7 +874,7 @@ public void testScriptMultiValued() throws Exception { assertThat(buckets.size(), equalTo(4)); ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); + Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); @@ -940,7 +940,7 @@ public void testPartiallyUnmapped() throws Exception { assertThat(buckets.size(), equalTo(3)); ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); + Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); @@ -976,10 +976,10 @@ public void testEmptyAggregation() throws Exception { assertThat(response.getHits().getTotalHits().value, equalTo(2L)); Histogram histo = response.getAggregations().get("histo"); assertThat(histo, Matchers.notNullValue()); - List buckets = histo.getBuckets(); + List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(3)); - Histogram.Bucket bucket = buckets.get(1); + Bucket bucket = buckets.get(1); assertThat(bucket, Matchers.notNullValue()); assertThat(bucket.getKeyAsString(), equalTo("1.0")); @@ -1013,10 +1013,10 @@ public void testSingleValueWithTimeZone() throws Exception { assertThat(response.getHits().getTotalHits().value, equalTo(5L)); Histogram histo = response.getAggregations().get("date_histo"); - List buckets = histo.getBuckets(); + List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(2)); - Histogram.Bucket bucket = buckets.get(0); + Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo("2014-03-10:00-00-00-02:00")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -1118,7 +1118,7 @@ public void testSingleValueFieldWithExtendedBounds() throws Exception { ZonedDateTime key = baseKey.isBefore(boundsMinKey) ? 
baseKey : boundsMinKey; for (int i = 0; i < bucketsCount; i++) { - Histogram.Bucket bucket = buckets.get(i); + Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getKeyAsString(), equalTo(format(key, pattern))); @@ -1185,7 +1185,7 @@ public void testSingleValueFieldWithExtendedBoundsTimezone() throws Exception { assertThat(buckets.size(), equalTo(24)); for (int i = 0; i < buckets.size(); i++) { - Histogram.Bucket bucket = buckets.get(i); + Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); ZonedDateTime zonedDateTime = timeZoneStartToday.plus(i * 60 * 60 * 1000, ChronoUnit.MILLIS); assertThat("InternalBucket " + i + " had wrong key", (ZonedDateTime) bucket.getKey(), equalTo(zonedDateTime)); @@ -1283,11 +1283,11 @@ public void testSingleValueWithMultipleDateFormatsFromMapping() throws Exception assertSearchHits(response, "0", "1", "2", "3", "4"); Histogram histo = response.getAggregations().get("date_histo"); - List buckets = histo.getBuckets(); + List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(1)); ZonedDateTime key = ZonedDateTime.of(2014, 3, 10, 0, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); + Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); @@ -1315,7 +1315,7 @@ public void testIssue6965() { assertThat(buckets.size(), equalTo(3)); ZonedDateTime key = ZonedDateTime.of(2011, 12, 31, 23, 0, 0, 0, ZoneOffset.UTC); - Histogram.Bucket bucket = buckets.get(0); + Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); @@ -1683,7 +1683,7 @@ private void assertMultiSortResponse(int[] expectedDays, BucketOrder... order) { assertThat(histogram.getBuckets().size(), equalTo(expectedKeys.length)); int i = 0; - for (Histogram.Bucket bucket : histogram.getBuckets()) { + for (Bucket bucket : histogram.getBuckets()) { assertThat(bucket, notNullValue()); assertThat(key(bucket), equalTo(expectedKeys[i])); assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count"))); @@ -1699,7 +1699,7 @@ private void assertMultiSortResponse(int[] expectedDays, BucketOrder... 
order) { ); } - private ZonedDateTime key(Histogram.Bucket bucket) { + private ZonedDateTime key(Bucket bucket) { return (ZonedDateTime) bucket.getKey(); } @@ -1753,7 +1753,7 @@ public void testDateKeyFormatting() { ), response -> { InternalDateHistogram histogram = response.getAggregations().get("histo"); - List buckets = histogram.getBuckets(); + List buckets = histogram.getBuckets(); assertThat(buckets.get(0).getKeyAsString(), equalTo("2012-01-01T00:00:00.000-07:00")); assertThat(buckets.get(1).getKeyAsString(), equalTo("2012-02-01T00:00:00.000-07:00")); assertThat(buckets.get(2).getKeyAsString(), equalTo("2012-03-01T00:00:00.000-07:00")); @@ -1770,7 +1770,7 @@ public void testHardBoundsOnDates() { ), response -> { InternalDateHistogram histogram = response.getAggregations().get("histo"); - List buckets = histogram.getBuckets(); + List buckets = histogram.getBuckets(); assertThat(buckets.size(), equalTo(30)); assertThat(buckets.get(1).getKeyAsString(), equalTo("2012-02-03T00:00:00.000Z")); assertThat(buckets.get(29).getKeyAsString(), equalTo("2012-03-02T00:00:00.000Z")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java index 421c1475eb5bc..5894837e257bf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java @@ -19,10 +19,10 @@ import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.histogram.DoubleBounds; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.metrics.Avg; import org.elasticsearch.search.aggregations.metrics.Max; import org.elasticsearch.search.aggregations.metrics.Stats; @@ -252,7 +252,7 @@ public void testSingleValuedField() throws Exception { assertThat(buckets.size(), equalTo(numValueBuckets)); for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); + Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); @@ -276,7 +276,7 @@ public void singleValuedField_withOffset() throws Exception { assertThat(histo.getBuckets().size(), equalTo(expectedNumberOfBuckets)); // first bucket should start at -5, contain 4 documents - Histogram.Bucket bucket = histo.getBuckets().get(0); + Bucket bucket = histo.getBuckets().get(0); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo(-5L)); assertThat(bucket.getDocCount(), equalTo(4L)); @@ -310,7 +310,7 @@ public void testSingleValuedFieldWithRandomOffset() throws Exception { long docsCounted = 0; for (int i = 0; i < expectedNumberOfBuckets; ++i) { - Histogram.Bucket bucket = histo.getBuckets().get(i); + Bucket bucket = histo.getBuckets().get(i); assertThat(bucket, notNullValue()); assertThat(((Number) 
bucket.getKey()).longValue(), equalTo((long) ((i - 1) * interval + offset))); if (i == 0) { @@ -340,9 +340,9 @@ public void testSingleValuedFieldOrderedByKeyAsc() throws Exception { assertThat(histo.getName(), equalTo("histo")); assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - List buckets = new ArrayList<>(histo.getBuckets()); + List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); + Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); @@ -362,9 +362,9 @@ public void testsingleValuedFieldOrderedByKeyDesc() throws Exception { assertThat(histo.getName(), equalTo("histo")); assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - List buckets = new ArrayList<>(histo.getBuckets()); + List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(numValueBuckets - i - 1); + Bucket bucket = buckets.get(numValueBuckets - i - 1); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); @@ -385,10 +385,10 @@ public void testSingleValuedFieldOrderedByCountAsc() throws Exception { assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); Set buckets = new HashSet<>(); - List histoBuckets = new ArrayList<>(histo.getBuckets()); + List histoBuckets = new ArrayList<>(histo.getBuckets()); long previousCount = Long.MIN_VALUE; for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = histoBuckets.get(i); + Bucket bucket = histoBuckets.get(i); assertThat(bucket, notNullValue()); long key = ((Number) bucket.getKey()).longValue(); assertEquals(0, key % interval); @@ -413,10 +413,10 @@ public void testSingleValuedFieldOrderedByCountDesc() throws Exception { assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); Set buckets = new HashSet<>(); - List histoBuckets = new ArrayList<>(histo.getBuckets()); + List histoBuckets = new ArrayList<>(histo.getBuckets()); long previousCount = Long.MAX_VALUE; for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = histoBuckets.get(i); + Bucket bucket = histoBuckets.get(i); assertThat(bucket, notNullValue()); long key = ((Number) bucket.getKey()).longValue(); assertEquals(0, key % interval); @@ -446,9 +446,9 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) histo).getProperty("_count"); Object[] propertiesCounts = (Object[]) ((InternalAggregation) histo).getProperty("sum.value"); - List buckets = new ArrayList<>(histo.getBuckets()); + List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); + Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); @@ -486,9 +486,9 @@ public void testSingleValuedFieldOrderedBySubAggregationAsc() throws Exception { Set visited = new HashSet<>(); double previousSum = Double.NEGATIVE_INFINITY; - List buckets = new ArrayList<>(histo.getBuckets()); + List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < 
numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); + Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); long key = ((Number) bucket.getKey()).longValue(); assertTrue(visited.add(key)); @@ -527,9 +527,9 @@ public void testSingleValuedFieldOrderedBySubAggregationDesc() throws Exception Set visited = new HashSet<>(); double previousSum = Double.POSITIVE_INFINITY; - List buckets = new ArrayList<>(histo.getBuckets()); + List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); + Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); long key = ((Number) bucket.getKey()).longValue(); assertTrue(visited.add(key)); @@ -569,9 +569,9 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationDesc() throws Set visited = new HashSet<>(); double previousSum = Double.POSITIVE_INFINITY; - List buckets = new ArrayList<>(histo.getBuckets()); + List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); + Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); long key = ((Number) bucket.getKey()).longValue(); assertTrue(visited.add(key)); @@ -611,9 +611,9 @@ public void testSingleValuedFieldOrderedBySubAggregationDescDeepOrderPath() thro Set visited = new HashSet<>(); double prevMax = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; - List buckets = new ArrayList<>(histo.getBuckets()); + List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); + Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); long key = ((Number) bucket.getKey()).longValue(); assertTrue(visited.add(key)); @@ -646,9 +646,9 @@ public void testSingleValuedFieldOrderedByTieBreaker() throws Exception { assertThat(histo.getName(), equalTo("histo")); assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - List buckets = new ArrayList<>(histo.getBuckets()); + List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); + Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); @@ -705,7 +705,7 @@ public void testSingleValuedFieldWithValueScript() throws Exception { assertThat(buckets.size(), equalTo(numBuckets)); for (int i = 0; i < numBuckets; i++) { - Histogram.Bucket bucket = buckets.get(i); + Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); int key = ((2 / interval) + i) * interval; assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) key)); @@ -726,7 +726,7 @@ public void testMultiValuedField() throws Exception { assertThat(buckets.size(), equalTo(numValuesBuckets)); for (int i = 0; i < numValuesBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); + Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); assertThat(bucket.getDocCount(), equalTo(valuesCounts[i])); @@ -746,9 +746,9 @@ public void testMultiValuedFieldOrderedByKeyDesc() throws Exception { assertThat(histo.getName(), equalTo("histo")); assertThat(histo.getBuckets().size(), equalTo(numValuesBuckets)); - List buckets = new ArrayList<>(histo.getBuckets()); + List buckets = new 
ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValuesBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(numValuesBuckets - i - 1); + Bucket bucket = buckets.get(numValuesBuckets - i - 1); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); assertThat(bucket.getDocCount(), equalTo(valuesCounts[i])); @@ -783,7 +783,7 @@ public void testMultiValuedFieldWithValueScript() throws Exception { assertThat(buckets.size(), equalTo(numBuckets)); for (int i = 0; i < numBuckets; i++) { - Histogram.Bucket bucket = buckets.get(i); + Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); int key = ((2 / interval) + i) * interval; assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) key)); @@ -807,7 +807,7 @@ public void testScriptSingleValue() throws Exception { assertThat(buckets.size(), equalTo(numValueBuckets)); for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); + Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); @@ -830,7 +830,7 @@ public void testScriptMultiValued() throws Exception { assertThat(buckets.size(), equalTo(numValuesBuckets)); for (int i = 0; i < numValuesBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); + Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); assertThat(bucket.getDocCount(), equalTo(valuesCounts[i])); @@ -862,7 +862,7 @@ public void testPartiallyUnmapped() throws Exception { assertThat(buckets.size(), equalTo(numValueBuckets)); for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); + Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); @@ -885,7 +885,7 @@ public void testPartiallyUnmappedWithExtendedBounds() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(numValueBuckets + 3)); - Histogram.Bucket bucket = buckets.get(0); + Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) -1 * 2 * interval)); assertThat(bucket.getDocCount(), equalTo(0L)); @@ -919,7 +919,7 @@ public void testEmptyAggregation() throws Exception { Histogram histo = response.getAggregations().get("histo"); assertThat(histo, Matchers.notNullValue()); List buckets = histo.getBuckets(); - Histogram.Bucket bucket = buckets.get(1); + Bucket bucket = buckets.get(1); assertThat(bucket, Matchers.notNullValue()); histo = bucket.getAggregations().get("sub_histo"); @@ -984,7 +984,7 @@ public void testSingleValuedFieldWithExtendedBounds() throws Exception { long key = startKey; for (int i = 0; i < bucketsCount; i++) { - Histogram.Bucket bucket = buckets.get(i); + Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(extendedValueCounts[i])); @@ -1058,7 +1058,7 @@ public void testEmptyWithExtendedBounds() throws Exception { long key = startKey; for (int i = 0; i < bucketsCount; i++) { - Histogram.Bucket bucket = buckets.get(i); + Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); 
assertThat(((Number) bucket.getKey()).longValue(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(0L)); @@ -1302,7 +1302,7 @@ private void assertMultiSortResponse(long[] expectedKeys, BucketOrder... order) assertThat(histogram.getBuckets().size(), equalTo(expectedKeys.length)); int i = 0; - for (Histogram.Bucket bucket : histogram.getBuckets()) { + for (Bucket bucket : histogram.getBuckets()) { assertThat(bucket, notNullValue()); assertThat(key(bucket), equalTo(expectedKeys[i])); assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count"))); @@ -1318,7 +1318,7 @@ private void assertMultiSortResponse(long[] expectedKeys, BucketOrder... order) ); } - private long key(Histogram.Bucket bucket) { + private long key(Bucket bucket) { return ((Number) bucket.getKey()).longValue(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java index 9a27b0d8f75a3..1cce1ab3b1c4c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.index.query.InnerHitBuilder; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.filter.Filter; @@ -31,6 +32,7 @@ import java.util.ArrayList; import java.util.List; +import java.util.Map; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; @@ -47,6 +49,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -845,4 +848,45 @@ public void testExtractInnerHitBuildersWithDuplicatePath() throws Exception { containsString("[inner_hits] already contains an entry for key [property]") ); } + + public void testSyntheticSource() throws Exception { + assertAcked( + prepareCreate("synthetic").setMapping( + jsonBuilder().startObject() + .startObject("_source") + .field("mode", "synthetic") + .endObject() + .startObject("properties") + .startObject("nested") + .field("type", "nested") + .startObject("properties") + .startObject("number") + .field("type", "long") + .field("ignore_malformed", true) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + ) + ); + ensureGreen("synthetic"); + + prepareIndex("synthetic").setId("1") + .setSource( + jsonBuilder().startObject().startArray("nested").startObject().field("number", "a").endObject().endArray().endObject() + ) + .get(); + refresh("synthetic"); + + assertResponse(client().prepareSearch("synthetic").addFetchField("_ignored"), searchResponse -> { + assertHitCount(searchResponse, 1); + 
assertEquals(1, searchResponse.getHits().getHits().length); + SearchHit searchHit = searchResponse.getHits().getAt(0); + assertEquals("nested.number", searchHit.getFields().get("_ignored").getValue()); + @SuppressWarnings("unchecked") + Map nested = (Map) searchHit.getSourceAsMap().get("nested"); + assertEquals("a", nested.get("number")); + }); + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index 21a607f113f14..35a117ac8922b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -20,7 +20,6 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.AggregationBuilder; -import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter; import org.elasticsearch.search.aggregations.bucket.terms.SignificantTerms; @@ -136,8 +135,8 @@ public void testXContentResponse() throws Exception { StringTerms classes = response.getAggregations().get("class"); assertThat(classes.getBuckets().size(), equalTo(2)); for (Terms.Bucket classBucket : classes.getBuckets()) { - Map aggs = classBucket.getAggregations().asMap(); - assertTrue(aggs.containsKey("sig_terms")); + InternalAggregations aggs = classBucket.getAggregations(); + assertNotNull(aggs.get("sig_terms")); SignificantTerms agg = (SignificantTerms) aggs.get("sig_terms"); assertThat(agg.getBuckets().size(), equalTo(1)); String term = agg.iterator().next().getKeyAsString(); @@ -323,21 +322,21 @@ public void testBackgroundVsSeparateSet( assertNoFailuresAndResponse(request1, response1 -> assertNoFailuresAndResponse(request2, response2 -> { StringTerms classes = response1.getAggregations().get("class"); - SignificantTerms sigTerms0 = ((SignificantTerms) (classes.getBucketByKey("0").getAggregations().asMap().get("sig_terms"))); + SignificantTerms sigTerms0 = classes.getBucketByKey("0").getAggregations().get("sig_terms"); assertThat(sigTerms0.getBuckets().size(), equalTo(2)); double score00Background = sigTerms0.getBucketByKey("0").getSignificanceScore(); double score01Background = sigTerms0.getBucketByKey("1").getSignificanceScore(); - SignificantTerms sigTerms1 = ((SignificantTerms) (classes.getBucketByKey("1").getAggregations().asMap().get("sig_terms"))); + SignificantTerms sigTerms1 = classes.getBucketByKey("1").getAggregations().get("sig_terms"); double score10Background = sigTerms1.getBucketByKey("0").getSignificanceScore(); double score11Background = sigTerms1.getBucketByKey("1").getSignificanceScore(); InternalAggregations aggs = response2.getAggregations(); - sigTerms0 = (SignificantTerms) ((InternalFilter) aggs.get("0")).getAggregations().getAsMap().get("sig_terms"); + sigTerms0 = ((InternalFilter) aggs.get("0")).getAggregations().get("sig_terms"); double score00SeparateSets = sigTerms0.getBucketByKey("0").getSignificanceScore(); double score01SeparateSets = sigTerms0.getBucketByKey("1").getSignificanceScore(); - sigTerms1 = (SignificantTerms) ((InternalFilter) 
aggs.get("1")).getAggregations().getAsMap().get("sig_terms"); + sigTerms1 = ((InternalFilter) aggs.get("1")).getAggregations().get("sig_terms"); double score10SeparateSets = sigTerms1.getBucketByKey("0").getSignificanceScore(); double score11SeparateSets = sigTerms1.getBucketByKey("1").getSignificanceScore(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java index e15ad15bb4e3a..750868defde97 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java @@ -290,7 +290,7 @@ public void testSingleValuedFieldGetProperty() throws Exception { assertThat(global.getName(), equalTo("global")); assertThat(global.getDocCount(), equalTo(10L)); assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); + assertThat(global.getAggregations().asList().size(), equalTo(1)); ExtendedStats stats = global.getAggregations().get("stats"); assertThat(stats, notNullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java index 3f5d8e441dc44..13d66a5cf3949 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java @@ -222,7 +222,7 @@ public void testSingleValuedFieldGetProperty() throws Exception { assertThat(global.getName(), equalTo("global")); assertThat(global.getDocCount(), equalTo(10L)); assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); + assertThat(global.getAggregations().asList().size(), equalTo(1)); PercentileRanks values = global.getAggregations().get("percentile_ranks"); assertThat(values, notNullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java index 0dbc811a7debc..cd69fb8241ef2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java @@ -199,7 +199,7 @@ public void testSingleValuedFieldGetProperty() throws Exception { assertThat(global.getName(), equalTo("global")); assertThat(global.getDocCount(), equalTo(10L)); assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); + assertThat(global.getAggregations().asList().size(), equalTo(1)); Percentiles percentiles = global.getAggregations().get("percentiles"); assertThat(percentiles, notNullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java index 06f43416eb03a..6c80931914ac6 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java @@ -185,7 +185,7 @@ public void testSingleValuedFieldGetProperty() throws Exception { assertThat(global.getName(), is("global")); assertThat(global.getDocCount(), is((long) NUMBER_OF_DOCS)); assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().entrySet(), hasSize(1)); + assertThat(global.getAggregations().asList().size(), equalTo(1)); final MedianAbsoluteDeviation mad = global.getAggregations().get("mad"); assertThat(mad, notNullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java index 02c45c4aade1b..eeee745b32f92 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -21,9 +21,9 @@ import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -722,7 +722,7 @@ public void testInitMapCombineReduceGetProperty() throws Exception { assertThat(global.getName(), equalTo("global")); assertThat(global.getDocCount(), equalTo(numDocs)); assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); + assertThat(global.getAggregations().asList().size(), equalTo(1)); ScriptedMetric scriptedMetricAggregation = global.getAggregations().get("scripted"); assertThat(scriptedMetricAggregation, notNullValue()); @@ -1103,7 +1103,7 @@ public void testEmptyAggregation() throws Exception { assertThat(response.getHits().getTotalHits().value, equalTo(2L)); Histogram histo = response.getAggregations().get("histo"); assertThat(histo, notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); + Bucket bucket = histo.getBuckets().get(1); assertThat(bucket, notNullValue()); ScriptedMetric scriptedMetric = bucket.getAggregations().get("scripted"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java index f97d886ae8df6..84e0bee396c9d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java @@ -127,7 +127,7 @@ public void testSingleValuedFieldGetProperty() throws Exception { assertThat(global.getName(), equalTo("global")); assertThat(global.getDocCount(), equalTo(10L)); assertThat(global.getAggregations(), notNullValue()); - 
assertThat(global.getAggregations().asMap().size(), equalTo(1)); + assertThat(global.getAggregations().asList().size(), equalTo(1)); Stats stats = global.getAggregations().get("stats"); assertThat(stats, notNullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java index 37524dabe7f09..d50c101dbd5d1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java @@ -140,7 +140,7 @@ public void testSingleValuedFieldGetProperty() throws Exception { assertThat(global.getName(), equalTo("global")); assertThat(global.getDocCount(), equalTo(10L)); assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); + assertThat(global.getAggregations().asList().size(), equalTo(1)); Sum sum = global.getAggregations().get("sum"); assertThat(sum, notNullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java index 47c443a58eeda..9c737cb734f16 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java @@ -190,7 +190,7 @@ public void testSingleValuedFieldGetProperty() throws Exception { assertThat(global.getName(), equalTo("global")); assertThat(global.getDocCount(), equalTo(10L)); assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); + assertThat(global.getAggregations().asList().size(), equalTo(1)); PercentileRanks values = global.getAggregations().get("percentile_ranks"); assertThat(values, notNullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java index 98086451c3456..1c101324cd5fc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java @@ -176,7 +176,7 @@ public void testSingleValuedFieldGetProperty() throws Exception { assertThat(global.getName(), equalTo("global")); assertThat(global.getDocCount(), equalTo(10L)); assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); + assertThat(global.getAggregations().asList().size(), equalTo(1)); Percentiles percentiles = global.getAggregations().get("percentiles"); assertThat(percentiles, notNullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index 991fe98612e3d..fc753b0844c46 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -461,7 +461,7 @@ public void testBasicsGetProperty() throws Exception { assertThat(global, notNullValue()); assertThat(global.getName(), equalTo("global")); assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); + assertThat(global.getAggregations().asList().size(), equalTo(1)); TopHits topHits = global.getAggregations().get("hits"); assertThat(topHits, notNullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java index 7c5ab6600e365..c3feff6f3eaaa 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java @@ -99,7 +99,7 @@ public void testSingleValuedFieldGetProperty() throws Exception { assertThat(global.getName(), equalTo("global")); assertThat(global.getDocCount(), equalTo(10L)); assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); + assertThat(global.getAggregations().asList().size(), equalTo(1)); ValueCount valueCount = global.getAggregations().get("count"); assertThat(valueCount, notNullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java index 421a5d2d36254..0da75854b9ab2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java @@ -12,8 +12,8 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.metrics.ExtendedStats.Bounds; import java.util.ArrayList; @@ -119,7 +119,7 @@ public void testGappyIndexWithSigma() { } else { expectedDocCount = 1; } - Histogram.Bucket bucket = buckets.get(i); + Bucket bucket = buckets.get(i); assertThat("i: " + i, bucket, notNullValue()); assertThat("i: " + i, ((Number) bucket.getKey()).longValue(), equalTo((long) i)); assertThat("i: " + i, bucket.getDocCount(), equalTo(expectedDocCount)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 0a6fceea9a3f1..d9d6979ffd710 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -2177,6 +2177,15 @@ public void testHighlightNoMatchSize() throws IOException { field.highlighterType("unified"); 
assertNotHighlighted(prepareSearch("test").highlighter(new HighlightBuilder().field(field)), 0, "text"); + + // Check when the requested fragment size equals the size of the string + var anotherText = "I am unusual and don't end with your regular )token)"; + indexDoc("test", "1", "text", anotherText); + refresh(); + for (String type : new String[] { "plain", "unified", "fvh" }) { + field.highlighterType(type).noMatchSize(anotherText.length()).numOfFragments(0); + assertHighlight(prepareSearch("test").highlighter(new HighlightBuilder().field(field)), 0, "text", 0, 1, equalTo(anotherText)); + } } public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java index 9d3ce1c99b553..076158ee22037 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -859,7 +859,7 @@ protected String contentType() { @Override public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - return new StringStoredFieldFieldLoader(name(), simpleName(), null) { + return new StringStoredFieldFieldLoader(fullPath(), leafName(), null) { @Override protected void write(XContentBuilder b, Object value) throws IOException { BytesRef ref = (BytesRef) value; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java index bcecc49c2d463..422d6f06f2988 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java @@ -116,11 +116,8 @@ public void testScriptScoresWithAgg() throws Exception { ), response -> { assertThat(response.getHits().getAt(0).getScore(), equalTo(1.0f)); - assertThat( - ((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getKeyAsString(), - equalTo("1.0") - ); - assertThat(((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getDocCount(), is(1L)); + assertThat(((Terms) response.getAggregations().get("score_agg")).getBuckets().get(0).getKeyAsString(), equalTo("1.0")); + assertThat(((Terms) response.getAggregations().get("score_agg")).getBuckets().get(0).getDocCount(), is(1L)); } ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java index 396af7e8501cf..d42a84677a8f7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java @@ -146,7 +146,6 @@ private static class LinearMultScoreFunction implements DecayFunction { @Override public double evaluate(double value, double scale) { - return value; } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoPointIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoPointIT.java index e631d17fc480c..7a3b1699c30e5 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoPointIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoPointIT.java @@ -9,24 +9,14 @@ package org.elasticsearch.search.geo; import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.TestGeoShapeFieldMapperPlugin; import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import java.io.IOException; -import java.util.Collection; -import java.util.Collections; public class GeoBoundingBoxQueryGeoPointIT extends GeoBoundingBoxQueryIntegTestCase { - @SuppressWarnings("deprecation") - @Override - protected Collection> nodePlugins() { - return Collections.singleton(TestGeoShapeFieldMapperPlugin.class); - } - @Override public XContentBuilder getMapping() throws IOException { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoShapeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoShapeIT.java deleted file mode 100644 index 2b310f6b0ea3e..0000000000000 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoShapeIT.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.search.geo; - -import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.TestGeoShapeFieldMapperPlugin; -import org.elasticsearch.test.index.IndexVersionUtils; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentFactory; - -import java.io.IOException; -import java.util.Collection; -import java.util.Collections; - -public class GeoBoundingBoxQueryGeoShapeIT extends GeoBoundingBoxQueryIntegTestCase { - - @SuppressWarnings("deprecation") - @Override - protected Collection> nodePlugins() { - return Collections.singleton(TestGeoShapeFieldMapperPlugin.class); - } - - @Override - public XContentBuilder getMapping() throws IOException { - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() - .startObject() - .startObject("_doc") - .startObject("properties") - .startObject("location") - .field("type", "geo_shape"); - xContentBuilder.endObject().endObject().endObject().endObject(); - return xContentBuilder; - } - - @Override - public IndexVersion randomSupportedVersion() { - return IndexVersionUtils.randomCompatibleVersion(random()); - } -} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoShapeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoShapeIT.java deleted file mode 100644 index c165ed02984e6..0000000000000 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoShapeIT.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.search.geo; - -import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.TestGeoShapeFieldMapperPlugin; -import org.elasticsearch.test.index.IndexVersionUtils; -import org.elasticsearch.xcontent.XContentBuilder; - -import java.io.IOException; -import java.util.Collection; -import java.util.Collections; - -public class GeoShapeIT extends GeoShapeIntegTestCase { - - @SuppressWarnings("deprecation") - @Override - protected Collection> nodePlugins() { - return Collections.singleton(TestGeoShapeFieldMapperPlugin.class); - } - - @Override - protected void getGeoShapeMapping(XContentBuilder b) throws IOException { - b.field("type", "geo_shape"); - } - - @Override - protected IndexVersion randomSupportedVersion() { - return IndexVersionUtils.randomCompatibleVersion(random()); - } - - @Override - protected boolean allowExpensiveQueries() { - return true; - } -} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/rank/MockedRequestActionBasedRerankerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/rank/MockedRequestActionBasedRerankerIT.java index 32ce485db5727..0d6d17cbaeb1f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/rank/MockedRequestActionBasedRerankerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/rank/MockedRequestActionBasedRerankerIT.java @@ -65,15 +65,23 @@ public class MockedRequestActionBasedRerankerIT extends AbstractRerankerIT { private static final String inferenceId = "inference-id"; private static final String inferenceText = "inference-text"; + private static final float minScore = 0.0f; @Override protected RankBuilder getRankBuilder(int rankWindowSize, String rankFeatureField) { - return new MockRequestActionBasedRankBuilder(rankWindowSize, rankFeatureField, inferenceId, inferenceText); + return new MockRequestActionBasedRankBuilder(rankWindowSize, rankFeatureField, inferenceId, inferenceText, minScore); } @Override protected RankBuilder getThrowingRankBuilder(int rankWindowSize, String rankFeatureField, ThrowingRankBuilderType type) { - return new ThrowingMockRequestActionBasedRankBuilder(rankWindowSize, rankFeatureField, inferenceId, inferenceText, type.name()); + return new ThrowingMockRequestActionBasedRankBuilder( + rankWindowSize, + rankFeatureField, + inferenceId, + inferenceText, + minScore, + type.name() + ); } @Override @@ -237,7 +245,8 @@ public static class TestRerankingRankFeaturePhaseRankCoordinatorContext extends int windowSize, Client client, String inferenceId, - String inferenceText + String inferenceText, + float minScore ) { super(size, from, windowSize); this.client = client; @@ -288,6 +297,7 @@ public static class MockRequestActionBasedRankBuilder extends RankBuilder { public static final ParseField FIELD_FIELD = new ParseField("field"); public static final ParseField INFERENCE_ID = new ParseField("inference_id"); public static final ParseField INFERENCE_TEXT = new ParseField("inference_text"); + public static final ParseField MIN_SCORE_FIELD = new ParseField("min_score"); static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "request_action_based_rank", args -> { @@ -298,7 +308,8 @@ public static class 
MockRequestActionBasedRankBuilder extends RankBuilder { } final String inferenceId = (String) args[2]; final String inferenceText = (String) args[3]; - return new MockRequestActionBasedRankBuilder(rankWindowSize, field, inferenceId, inferenceText); + final float minScore = (float) args[4]; + return new MockRequestActionBasedRankBuilder(rankWindowSize, field, inferenceId, inferenceText, minScore); } ); @@ -312,6 +323,7 @@ public static class MockRequestActionBasedRankBuilder extends RankBuilder { protected final String field; protected final String inferenceId; protected final String inferenceText; + protected final float minScore; public static MockRequestActionBasedRankBuilder fromXContent(XContentParser parser) throws IOException { return PARSER.parse(parser, null); @@ -321,12 +333,14 @@ public MockRequestActionBasedRankBuilder( final int rankWindowSize, final String field, final String inferenceId, - final String inferenceText + final String inferenceText, + final float minScore ) { super(rankWindowSize); this.field = field; this.inferenceId = inferenceId; this.inferenceText = inferenceText; + this.minScore = minScore; } public MockRequestActionBasedRankBuilder(StreamInput in) throws IOException { @@ -334,6 +348,7 @@ public MockRequestActionBasedRankBuilder(StreamInput in) throws IOException { this.field = in.readString(); this.inferenceId = in.readString(); this.inferenceText = in.readString(); + this.minScore = in.readFloat(); } @Override @@ -341,6 +356,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeString(field); out.writeString(inferenceId); out.writeString(inferenceText); + out.writeFloat(minScore); } @Override @@ -348,6 +364,7 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep builder.field(FIELD_FIELD.getPreferredName(), field); builder.field(INFERENCE_ID.getPreferredName(), inferenceId); builder.field(INFERENCE_TEXT.getPreferredName(), inferenceText); + builder.field(MIN_SCORE_FIELD.getPreferredName(), minScore); } @Override @@ -383,7 +400,8 @@ public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorCo rankWindowSize(), client, inferenceId, - inferenceText + inferenceText, + minScore ); } @@ -425,8 +443,16 @@ public static class ThrowingMockRequestActionBasedRankBuilder extends MockReques } final String inferenceId = (String) args[2]; final String inferenceText = (String) args[3]; - String throwingType = (String) args[4]; - return new ThrowingMockRequestActionBasedRankBuilder(rankWindowSize, field, inferenceId, inferenceText, throwingType); + final float minScore = (float) args[4]; + String throwingType = (String) args[5]; + return new ThrowingMockRequestActionBasedRankBuilder( + rankWindowSize, + field, + inferenceId, + inferenceText, + minScore, + throwingType + ); } ); @@ -449,9 +475,10 @@ public ThrowingMockRequestActionBasedRankBuilder( final String field, final String inferenceId, final String inferenceText, + final float minScore, final String throwingType ) { - super(rankWindowSize, field, inferenceId, inferenceText); + super(rankWindowSize, field, inferenceId, inferenceText, minScore); this.throwingRankBuilderType = ThrowingRankBuilderType.valueOf(throwingType); } @@ -526,7 +553,8 @@ public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorCo rankWindowSize(), client, inferenceId, - inferenceText + inferenceText, + minScore ) { @Override protected TestRerankingActionRequest generateRequest(List docFeatures) { diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java index e6b2b86d0dbeb..836bd26f08eee 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java @@ -47,7 +47,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.transport.RemoteTransportException; import java.io.IOException; import java.nio.file.Files; @@ -677,6 +676,7 @@ public void testQueuedOperationsOnMasterRestart() throws Exception { public void testQueuedOperationsOnMasterDisconnect() throws Exception { internalCluster().startMasterOnlyNodes(3); final String dataNode = internalCluster().startDataOnlyNode(); + ensureStableCluster(4, dataNode); final String repoName = "test-repo"; createRepository(repoName, "mock"); createIndexWithContent("index-one"); @@ -693,7 +693,7 @@ public void testQueuedOperationsOnMasterDisconnect() throws Exception { .execute(); waitForBlock(masterNode, repoName); - final ActionFuture createThirdSnapshot = client(masterNode).admin() + final ActionFuture createSnapshot = client(masterNode).admin() .cluster() .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-three") .setWaitForCompletion(true) @@ -714,8 +714,7 @@ public void testQueuedOperationsOnMasterDisconnect() throws Exception { logger.info("--> make sure all failing requests get a response"); assertAcked(firstDeleteFuture.get()); assertAcked(secondDeleteFuture.get()); - expectThrows(SnapshotException.class, createThirdSnapshot); - + expectThrows(SnapshotException.class, createSnapshot); awaitNoMoreRunningOperations(); } @@ -788,18 +787,7 @@ public void testQueuedOperationsAndBrokenRepoOnMasterFailOver() throws Exception ensureStableCluster(3); awaitNoMoreRunningOperations(); - var innerException = expectThrows(ExecutionException.class, RuntimeException.class, deleteFuture::get); - - // There may be many layers of RTE to unwrap here, see https://github.com/elastic/elasticsearch/issues/102351. 
- // ExceptionsHelper#unwrapCause gives up at 10 layers of wrapping so we must unwrap more tenaciously by hand here: - while (true) { - if (innerException instanceof RemoteTransportException remoteTransportException) { - innerException = asInstanceOf(RuntimeException.class, remoteTransportException.getCause()); - } else { - assertThat(innerException, instanceOf(RepositoryException.class)); - break; - } - } + expectThrows(RepositoryException.class, deleteFuture::actionGet); } public void testQueuedSnapshotOperationsAndBrokenRepoOnMasterFailOver() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java index 7c5f38fee02a9..1130ddaa74f38 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java @@ -31,10 +31,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.in; -import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; public class GetSnapshotsIT extends AbstractSnapshotIntegTestCase { @@ -314,6 +312,7 @@ public void testExcludePatterns() throws Exception { assertThat( clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots("non-existing*", otherPrefixSnapshot1, "-o*") + .setIgnoreUnavailable(true) .get() .getSnapshots(), empty() @@ -586,12 +585,17 @@ public void testRetrievingSnapshotsWhenRepositoryIsMissing() throws Exception { final List snapshotNames = createNSnapshots(repoName, randomIntBetween(1, 10)); snapshotNames.sort(String::compareTo); - final GetSnapshotsResponse response = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName, missingRepoName) + final var oneRepoFuture = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName, missingRepoName) .setSort(SnapshotSortKey.NAME) - .get(); - assertThat(response.getSnapshots().stream().map(info -> info.snapshotId().getName()).toList(), equalTo(snapshotNames)); - assertTrue(response.getFailures().containsKey(missingRepoName)); - assertThat(response.getFailures().get(missingRepoName), instanceOf(RepositoryMissingException.class)); + .setIgnoreUnavailable(randomBoolean()) + .execute(); + expectThrows(RepositoryMissingException.class, oneRepoFuture::actionGet); + + final var multiRepoFuture = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName, missingRepoName) + .setSort(SnapshotSortKey.NAME) + .setIgnoreUnavailable(randomBoolean()) + .execute(); + expectThrows(RepositoryMissingException.class, multiRepoFuture::actionGet); } // Create a snapshot that is guaranteed to have a unique start time and duration for tests around ordering by either. 
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java index 600a3953d9bda..b155ef73783eb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java @@ -52,7 +52,6 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.oneOf; @@ -395,16 +394,13 @@ public void testGetSnapshotsMultipleRepos() throws Exception { } logger.info("--> specify all snapshot names with ignoreUnavailable=false"); - GetSnapshotsResponse getSnapshotsResponse2 = client.admin() + final var failingFuture = client.admin() .cluster() .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, randomFrom("_all", "repo*")) .setIgnoreUnavailable(false) .setSnapshots(snapshotList.toArray(new String[0])) - .get(); - - for (String repo : repoList) { - assertThat(getSnapshotsResponse2.getFailures().get(repo), instanceOf(SnapshotMissingException.class)); - } + .execute(); + expectThrows(SnapshotMissingException.class, failingFuture::actionGet); logger.info("--> specify all snapshot names with ignoreUnavailable=true"); GetSnapshotsResponse getSnapshotsResponse3 = client.admin() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java index 9bcddd5c58d66..b8b6dcb25b557 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java @@ -216,11 +216,13 @@ private static class TrackedCluster { static final Logger logger = LogManager.getLogger(TrackedCluster.class); static final String CLIENT = "client"; + static final String NODE_RESTARTER = "node_restarter"; private final ThreadPool threadPool = new TestThreadPool( "TrackedCluster", // a single thread for "client" activities, to limit the number of activities all starting at once - new ScalingExecutorBuilder(CLIENT, 1, 1, TimeValue.ZERO, true, CLIENT) + new ScalingExecutorBuilder(CLIENT, 1, 1, TimeValue.ZERO, true, CLIENT), + new ScalingExecutorBuilder(NODE_RESTARTER, 1, 5, TimeValue.ZERO, true, NODE_RESTARTER) ); private final Executor clientExecutor = threadPool.executor(CLIENT); @@ -1163,7 +1165,7 @@ private void startNodeRestarter() { final String nodeName = trackedNode.nodeName; final Releasable releaseAll = localReleasables.transfer(); - threadPool.generic().execute(mustSucceed(() -> { + threadPool.executor(NODE_RESTARTER).execute(mustSucceed(() -> { logger.info("--> restarting [{}]", nodeName); cluster.restartNode(nodeName); logger.info("--> finished restarting [{}]", nodeName); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/synonyms/SynonymsManagementAPIServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/synonyms/SynonymsManagementAPIServiceIT.java new file mode 100644 index 0000000000000..f6392954912ac --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/synonyms/SynonymsManagementAPIServiceIT.java @@ -0,0 +1,291 @@ +/* + * 
Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.synonyms; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.reindex.ReindexPlugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.junit.Before; + +import java.util.Collection; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.action.synonyms.SynonymsTestUtils.randomSynonymRule; +import static org.elasticsearch.action.synonyms.SynonymsTestUtils.randomSynonymsSet; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +public class SynonymsManagementAPIServiceIT extends ESIntegTestCase { + + private SynonymsManagementAPIService synonymsManagementAPIService; + private int maxSynonymSets; + + @Override + protected Collection> nodePlugins() { + return List.of(ReindexPlugin.class, MapperExtrasPlugin.class); + } + + @Before + public void setUp() throws Exception { + super.setUp(); + maxSynonymSets = randomIntBetween(100, 1000); + synonymsManagementAPIService = new SynonymsManagementAPIService(client(), maxSynonymSets); + } + + public void testCreateManySynonyms() throws Exception { + CountDownLatch putLatch = new CountDownLatch(1); + String synonymSetId = randomIdentifier(); + int rulesNumber = randomIntBetween(maxSynonymSets / 2, maxSynonymSets); + synonymsManagementAPIService.putSynonymsSet(synonymSetId, randomSynonymsSet(rulesNumber, rulesNumber), new ActionListener<>() { + @Override + public void onResponse(SynonymsManagementAPIService.SynonymsReloadResult synonymsReloadResult) { + assertEquals( + SynonymsManagementAPIService.UpdateSynonymsResultStatus.CREATED, + synonymsReloadResult.synonymsOperationResult() + ); + putLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail(e); + } + }); + + putLatch.await(5, TimeUnit.SECONDS); + + CountDownLatch getLatch = new CountDownLatch(1); + // Also retrieve them + assertBusy(() -> { + synonymsManagementAPIService.getSynonymSetRules(synonymSetId, 0, maxSynonymSets, new ActionListener<>() { + @Override + public void onResponse(PagedResult synonymRulePagedResult) { + assertEquals(rulesNumber, synonymRulePagedResult.totalResults()); + assertEquals(rulesNumber, synonymRulePagedResult.pageResults().length); + getLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail(e); + } + }); + }, 5, TimeUnit.SECONDS); + + getLatch.await(10, TimeUnit.SECONDS); + } + + public void testCreateTooManySynonymsAtOnce() throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + synonymsManagementAPIService.putSynonymsSet( + randomIdentifier(), + randomSynonymsSet(maxSynonymSets + 1, maxSynonymSets * 2), + new ActionListener<>() { + @Override + public void onResponse(SynonymsManagementAPIService.SynonymsReloadResult synonymsReloadResult) { + fail("Shouldn't create 
synonyms that are too large"); + } + + @Override + public void onFailure(Exception e) { + if (e instanceof IllegalArgumentException) { + latch.countDown(); + } else { + fail(e); + } + } + } + ); + + latch.await(5, TimeUnit.SECONDS); + } + + public void testCreateTooManySynonymsUsingRuleUpdates() throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + int rulesToUpdate = randomIntBetween(1, 10); + int synonymsToCreate = maxSynonymSets - rulesToUpdate; + String synonymSetId = randomIdentifier(); + synonymsManagementAPIService.putSynonymsSet(synonymSetId, randomSynonymsSet(synonymsToCreate), new ActionListener<>() { + @Override + public void onResponse(SynonymsManagementAPIService.SynonymsReloadResult synonymsReloadResult) { + // Create as many rules as should fail + SynonymRule[] rules = randomSynonymsSet(atLeast(rulesToUpdate + 1)); + CountDownLatch updatedRulesLatch = new CountDownLatch(rulesToUpdate); + for (int i = 0; i < rulesToUpdate; i++) { + synonymsManagementAPIService.putSynonymRule(synonymSetId, rules[i], new ActionListener<>() { + @Override + public void onResponse(SynonymsManagementAPIService.SynonymsReloadResult synonymsReloadResult) { + updatedRulesLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail(e); + } + }); + } + try { + updatedRulesLatch.await(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + fail(e); + } + + // Updating more rules fails + int rulesToInsert = rules.length - rulesToUpdate; + CountDownLatch insertRulesLatch = new CountDownLatch(rulesToInsert); + for (int i = rulesToUpdate; i < rulesToInsert; i++) { + synonymsManagementAPIService.putSynonymRule( + // Error here + synonymSetId, + rules[i], + new ActionListener<>() { + @Override + public void onResponse(SynonymsManagementAPIService.SynonymsReloadResult synonymsReloadResult) { + fail("Shouldn't have been able to update a rule"); + } + + @Override + public void onFailure(Exception e) { + if (e instanceof IllegalArgumentException == false) { + fail(e); + } + updatedRulesLatch.countDown(); + } + } + ); + } + try { + insertRulesLatch.await(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + fail(e); + } + } + + @Override + public void onFailure(Exception e) { + fail(e); + } + }); + + latch.await(5, TimeUnit.SECONDS); + } + + public void testUpdateRuleWithMaxSynonyms() throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + String synonymSetId = randomIdentifier(); + SynonymRule[] synonymsSet = randomSynonymsSet(maxSynonymSets, maxSynonymSets); + synonymsManagementAPIService.putSynonymsSet(synonymSetId, synonymsSet, new ActionListener<>() { + @Override + public void onResponse(SynonymsManagementAPIService.SynonymsReloadResult synonymsReloadResult) { + // Updating a rule fails + synonymsManagementAPIService.putSynonymRule( + synonymSetId, + synonymsSet[randomIntBetween(0, maxSynonymSets - 1)], + new ActionListener<>() { + @Override + public void onResponse(SynonymsManagementAPIService.SynonymsReloadResult synonymsReloadResult) { + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("Should update a rule that already exists at max capcity"); + } + } + ); + } + + @Override + public void onFailure(Exception e) { + fail(e); + } + }); + + latch.await(5, TimeUnit.SECONDS); + } + + public void testCreateRuleWithMaxSynonyms() throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + String synonymSetId = randomIdentifier(); + String ruleId = randomIdentifier(); + 
SynonymRule[] synonymsSet = randomSynonymsSet(maxSynonymSets, maxSynonymSets); + synonymsManagementAPIService.putSynonymsSet(synonymSetId, synonymsSet, new ActionListener<>() { + @Override + public void onResponse(SynonymsManagementAPIService.SynonymsReloadResult synonymsReloadResult) { + // Creating a new rule at max capacity should fail + synonymsManagementAPIService.putSynonymRule(synonymSetId, randomSynonymRule(ruleId), new ActionListener<>() { + @Override + public void onResponse(SynonymsManagementAPIService.SynonymsReloadResult synonymsReloadResult) { + fail("Should not be able to create a new rule when the set is already at max capacity"); + } + + @Override + public void onFailure(Exception e) { + latch.countDown(); + } + }); + } + + @Override + public void onFailure(Exception e) { + fail(e); + } + }); + + latch.await(5, TimeUnit.SECONDS); + } + + public void testTooManySynonymsOnIndexTriggersWarning() throws InterruptedException { + CountDownLatch insertLatch = new CountDownLatch(1); + String synonymSetId = randomIdentifier(); + synonymsManagementAPIService.bulkUpdateSynonymsSet( + synonymSetId, + randomSynonymsSet(atLeast(maxSynonymSets + 1)), + new ActionListener<>() { + @Override + public void onResponse(BulkResponse bulkItemResponses) { + insertLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail(e); + } + } + ); + + insertLatch.await(5, TimeUnit.SECONDS); + Logger logger = mock(Logger.class); + SynonymsManagementAPIService.logger = logger; + + CountDownLatch readLatch = new CountDownLatch(1); + synonymsManagementAPIService.getSynonymSetRules(synonymSetId, new ActionListener<>() { + @Override + public void onResponse(PagedResult synonymRulePagedResult) { + readLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("Should have been able to retrieve the synonyms and log a warning instead"); + } + }); + + readLatch.await(5, TimeUnit.SECONDS); + verify(logger).warn(anyString(), eq(synonymSetId)); + } +} diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index db7e3d40518ba..5f77a4211fefc 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -189,7 +189,6 @@ exports org.elasticsearch.common.compress; exports org.elasticsearch.common.document; exports org.elasticsearch.common.file; - exports org.elasticsearch.common.filesystem; exports org.elasticsearch.common.geo; exports org.elasticsearch.common.hash; exports org.elasticsearch.common.inject; @@ -431,6 +430,7 @@ org.elasticsearch.indices.IndicesFeatures, org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures, org.elasticsearch.index.mapper.MapperFeatures, + org.elasticsearch.search.SearchFeatures, org.elasticsearch.script.ScriptFeatures, org.elasticsearch.search.retriever.RetrieversFeatures, org.elasticsearch.reservedstate.service.FileSettingsFeatures; @@ -449,7 +449,10 @@ with org.elasticsearch.index.codec.vectors.ES813FlatVectorFormat, org.elasticsearch.index.codec.vectors.ES813Int8FlatVectorFormat, - org.elasticsearch.index.codec.vectors.ES814HnswScalarQuantizedVectorsFormat; + org.elasticsearch.index.codec.vectors.ES814HnswScalarQuantizedVectorsFormat, + org.elasticsearch.index.codec.vectors.ES815HnswBitVectorsFormat, + org.elasticsearch.index.codec.vectors.ES815BitFlatVectorFormat; + provides org.apache.lucene.codecs.Codec with Elasticsearch814Codec; provides org.apache.logging.log4j.core.util.ContextDataProvider with org.elasticsearch.common.logging.DynamicContextDataProvider; diff --git
a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 0f9c77e810924..183094a1048d8 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -197,6 +197,23 @@ static TransportVersion def(int id) { public static final TransportVersion ML_AD_OUTPUT_MEMORY_ALLOCATOR_FIELD = def(8_688_00_0); public static final TransportVersion FAILURE_STORE_LAZY_CREATION = def(8_689_00_0); public static final TransportVersion SNAPSHOT_REQUEST_TIMEOUTS = def(8_690_00_0); + public static final TransportVersion INDEX_METADATA_MAPPINGS_UPDATED_VERSION = def(8_691_00_0); + public static final TransportVersion ML_INFERENCE_ELAND_SETTINGS_ADDED = def(8_692_00_0); + public static final TransportVersion ML_ANTHROPIC_INTEGRATION_ADDED = def(8_693_00_0); + public static final TransportVersion ML_INFERENCE_GOOGLE_VERTEX_AI_EMBEDDINGS_ADDED = def(8_694_00_0); + public static final TransportVersion EVENT_INGESTED_RANGE_IN_CLUSTER_STATE = def(8_695_00_0); + public static final TransportVersion ESQL_ADD_AGGREGATE_TYPE = def(8_696_00_0); + public static final TransportVersion SECURITY_MIGRATIONS_MIGRATION_NEEDED_ADDED = def(8_697_00_0); + public static final TransportVersion K_FOR_KNN_QUERY_ADDED = def(8_698_00_0); + public static final TransportVersion TEXT_SIMILARITY_RERANKER_RETRIEVER = def(8_699_00_0); + public static final TransportVersion ML_INFERENCE_GOOGLE_VERTEX_AI_RERANKING_ADDED = def(8_700_00_0); + public static final TransportVersion VERSIONED_MASTER_NODE_REQUESTS = def(8_701_00_0); + public static final TransportVersion ML_INFERENCE_AMAZON_BEDROCK_ADDED = def(8_702_00_0); + public static final TransportVersion ML_INFERENCE_DONT_DELETE_WHEN_SEMANTIC_TEXT_EXISTS = def(8_703_00_0); + public static final TransportVersion INFERENCE_ADAPTIVE_ALLOCATIONS = def(8_704_00_0); + public static final TransportVersion INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN = def(8_705_00_0); + public static final TransportVersion ML_INFERENCE_COHERE_UNUSED_RERANK_SETTINGS_REMOVED = def(8_706_00_0); + public static final TransportVersion ENRICH_CACHE_STATS_SIZE_ADDED = def(8_707_00_0); /* * STOP! READ THIS FIRST! No, really, @@ -261,7 +278,7 @@ static TransportVersion def(int id) { * Reference to the minimum transport version that can be used with CCS. * This should be the transport version used by the previous minor release. 
*/ - public static final TransportVersion MINIMUM_CCS_VERSION = V_8_13_0; + public static final TransportVersion MINIMUM_CCS_VERSION = SHUTDOWN_REQUEST_TIMEOUTS_FIX_8_14; static final NavigableMap VERSION_IDS = getAllVersionIds(TransportVersions.class); diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index b2c78453d9c75..fefe2ea486485 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -178,8 +178,11 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_14_0 = new Version(8_14_00_99); public static final Version V_8_14_1 = new Version(8_14_01_99); public static final Version V_8_14_2 = new Version(8_14_02_99); + public static final Version V_8_14_3 = new Version(8_14_03_99); + public static final Version V_8_14_4 = new Version(8_14_04_99); public static final Version V_8_15_0 = new Version(8_15_00_99); - public static final Version CURRENT = V_8_15_0; + public static final Version V_8_16_0 = new Version(8_16_00_99); + public static final Version CURRENT = V_8_16_0; private static final NavigableMap VERSION_IDS; private static final Map VERSION_STRINGS; diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 1c41f2cdff37d..b550755ce7bdd 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -405,7 +405,7 @@ import org.elasticsearch.rest.action.synonyms.RestPutSynonymRuleAction; import org.elasticsearch.rest.action.synonyms.RestPutSynonymsAction; import org.elasticsearch.tasks.Task; -import org.elasticsearch.telemetry.tracing.Tracer; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.usage.UsageService; @@ -470,7 +470,7 @@ public ActionModule( CircuitBreakerService circuitBreakerService, UsageService usageService, SystemIndices systemIndices, - Tracer tracer, + TelemetryProvider telemetryProvider, ClusterService clusterService, RerouteService rerouteService, List> reservedStateHandlers, @@ -513,12 +513,12 @@ public ActionModule( var customController = getRestServerComponent( "REST controller", actionPlugins, - restPlugin -> restPlugin.getRestController(restInterceptor, nodeClient, circuitBreakerService, usageService, tracer) + restPlugin -> restPlugin.getRestController(restInterceptor, nodeClient, circuitBreakerService, usageService, telemetryProvider) ); if (customController != null) { restController = customController; } else { - restController = new RestController(restInterceptor, nodeClient, circuitBreakerService, usageService, tracer); + restController = new RestController(restInterceptor, nodeClient, circuitBreakerService, usageService, telemetryProvider); } reservedClusterStateService = new ReservedClusterStateService(clusterService, rerouteService, reservedStateHandlers); this.restExtension = restExtension; diff --git a/server/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java b/server/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java index e018cf48fcefc..564055aa36750 100644 --- a/server/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java +++ b/server/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java @@ -18,8 +18,6 @@ public final class 
NoShardAvailableActionException extends ElasticsearchException { - private static final StackTraceElement[] EMPTY_STACK_TRACE = new StackTraceElement[0]; - // This is set so that no StackTrace is serialized in the scenario when we wrap other shard failures. // It isn't necessary to serialize this field over the wire as the empty stack trace is serialized instead. private final boolean onShardFailureWrapper; @@ -57,8 +55,8 @@ public NoShardAvailableActionException(StreamInput in) throws IOException { } @Override - public StackTraceElement[] getStackTrace() { - return onShardFailureWrapper ? EMPTY_STACK_TRACE : super.getStackTrace(); + public Throwable fillInStackTrace() { + return this; // this exception doesn't imply a bug, no need for a stack trace } @Override @@ -67,7 +65,7 @@ public void printStackTrace(PrintWriter s) { super.printStackTrace(s); } else { // Override to simply print the first line of the trace, which is the current exception. - // Since we aren't serializing the repetitive stacktrace onShardFailureWrapper, we shouldn't print it out either + // Note: This will also omit the cause chain or any suppressed exceptions. s.println(this); } } diff --git a/server/src/main/java/org/elasticsearch/action/ResolvedIndices.java b/server/src/main/java/org/elasticsearch/action/ResolvedIndices.java index 2ff0b476dc60b..822a75c4dec42 100644 --- a/server/src/main/java/org/elasticsearch/action/ResolvedIndices.java +++ b/server/src/main/java/org/elasticsearch/action/ResolvedIndices.java @@ -27,6 +27,10 @@ import java.util.Map; import java.util.Set; +/** + * Container for information about results of the resolution of index expression. + * Contains local indices, map of remote indices and metadata. + */ public class ResolvedIndices { @Nullable private final SearchContextId searchContextId; diff --git a/server/src/main/java/org/elasticsearch/action/UnavailableShardsException.java b/server/src/main/java/org/elasticsearch/action/UnavailableShardsException.java index b4120804c20df..647e98e3599f5 100644 --- a/server/src/main/java/org/elasticsearch/action/UnavailableShardsException.java +++ b/server/src/main/java/org/elasticsearch/action/UnavailableShardsException.java @@ -45,4 +45,9 @@ public UnavailableShardsException(StreamInput in) throws IOException { public RestStatus status() { return RestStatus.SERVICE_UNAVAILABLE; } + + @Override + public Throwable fillInStackTrace() { + return this; // this exception doesn't imply a bug, no need for a stack trace + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java index 1e5f9d5d613d2..abb4f478cff54 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java @@ -16,6 +16,8 @@ import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.AllocationDecision; import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; +import org.elasticsearch.common.ReferenceDocs; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -43,10 +45,14 @@ */ public final class ClusterAllocationExplanation implements 
ChunkedToXContentObject, Writeable { - static final String NO_SHARD_SPECIFIED_MESSAGE = "No shard was specified in the explain API request, so this response " - + "explains a randomly chosen unassigned shard. There may be other unassigned shards in this cluster which cannot be assigned for " - + "different reasons. It may not be possible to assign this shard until one of the other shards is assigned correctly. To explain " - + "the allocation of other shards (whether assigned or unassigned) you must specify the target shard in the request to this API."; + static final String NO_SHARD_SPECIFIED_MESSAGE = Strings.format( + """ + No shard was specified in the explain API request, so this response explains a randomly chosen unassigned shard. There may be \ + other unassigned shards in this cluster which cannot be assigned for different reasons. It may not be possible to assign this \ + shard until one of the other shards is assigned correctly. To explain the allocation of other shards (whether assigned or \ + unassigned) you must specify the target shard in the request to this API. See %s for more information.""", + ReferenceDocs.ALLOCATION_EXPLAIN_API + ); private final boolean specificShard; private final ShardRouting shardRouting; @@ -206,25 +212,23 @@ private Iterator getShardAllocationDecisionChunked(ToXCont } else { String explanation; if (shardRouting.state() == ShardRoutingState.RELOCATING) { - explanation = "the shard is in the process of relocating from node [" - + currentNode.getName() - + "] " - + "to node [" - + relocationTargetNode.getName() - + "], wait until relocation has completed"; + explanation = Strings.format( + "the shard is in the process of relocating from node [%s] to node [%s], wait until relocation has completed", + currentNode.getName(), + relocationTargetNode.getName() + ); } else { assert shardRouting.state() == ShardRoutingState.INITIALIZING; - explanation = "the shard is in the process of initializing on node [" - + currentNode.getName() - + "], " - + "wait until initialization has completed"; + explanation = Strings.format( + "the shard is in the process of initializing on node [%s], wait until initialization has completed", + currentNode.getName() + ); } return Iterators.single((builder, p) -> builder.field("explanation", explanation)); } } - private static XContentBuilder unassignedInfoToXContent(UnassignedInfo unassignedInfo, XContentBuilder builder) throws IOException { - + private static void unassignedInfoToXContent(UnassignedInfo unassignedInfo, XContentBuilder builder) throws IOException { builder.startObject("unassigned_info"); builder.field("reason", unassignedInfo.reason()); builder.field("at", UnassignedInfo.DATE_TIME_FORMATTER.format(Instant.ofEpochMilli(unassignedInfo.unassignedTimeMillis()))); @@ -237,6 +241,5 @@ private static XContentBuilder unassignedInfoToXContent(UnassignedInfo unassigne } builder.field("last_allocation_status", AllocationDecision.fromAllocationStatus(unassignedInfo.lastAllocationStatus())); builder.endObject(); - return builder; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java index 313ee83669017..8e6f029c71013 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java @@ -28,6 +28,8 @@ import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.ReferenceDocs; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.snapshots.SnapshotsInfoService; import org.elasticsearch.tasks.Task; @@ -160,11 +162,10 @@ public static ShardRouting findShardToExplain(ClusterAllocationExplainRequest re } } if (foundShard == null) { - throw new IllegalArgumentException( - "No shard was specified in the request which means the response should explain a randomly-chosen unassigned shard, " - + "but there are no unassigned shards in this cluster. To explain the allocation of an assigned shard you must " - + "specify the target shard in the request." - ); + throw new IllegalArgumentException(Strings.format(""" + No shard was specified in the request which means the response should explain a randomly-chosen unassigned shard, but \ + there are no unassigned shards in this cluster. To explain the allocation of an assigned shard you must specify the \ + target shard in the request. See %s for more information.""", ReferenceDocs.ALLOCATION_EXPLAIN_API)); } } else { String index = request.getIndex(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java index 8ef828d07d8b0..7c797444fc458 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java @@ -15,7 +15,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.regex.Regex; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.sort.SortOrder; @@ -220,13 +219,6 @@ public String[] policies() { return policies; } - public boolean isSingleRepositoryRequest() { - return repositories.length == 1 - && repositories[0] != null - && "_all".equals(repositories[0]) == false - && Regex.isSimpleMatchPattern(repositories[0]) == false; - } - /** * Returns the names of the snapshots. 
* diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java index 85c2ff2806ace..f7dedc21f93b6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.xcontent.ToXContent; @@ -33,6 +34,7 @@ public class GetSnapshotsResponse extends ActionResponse implements ChunkedToXCo private final List snapshots; + @UpdateForV9 // always empty, can be dropped private final Map failures; @Nullable diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index dd08746236fed..ff5fdbaa787fe 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -8,7 +8,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.get; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; @@ -120,10 +119,14 @@ protected void masterOperation( ) { assert task instanceof CancellableTask : task + " not cancellable"; + final var resolvedRepositories = ResolvedRepositories.resolve(state, request.repositories()); + if (resolvedRepositories.hasMissingRepositories()) { + throw new RepositoryMissingException(String.join(", ", resolvedRepositories.missing())); + } + new GetSnapshotsOperation( (CancellableTask) task, - ResolvedRepositories.resolve(state, request.repositories()), - request.isSingleRepositoryRequest() == false, + resolvedRepositories.repositoryMetadata(), request.snapshots(), request.ignoreUnavailable(), request.policies(), @@ -151,7 +154,6 @@ private class GetSnapshotsOperation { // repositories private final List repositories; - private final boolean isMultiRepoRequest; // snapshots selection private final SnapshotNamePredicate snapshotNamePredicate; @@ -179,7 +181,6 @@ private class GetSnapshotsOperation { private final GetSnapshotInfoExecutor getSnapshotInfoExecutor; // results - private final Map failuresByRepository = ConcurrentCollections.newConcurrentMap(); private final Queue> allSnapshotInfos = ConcurrentCollections.newQueue(); /** @@ -195,8 +196,7 @@ private class GetSnapshotsOperation { GetSnapshotsOperation( CancellableTask cancellableTask, - ResolvedRepositories resolvedRepositories, - boolean isMultiRepoRequest, + List repositories, String[] snapshots, boolean ignoreUnavailable, String[] policies, @@ -211,8 +211,7 @@ private class GetSnapshotsOperation { boolean indices ) { this.cancellableTask = cancellableTask; - this.repositories = resolvedRepositories.repositoryMetadata(); - this.isMultiRepoRequest = isMultiRepoRequest; + this.repositories = repositories; this.ignoreUnavailable = ignoreUnavailable; this.sortBy 
= sortBy; this.order = order; @@ -232,10 +231,6 @@ private class GetSnapshotsOperation { threadPool.info(ThreadPool.Names.SNAPSHOT_META).getMax(), cancellableTask::isCancelled ); - - for (final var missingRepo : resolvedRepositories.missing()) { - failuresByRepository.put(missingRepo, new RepositoryMissingException(missingRepo)); - } } void getMultipleReposSnapshotInfo(ActionListener listener) { @@ -249,6 +244,10 @@ void getMultipleReposSnapshotInfo(ActionListener listener) continue; } + if (listeners.isFailing()) { + return; + } + SubscribableListener .newForked(repositoryDataListener -> { @@ -261,14 +260,7 @@ void getMultipleReposSnapshotInfo(ActionListener listener) .andThen((l, repositoryData) -> loadSnapshotInfos(repoName, repositoryData, l)) - .addListener(listeners.acquire().delegateResponse((l, e) -> { - if (isMultiRepoRequest && e instanceof ElasticsearchException elasticsearchException) { - failuresByRepository.put(repoName, elasticsearchException); - l.onResponse(null); - } else { - l.onFailure(e); - } - })); + .addListener(listeners.acquire()); } } }) @@ -503,7 +495,7 @@ private GetSnapshotsResponse buildResponse() { } return new GetSnapshotsResponse( snapshotInfos, - failuresByRepository, + null, remaining > 0 ? sortBy.encodeAfterQueryParam(snapshotInfos.get(snapshotInfos.size() - 1)) : null, totalCount.get(), remaining diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java index d1da8f4abc9d6..923fc07ae14d2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java @@ -124,7 +124,7 @@ public record FieldMappingMetadata(String fullName, BytesReference source) imple private static final ParseField MAPPING = new ParseField("mapping"); /** - * Returns the mappings as a map. Note that the returned map has a single key which is always the field's {@link Mapper#name}. + * Returns the mappings as a map. Note that the returned map has a single key which is always the field's {@link Mapper#fullPath}. 
*/ public Map sourceAsMap() { return XContentHelper.convertToMap(source, true, XContentType.JSON).v2(); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java index 679e344051873..27516e0ad5a7f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java @@ -158,12 +158,12 @@ private static Map findFieldMappings( for (String field : request.fields()) { if (Regex.isMatchAllPattern(field)) { for (Mapper fieldMapper : mappingLookup.fieldMappers()) { - addFieldMapper(fieldPredicate, fieldMapper.name(), fieldMapper, fieldMappings, request.includeDefaults()); + addFieldMapper(fieldPredicate, fieldMapper.fullPath(), fieldMapper, fieldMappings, request.includeDefaults()); } } else if (Regex.isSimpleMatchPattern(field)) { for (Mapper fieldMapper : mappingLookup.fieldMappers()) { - if (Regex.simpleMatch(field, fieldMapper.name())) { - addFieldMapper(fieldPredicate, fieldMapper.name(), fieldMapper, fieldMappings, request.includeDefaults()); + if (Regex.simpleMatch(field, fieldMapper.fullPath())) { + addFieldMapper(fieldPredicate, fieldMapper.fullPath(), fieldMapper, fieldMappings, request.includeDefaults()); } } } else { @@ -195,7 +195,7 @@ private static void addFieldMapper( includeDefaults ? includeDefaultsParams : ToXContent.EMPTY_PARAMS, false ); - fieldMappings.put(field, new FieldMappingMetadata(fieldMapper.name(), bytes)); + fieldMappings.put(field, new FieldMappingMetadata(fieldMapper.fullPath(), bytes)); } catch (IOException e) { throw new ElasticsearchException("failed to serialize XContent of field [" + field + "]", e); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportUnpromotableShardRefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportUnpromotableShardRefreshAction.java index 43fbe9513b57b..b4357c69c46ae 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportUnpromotableShardRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportUnpromotableShardRefreshAction.java @@ -16,7 +16,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -63,8 +62,17 @@ protected void unpromotableShardOperation( UnpromotableShardRefreshRequest request, ActionListener responseListener ) { + // In edge cases, the search shard may still be in the process of being created when a refresh request arrives. + // We simply respond OK to the request because when the search shard recovers later it will use the latest + // commit from the proper indexing shard. + final var indexService = indicesService.indexService(request.shardId().getIndex()); + final var shard = indexService == null ?
null : indexService.getShardOrNull(request.shardId().id()); + if (shard == null) { + responseListener.onResponse(ActionResponse.Empty.INSTANCE); + return; + } + ActionListener.run(responseListener, listener -> { - IndexShard shard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id()); shard.waitForPrimaryTermAndGeneration( request.getPrimaryTerm(), request.getSegmentGeneration(), diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java index 253f02d30465a..eb90e95cb08be 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java @@ -38,6 +38,7 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.tasks.Task; @@ -214,8 +215,12 @@ public static ClusterState resolveTemporaryState( .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) .build(); - final IndexMetadata indexMetadata = IndexMetadata.builder(indexName).settings(dummySettings).build(); + final IndexMetadata indexMetadata = IndexMetadata.builder(indexName) + // handle mixed-cluster states by passing in minTransportVersion to reset event.ingested range to UNKNOWN if an older version + .eventIngestedRange(getEventIngestedRange(indexName, simulatedState), simulatedState.getMinTransportVersion()) + .settings(dummySettings) + .build(); return ClusterState.builder(simulatedState) .metadata(Metadata.builder(simulatedState.metadata()).put(indexMetadata, true).build()) .build(); @@ -279,7 +284,11 @@ public static Template resolveTemplate( // Then apply settings resolved from templates: dummySettings.put(templateSettings); - final IndexMetadata indexMetadata = IndexMetadata.builder(indexName).settings(dummySettings).build(); + final IndexMetadata indexMetadata = IndexMetadata.builder(indexName) + // handle mixed-cluster states by passing in minTransportVersion to reset event.ingested range to UNKNOWN if an older version + .eventIngestedRange(getEventIngestedRange(indexName, simulatedState), simulatedState.getMinTransportVersion()) + .settings(dummySettings) + .build(); final ClusterState tempClusterState = ClusterState.builder(simulatedState) .metadata(Metadata.builder(simulatedState.metadata()).put(indexMetadata, true).build()) @@ -321,4 +330,9 @@ public static Template resolveTemplate( } return new Template(settings, mergedMapping, aliasesByName, lifecycle); } + + private static IndexLongFieldRange getEventIngestedRange(String indexName, ClusterState simulatedState) { + final IndexMetadata indexMetadata = simulatedState.metadata().index(indexName); + return indexMetadata == null ? 
IndexLongFieldRange.NO_SHARDS : indexMetadata.getEventIngestedRange(); + } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java index b9f753189c077..258e5b4c9a58d 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java @@ -294,8 +294,8 @@ private Map> groupRequestsByShards( ia = concreteIndices.resolveIfAbsent(docWriteRequest); indexOperationValidator.accept(ia, docWriteRequest); - TransportBulkAction.prohibitCustomRoutingOnDataStream(docWriteRequest, metadata); - TransportBulkAction.prohibitAppendWritesInBackingIndices(docWriteRequest, metadata); + TransportBulkAction.prohibitCustomRoutingOnDataStream(docWriteRequest, ia); + TransportBulkAction.prohibitAppendWritesInBackingIndices(docWriteRequest, ia); docWriteRequest.routing(metadata.resolveWriteIndexRouting(docWriteRequest.routing(), docWriteRequest.index())); final Index concreteIndex = docWriteRequest.getConcreteWriteIndex(ia, metadata); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java new file mode 100644 index 0000000000000..ff306cfb08745 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java @@ -0,0 +1,346 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.bulk; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.IngestActionForwarder; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.Assertions; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexingPressure; +import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.ingest.IngestService; +import org.elasticsearch.node.NodeClosedException; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.Objects; +import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; +import java.util.function.LongSupplier; + +/** + * This is an abstract base class for bulk actions. It traverses all indices that the request gets routed to, executes all applicable + * pipelines, and then delegates to the concrete implementation of #doInternalExecute to actually index the data. 
+ */ +public abstract class TransportAbstractBulkAction extends HandledTransportAction { + private static final Logger logger = LogManager.getLogger(TransportAbstractBulkAction.class); + + protected final ThreadPool threadPool; + protected final ClusterService clusterService; + protected final IndexingPressure indexingPressure; + protected final SystemIndices systemIndices; + private final IngestService ingestService; + private final IngestActionForwarder ingestForwarder; + protected final LongSupplier relativeTimeProvider; + protected final Executor writeExecutor; + protected final Executor systemWriteExecutor; + private final ActionType bulkAction; + + public TransportAbstractBulkAction( + ActionType action, + TransportService transportService, + ActionFilters actionFilters, + Writeable.Reader requestReader, + ThreadPool threadPool, + ClusterService clusterService, + IngestService ingestService, + IndexingPressure indexingPressure, + SystemIndices systemIndices, + LongSupplier relativeTimeProvider + ) { + super(action.name(), transportService, actionFilters, requestReader, EsExecutors.DIRECT_EXECUTOR_SERVICE); + this.threadPool = threadPool; + this.clusterService = clusterService; + this.ingestService = ingestService; + this.indexingPressure = indexingPressure; + this.systemIndices = systemIndices; + this.writeExecutor = threadPool.executor(ThreadPool.Names.WRITE); + this.systemWriteExecutor = threadPool.executor(ThreadPool.Names.SYSTEM_WRITE); + this.ingestForwarder = new IngestActionForwarder(transportService); + clusterService.addStateApplier(this.ingestForwarder); + this.relativeTimeProvider = relativeTimeProvider; + this.bulkAction = action; + } + + @Override + protected void doExecute(Task task, BulkRequest bulkRequest, ActionListener listener) { + /* + * This is called on the Transport thread so we can check the indexing + * memory pressure *quickly* but we don't want to keep the transport + * thread busy. Then, as soon as we have the indexing pressure in we fork + * to one of the write thread pools. We do this because juggling the + * bulk request can get expensive for a few reasons: + * 1. Figuring out which shard should receive a bulk request might require + * parsing the _source. + * 2. When dispatching the sub-requests to shards we may have to compress + * them. LZ4 is super fast, but slow enough that it's best not to do it + * on the transport thread, especially for large sub-requests. + * + * We *could* detect these cases and only fork in then, but that is complex + * to get right and the fork is fairly low overhead. + */ + final int indexingOps = bulkRequest.numberOfActions(); + final long indexingBytes = bulkRequest.ramBytesUsed(); + final boolean isOnlySystem = TransportBulkAction.isOnlySystem( + bulkRequest, + clusterService.state().metadata().getIndicesLookup(), + systemIndices + ); + final Releasable releasable = indexingPressure.markCoordinatingOperationStarted(indexingOps, indexingBytes, isOnlySystem); + final ActionListener releasingListener = ActionListener.runBefore(listener, releasable::close); + final Executor executor = isOnlySystem ? 
systemWriteExecutor : writeExecutor; + ensureClusterStateThenForkAndExecute(task, bulkRequest, executor, releasingListener); + } + + private void ensureClusterStateThenForkAndExecute( + Task task, + BulkRequest bulkRequest, + Executor executor, + ActionListener releasingListener + ) { + final ClusterState initialState = clusterService.state(); + final ClusterBlockException blockException = initialState.blocks().globalBlockedException(ClusterBlockLevel.WRITE); + if (blockException != null) { + if (false == blockException.retryable()) { + releasingListener.onFailure(blockException); + return; + } + logger.trace("cluster is blocked, waiting for it to recover", blockException); + final ClusterStateObserver clusterStateObserver = new ClusterStateObserver( + initialState, + clusterService, + bulkRequest.timeout(), + logger, + threadPool.getThreadContext() + ); + clusterStateObserver.waitForNextChange(new ClusterStateObserver.Listener() { + @Override + public void onNewClusterState(ClusterState state) { + forkAndExecute(task, bulkRequest, executor, releasingListener); + } + + @Override + public void onClusterServiceClose() { + releasingListener.onFailure(new NodeClosedException(clusterService.localNode())); + } + + @Override + public void onTimeout(TimeValue timeout) { + releasingListener.onFailure(blockException); + } + }, newState -> false == newState.blocks().hasGlobalBlockWithLevel(ClusterBlockLevel.WRITE)); + } else { + forkAndExecute(task, bulkRequest, executor, releasingListener); + } + } + + private void forkAndExecute(Task task, BulkRequest bulkRequest, Executor executor, ActionListener releasingListener) { + executor.execute(new ActionRunnable<>(releasingListener) { + @Override + protected void doRun() { + applyPipelinesAndDoInternalExecute(task, bulkRequest, executor, releasingListener); + } + }); + } + + private boolean applyPipelines(Task task, BulkRequest bulkRequest, Executor executor, ActionListener listener) { + boolean hasIndexRequestsWithPipelines = false; + final Metadata metadata = clusterService.state().getMetadata(); + for (DocWriteRequest actionRequest : bulkRequest.requests) { + IndexRequest indexRequest = getIndexWriteRequest(actionRequest); + if (indexRequest != null) { + IngestService.resolvePipelinesAndUpdateIndexRequest(actionRequest, indexRequest, metadata); + hasIndexRequestsWithPipelines |= IngestService.hasPipeline(indexRequest); + } + + if (actionRequest instanceof IndexRequest ir) { + if (ir.getAutoGeneratedTimestamp() != IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP) { + throw new IllegalArgumentException("autoGeneratedTimestamp should not be set externally"); + } + } + } + + if (hasIndexRequestsWithPipelines) { + // this method (doExecute) will be called again, but with the bulk requests updated from the ingest node processing but + // also with IngestService.NOOP_PIPELINE_NAME on each request. This ensures that this on the second time through this method, + // this path is never taken. 
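/*
 * Illustrative aside, not part of this change: a minimal sketch of what a concrete subclass of the new
 * TransportAbstractBulkAction has to provide once the base class owns indexing-pressure accounting,
 * cluster-block handling and pipeline resolution. The class name, action name and trivial method bodies
 * below are hypothetical; only the constructor and abstract-method signatures are taken from the code above.
 */
package org.elasticsearch.action.bulk;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.index.IndexingPressure;
import org.elasticsearch.indices.SystemIndices;
import org.elasticsearch.ingest.IngestService;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.concurrent.Executor;

public class MinimalBulkAction extends TransportAbstractBulkAction {

    // hypothetical action name, chosen only for this sketch
    public static final ActionType<BulkResponse> TYPE = new ActionType<>("indices:data/write/minimal_bulk");

    public MinimalBulkAction(
        TransportService transportService,
        ActionFilters actionFilters,
        ThreadPool threadPool,
        ClusterService clusterService,
        IngestService ingestService,
        IndexingPressure indexingPressure,
        SystemIndices systemIndices
    ) {
        super(
            TYPE,
            transportService,
            actionFilters,
            BulkRequest::new,
            threadPool,
            clusterService,
            ingestService,
            indexingPressure,
            systemIndices,
            System::nanoTime
        );
    }

    @Override
    protected boolean shouldStoreFailure(String indexName, Metadata metadata, long epochMillis) {
        return false; // this sketch never redirects failed documents to a failure store
    }

    @Override
    protected void doInternalExecute(
        Task task,
        BulkRequest bulkRequest,
        Executor executor,
        ActionListener<BulkResponse> listener,
        long relativeStartTimeNanos
    ) {
        // by the time this runs, pipelines have been applied and indexing pressure has been accounted for;
        // a real implementation would group items by shard and dispatch them, as TransportBulkAction does below
        listener.onResponse(new BulkResponse(new BulkItemResponse[0], 0));
    }
}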
+ ActionListener.run(listener, l -> { + if (Assertions.ENABLED) { + final boolean arePipelinesResolved = bulkRequest.requests() + .stream() + .map(TransportBulkAction::getIndexWriteRequest) + .filter(Objects::nonNull) + .allMatch(IndexRequest::isPipelineResolved); + assert arePipelinesResolved : bulkRequest; + } + if (clusterService.localNode().isIngestNode()) { + processBulkIndexIngestRequest(task, bulkRequest, executor, metadata, l); + } else { + ingestForwarder.forwardIngestRequest(bulkAction, bulkRequest, l); + } + }); + return true; + } + return false; + } + + private void processBulkIndexIngestRequest( + Task task, + BulkRequest original, + Executor executor, + Metadata metadata, + ActionListener listener + ) { + final long ingestStartTimeInNanos = System.nanoTime(); + final BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original); + getIngestService(original).executeBulkRequest( + original.numberOfActions(), + () -> bulkRequestModifier, + bulkRequestModifier::markItemAsDropped, + (indexName) -> shouldStoreFailure(indexName, metadata, threadPool.absoluteTimeInMillis()), + bulkRequestModifier::markItemForFailureStore, + bulkRequestModifier::markItemAsFailed, + (originalThread, exception) -> { + if (exception != null) { + logger.debug("failed to execute pipeline for a bulk request", exception); + listener.onFailure(exception); + } else { + long ingestTookInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - ingestStartTimeInNanos); + BulkRequest bulkRequest = bulkRequestModifier.getBulkRequest(); + ActionListener actionListener = bulkRequestModifier.wrapActionListenerIfNeeded( + ingestTookInMillis, + listener + ); + if (bulkRequest.requests().isEmpty()) { + // at this stage, the transport bulk action can't deal with a bulk request with no requests, + // so we stop and send an empty response back to the client. + // (this will happen if pre-processing all items in the bulk failed) + actionListener.onResponse(new BulkResponse(new BulkItemResponse[0], 0)); + } else { + ActionRunnable runnable = new ActionRunnable<>(actionListener) { + @Override + protected void doRun() { + applyPipelinesAndDoInternalExecute(task, bulkRequest, executor, actionListener); + } + + @Override + public boolean isForceExecution() { + // If we fork back to a write thread we should **not** fail just because the tp queue is full. + // (Otherwise the work done during ingest will be lost) + // It is okay to force execution here. Throttling of write requests happens prior to + // ingest when a node receives a bulk request. + return true; + } + }; + // If a processor went async and returned a response on a different thread, then + // before we continue the bulk request we should fork back on a write thread: + if (originalThread == Thread.currentThread()) { + runnable.run(); + } else { + executor.execute(runnable); + } + } + } + }, + executor + ); + } + + /** + * Determines if an index name is associated with either an existing data stream or a template + * for one that has the failure store enabled. + * @param indexName The index name to check. + * @param metadata Cluster state metadata. + * @param epochMillis A timestamp to use when resolving date math in the index name. + * @return true if this is not a simulation, and the given index name corresponds to a data stream with a failure store + * or if it matches a template that has a data stream failure store enabled.
+ */ + protected abstract boolean shouldStoreFailure(String indexName, Metadata metadata, long epochMillis); + + /** + * Retrieves the {@link IndexRequest} from the provided {@link DocWriteRequest} for index or upsert actions. Upserts are + * modeled as {@link IndexRequest} inside the {@link UpdateRequest}. Ignores {@link org.elasticsearch.action.delete.DeleteRequest}'s + * + * @param docWriteRequest The request to find the {@link IndexRequest} + * @return the found {@link IndexRequest} or {@code null} if one can not be found. + */ + public static IndexRequest getIndexWriteRequest(DocWriteRequest docWriteRequest) { + IndexRequest indexRequest = null; + if (docWriteRequest instanceof IndexRequest) { + indexRequest = (IndexRequest) docWriteRequest; + } else if (docWriteRequest instanceof UpdateRequest updateRequest) { + indexRequest = updateRequest.docAsUpsert() ? updateRequest.doc() : updateRequest.upsertRequest(); + } + return indexRequest; + } + + /* + * This returns the IngestService to be used for the given request. The default implementation ignores the request and always returns + * the same ingestService, but child classes might use information in the request in creating an IngestService specific to that request. + */ + protected IngestService getIngestService(BulkRequest request) { + return ingestService; + } + + protected long relativeTime() { + return relativeTimeProvider.getAsLong(); + } + + protected long buildTookInMillis(long startTimeNanos) { + return TimeUnit.NANOSECONDS.toMillis(relativeTime() - startTimeNanos); + } + + private void applyPipelinesAndDoInternalExecute( + Task task, + BulkRequest bulkRequest, + Executor executor, + ActionListener listener + ) { + final long relativeStartTime = threadPool.relativeTimeInMillis(); + if (applyPipelines(task, bulkRequest, executor, listener) == false) { + doInternalExecute(task, bulkRequest, executor, listener, relativeStartTime); + } + } + + /** + * This method creates any missing resources and actually applies the BulkRequest to the relevant indices + * @param task The task in which this work is being done + * @param bulkRequest The BulkRequest of changes to make to indices + * @param executor The executor for the thread pool in which the work is to be done + * @param listener The listener to be notified of results + * @param relativeStartTimeNanos The relative start time of this bulk load, to be used in computing the time taken for the BulkResponse + */ + protected abstract void doInternalExecute( + Task task, + BulkRequest bulkRequest, + Executor executor, + ActionListener listener, + long relativeStartTimeNanos + ); + +} diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index b14a63362cb9f..d9d5bc92a24d1 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -24,20 +24,14 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.ingest.IngestActionForwarder; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.RefCountingRunnable; import 
org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; -import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateObserver; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexAbstraction; @@ -48,10 +42,6 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.core.Assertions; -import org.elasticsearch.core.Releasable; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.features.FeatureService; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; @@ -59,10 +49,8 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.ingest.IngestService; -import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.transport.TransportService; import java.util.HashMap; @@ -73,7 +61,6 @@ import java.util.Set; import java.util.SortedMap; import java.util.concurrent.Executor; -import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.function.LongSupplier; @@ -84,29 +71,18 @@ * Groups bulk request items by shard, optionally creating non-existent indices and * delegates to {@link TransportShardBulkAction} for shard-level bulk execution */ -public class TransportBulkAction extends HandledTransportAction { +public class TransportBulkAction extends TransportAbstractBulkAction { public static final String NAME = "indices:data/write/bulk"; public static final ActionType TYPE = new ActionType<>(NAME); private static final Logger logger = LogManager.getLogger(TransportBulkAction.class); public static final String LAZY_ROLLOVER_ORIGIN = "lazy_rollover"; - private final ActionType bulkAction; - private final ThreadPool threadPool; - private final ClusterService clusterService; - private final IngestService ingestService; private final FeatureService featureService; - private final LongSupplier relativeTimeProvider; - private final IngestActionForwarder ingestForwarder; private final NodeClient client; private final IndexNameExpressionResolver indexNameExpressionResolver; - private final IndexingPressure indexingPressure; - private final SystemIndices systemIndices; private final OriginSettingClient rolloverClient; - private final Executor writeExecutor; - private final Executor systemWriteExecutor; - @Inject public TransportBulkAction( ThreadPool threadPool, @@ -180,40 +156,23 @@ public TransportBulkAction( SystemIndices systemIndices, LongSupplier relativeTimeProvider ) { - super(bulkAction.name(), transportService, actionFilters, requestReader, EsExecutors.DIRECT_EXECUTOR_SERVICE); + super( + bulkAction, + transportService, + actionFilters, + requestReader, + threadPool, + clusterService, + ingestService, + 
indexingPressure, + systemIndices, + relativeTimeProvider + ); Objects.requireNonNull(relativeTimeProvider); - this.bulkAction = bulkAction; - this.threadPool = threadPool; - this.clusterService = clusterService; - this.ingestService = ingestService; this.featureService = featureService; - this.relativeTimeProvider = relativeTimeProvider; - this.ingestForwarder = new IngestActionForwarder(transportService); this.client = client; this.indexNameExpressionResolver = indexNameExpressionResolver; - this.indexingPressure = indexingPressure; - this.systemIndices = systemIndices; - clusterService.addStateApplier(this.ingestForwarder); this.rolloverClient = new OriginSettingClient(client, LAZY_ROLLOVER_ORIGIN); - this.writeExecutor = threadPool.executor(Names.WRITE); - this.systemWriteExecutor = threadPool.executor(Names.SYSTEM_WRITE); - } - - /** - * Retrieves the {@link IndexRequest} from the provided {@link DocWriteRequest} for index or upsert actions. Upserts are - * modeled as {@link IndexRequest} inside the {@link UpdateRequest}. Ignores {@link org.elasticsearch.action.delete.DeleteRequest}'s - * - * @param docWriteRequest The request to find the {@link IndexRequest} - * @return the found {@link IndexRequest} or {@code null} if one can not be found. - */ - public static IndexRequest getIndexWriteRequest(DocWriteRequest docWriteRequest) { - IndexRequest indexRequest = null; - if (docWriteRequest instanceof IndexRequest) { - indexRequest = (IndexRequest) docWriteRequest; - } else if (docWriteRequest instanceof UpdateRequest updateRequest) { - indexRequest = updateRequest.docAsUpsert() ? updateRequest.doc() : updateRequest.upsertRequest(); - } - return indexRequest; } public static ActionListener unwrappingSingleItemBulkResponse( @@ -233,123 +192,13 @@ public static ActionListe } @Override - protected void doExecute(Task task, BulkRequest bulkRequest, ActionListener listener) { - /* - * This is called on the Transport thread so we can check the indexing - * memory pressure *quickly* but we don't want to keep the transport - * thread busy. Then, as soon as we have the indexing pressure in we fork - * to one of the write thread pools. We do this because juggling the - * bulk request can get expensive for a few reasons: - * 1. Figuring out which shard should receive a bulk request might require - * parsing the _source. - * 2. When dispatching the sub-requests to shards we may have to compress - * them. LZ4 is super fast, but slow enough that it's best not to do it - * on the transport thread, especially for large sub-requests. - * - * We *could* detect these cases and only fork in then, but that is complex - * to get right and the fork is fairly low overhead. - */ - final int indexingOps = bulkRequest.numberOfActions(); - final long indexingBytes = bulkRequest.ramBytesUsed(); - final boolean isOnlySystem = isOnlySystem(bulkRequest, clusterService.state().metadata().getIndicesLookup(), systemIndices); - final Releasable releasable = indexingPressure.markCoordinatingOperationStarted(indexingOps, indexingBytes, isOnlySystem); - final ActionListener releasingListener = ActionListener.runBefore(listener, releasable::close); - final Executor executor = isOnlySystem ? 
systemWriteExecutor : writeExecutor; - ensureClusterStateThenForkAndExecute(task, bulkRequest, executor, releasingListener); - } - - private void ensureClusterStateThenForkAndExecute( + protected void doInternalExecute( Task task, BulkRequest bulkRequest, Executor executor, - ActionListener releasingListener + ActionListener listener, + long relativeStartTime ) { - final ClusterState initialState = clusterService.state(); - final ClusterBlockException blockException = initialState.blocks().globalBlockedException(ClusterBlockLevel.WRITE); - if (blockException != null) { - if (false == blockException.retryable()) { - releasingListener.onFailure(blockException); - return; - } - logger.trace("cluster is blocked, waiting for it to recover", blockException); - final ClusterStateObserver clusterStateObserver = new ClusterStateObserver( - initialState, - clusterService, - bulkRequest.timeout(), - logger, - threadPool.getThreadContext() - ); - clusterStateObserver.waitForNextChange(new ClusterStateObserver.Listener() { - @Override - public void onNewClusterState(ClusterState state) { - forkAndExecute(task, bulkRequest, executor, releasingListener); - } - - @Override - public void onClusterServiceClose() { - releasingListener.onFailure(new NodeClosedException(clusterService.localNode())); - } - - @Override - public void onTimeout(TimeValue timeout) { - releasingListener.onFailure(blockException); - } - }, newState -> false == newState.blocks().hasGlobalBlockWithLevel(ClusterBlockLevel.WRITE)); - } else { - forkAndExecute(task, bulkRequest, executor, releasingListener); - } - } - - private void forkAndExecute(Task task, BulkRequest bulkRequest, Executor executor, ActionListener releasingListener) { - executor.execute(new ActionRunnable<>(releasingListener) { - @Override - protected void doRun() { - doInternalExecute(task, bulkRequest, executor, releasingListener); - } - }); - } - - protected void doInternalExecute(Task task, BulkRequest bulkRequest, Executor executor, ActionListener listener) { - final long startTime = relativeTime(); - - boolean hasIndexRequestsWithPipelines = false; - final Metadata metadata = clusterService.state().getMetadata(); - for (DocWriteRequest actionRequest : bulkRequest.requests) { - IndexRequest indexRequest = getIndexWriteRequest(actionRequest); - if (indexRequest != null) { - IngestService.resolvePipelinesAndUpdateIndexRequest(actionRequest, indexRequest, metadata); - hasIndexRequestsWithPipelines |= IngestService.hasPipeline(indexRequest); - } - - if (actionRequest instanceof IndexRequest ir) { - if (ir.getAutoGeneratedTimestamp() != IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP) { - throw new IllegalArgumentException("autoGeneratedTimestamp should not be set externally"); - } - } - } - - if (hasIndexRequestsWithPipelines) { - // this method (doExecute) will be called again, but with the bulk requests updated from the ingest node processing but - // also with IngestService.NOOP_PIPELINE_NAME on each request. This ensures that this on the second time through this method, - // this path is never taken. 
- ActionListener.run(listener, l -> { - if (Assertions.ENABLED) { - final boolean arePipelinesResolved = bulkRequest.requests() - .stream() - .map(TransportBulkAction::getIndexWriteRequest) - .filter(Objects::nonNull) - .allMatch(IndexRequest::isPipelineResolved); - assert arePipelinesResolved : bulkRequest; - } - if (clusterService.localNode().isIngestNode()) { - processBulkIndexIngestRequest(task, bulkRequest, executor, metadata, l); - } else { - ingestForwarder.forwardIngestRequest(bulkAction, bulkRequest, l); - } - }); - return; - } - Map indicesToAutoCreate = new HashMap<>(); Set dataStreamsToBeRolledOver = new HashSet<>(); Set failureStoresToBeRolledOver = new HashSet<>(); @@ -363,7 +212,7 @@ protected void doInternalExecute(Task task, BulkRequest bulkRequest, Executor ex indicesToAutoCreate, dataStreamsToBeRolledOver, failureStoresToBeRolledOver, - startTime + relativeStartTime ); } @@ -568,21 +417,12 @@ private static void failRequestsWhenPrerequisiteActionFailed( } } - /* - * This returns the IngestService to be used for the given request. The default implementation ignores the request and always returns - * the same ingestService, but child classes might use information in the request in creating an IngestService specific to that request. - */ - protected IngestService getIngestService(BulkRequest request) { - return ingestService; - } - - static void prohibitAppendWritesInBackingIndices(DocWriteRequest writeRequest, Metadata metadata) { + static void prohibitAppendWritesInBackingIndices(DocWriteRequest writeRequest, IndexAbstraction indexAbstraction) { DocWriteRequest.OpType opType = writeRequest.opType(); if ((opType == OpType.CREATE || opType == OpType.INDEX) == false) { // op type not create or index, then bail early return; } - IndexAbstraction indexAbstraction = metadata.getIndicesLookup().get(writeRequest.index()); if (indexAbstraction == null) { return; } @@ -611,9 +451,7 @@ static void prohibitAppendWritesInBackingIndices(DocWriteRequest writeRequest + "] instead" ); } - if (opType == DocWriteRequest.OpType.INDEX - && writeRequest.ifPrimaryTerm() == UNASSIGNED_PRIMARY_TERM - && writeRequest.ifSeqNo() == UNASSIGNED_SEQ_NO) { + if (writeRequest.ifPrimaryTerm() == UNASSIGNED_PRIMARY_TERM && writeRequest.ifSeqNo() == UNASSIGNED_SEQ_NO) { throw new IllegalArgumentException( "index request with op_type=index and no if_primary_term and if_seq_no set " + "targeting backing indices is disallowed, target corresponding data stream [" @@ -623,8 +461,7 @@ static void prohibitAppendWritesInBackingIndices(DocWriteRequest writeRequest } } - static void prohibitCustomRoutingOnDataStream(DocWriteRequest writeRequest, Metadata metadata) { - IndexAbstraction indexAbstraction = metadata.getIndicesLookup().get(writeRequest.index()); + static void prohibitCustomRoutingOnDataStream(DocWriteRequest writeRequest, IndexAbstraction indexAbstraction) { if (indexAbstraction == null) { return; } @@ -677,10 +514,6 @@ private static boolean setResponseFailureIfIndexMatches( return false; } - protected long buildTookInMillis(long startTimeNanos) { - return TimeUnit.NANOSECONDS.toMillis(relativeTime() - startTimeNanos); - } - void executeBulk( Task task, BulkRequest bulkRequest, @@ -706,72 +539,6 @@ void executeBulk( ).run(); } - private long relativeTime() { - return relativeTimeProvider.getAsLong(); - } - - private void processBulkIndexIngestRequest( - Task task, - BulkRequest original, - Executor executor, - Metadata metadata, - ActionListener listener - ) { - final long ingestStartTimeInNanos = 
System.nanoTime(); - final BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original); - getIngestService(original).executeBulkRequest( - original.numberOfActions(), - () -> bulkRequestModifier, - bulkRequestModifier::markItemAsDropped, - (indexName) -> shouldStoreFailure(indexName, metadata, threadPool.absoluteTimeInMillis()), - bulkRequestModifier::markItemForFailureStore, - bulkRequestModifier::markItemAsFailed, - (originalThread, exception) -> { - if (exception != null) { - logger.debug("failed to execute pipeline for a bulk request", exception); - listener.onFailure(exception); - } else { - long ingestTookInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - ingestStartTimeInNanos); - BulkRequest bulkRequest = bulkRequestModifier.getBulkRequest(); - ActionListener actionListener = bulkRequestModifier.wrapActionListenerIfNeeded( - ingestTookInMillis, - listener - ); - if (bulkRequest.requests().isEmpty()) { - // at this stage, the transport bulk action can't deal with a bulk request with no requests, - // so we stop and send an empty response back to the client. - // (this will happen if pre-processing all items in the bulk failed) - actionListener.onResponse(new BulkResponse(new BulkItemResponse[0], 0)); - } else { - ActionRunnable runnable = new ActionRunnable<>(actionListener) { - @Override - protected void doRun() { - doInternalExecute(task, bulkRequest, executor, actionListener); - } - - @Override - public boolean isForceExecution() { - // If we fork back to a write thread we **not** should fail, because tp queue is full. - // (Otherwise the work done during ingest will be lost) - // It is okay to force execution here. Throttling of write requests happens prior to - // ingest when a node receives a bulk request. - return true; - } - }; - // If a processor went async and returned a response on a different thread then - // before we continue the bulk request we should fork back on a write thread: - if (originalThread == Thread.currentThread()) { - runnable.run(); - } else { - executor.execute(runnable); - } - } - } - }, - executor - ); - } - /** * Determines if an index name is associated with either an existing data stream or a template * for one that has the failure store enabled. @@ -781,13 +548,18 @@ public boolean isForceExecution() { * @return true if the given index name corresponds to a data stream with a failure store, * or if it matches a template that has a data stream failure store enabled. */ - static boolean shouldStoreFailure(String indexName, Metadata metadata, long epochMillis) { + static boolean shouldStoreFailureInternal(String indexName, Metadata metadata, long epochMillis) { return DataStream.isFailureStoreFeatureFlagEnabled() && resolveFailureStoreFromMetadata(indexName, metadata, epochMillis).or( () -> resolveFailureStoreFromTemplate(indexName, metadata) ).orElse(false); } + @Override + protected boolean shouldStoreFailure(String indexName, Metadata metadata, long time) { + return shouldStoreFailureInternal(indexName, metadata, time); + } + /** * Determines if an index name is associated with an existing data stream that has a failure store enabled. * @param indexName The index name to check. 
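The hunk above folds TransportBulkAction's shared coordination logic into a TransportAbstractBulkAction parent and turns failure-store resolution into an overridable shouldStoreFailure hook, which TransportSimulateBulkAction later overrides to always return false. What follows is a minimal, self-contained sketch of that template-method shape, written with invented names: apart from doInternalExecute and shouldStoreFailure, none of these classes or methods are the actual Elasticsearch types.

// Hypothetical sketch, not the real Elasticsearch classes: a shared abstract coordinator owns the
// timing plumbing, while subclasses supply doInternalExecute() and the shouldStoreFailure() hook.
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.function.LongSupplier;

abstract class AbstractBulkCoordinator {
    private final LongSupplier relativeTimeNanos;

    AbstractBulkCoordinator(LongSupplier relativeTimeNanos) {
        this.relativeTimeNanos = relativeTimeNanos;
    }

    // Shared entry point: capture the relative start time once, then delegate to the subclass.
    final void execute(List<String> bulkItems) {
        final long relativeStartTime = relativeTimeNanos.getAsLong();
        doInternalExecute(bulkItems, relativeStartTime);
    }

    // Subclasses decide what "executing" the bulk request actually means.
    protected abstract void doInternalExecute(List<String> bulkItems, long relativeStartTime);

    // Hook: should failed documents for this index be redirected to a failure store?
    protected abstract boolean shouldStoreFailure(String indexName);

    // Shared helper, mirroring the took-time computation that stays in the parent.
    final long tookInMillis(long relativeStartTime) {
        return TimeUnit.NANOSECONDS.toMillis(relativeTimeNanos.getAsLong() - relativeStartTime);
    }
}

final class SimulateOnlyCoordinator extends AbstractBulkCoordinator {
    SimulateOnlyCoordinator() {
        super(System::nanoTime);
    }

    @Override
    protected void doInternalExecute(List<String> bulkItems, long relativeStartTime) {
        // A simulation only reports what would happen; nothing is indexed and no state changes.
        System.out.println("simulated " + bulkItems.size() + " item(s) in " + tookInMillis(relativeStartTime) + " ms");
    }

    @Override
    protected boolean shouldStoreFailure(String indexName) {
        return false; // a simulation never writes to a failure store
    }
}

Keeping shouldStoreFailure as the single failure-store decision point is what lets the simulate-only subclass opt out of persistent side effects without re-implementing any of the coordination flow.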
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index aca7c8752ef4d..67e7e3be72a02 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -67,10 +67,10 @@ import java.io.IOException; import java.util.Map; -import java.util.Optional; import java.util.concurrent.Executor; import java.util.function.Consumer; import java.util.function.LongSupplier; +import java.util.function.ObjLongConsumer; import static org.elasticsearch.core.Strings.format; @@ -151,7 +151,7 @@ protected void dispatchedShardOperationOnPrimary( assert update != null; assert shardId != null; mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), update, mappingListener); - }, mappingUpdateListener -> observer.waitForNextChange(new ClusterStateObserver.Listener() { + }, (mappingUpdateListener, initialMappingVersion) -> observer.waitForNextChange(new ClusterStateObserver.Listener() { @Override public void onNewClusterState(ClusterState state) { mappingUpdateListener.onResponse(null); @@ -166,6 +166,9 @@ public void onClusterServiceClose() { public void onTimeout(TimeValue timeout) { mappingUpdateListener.onFailure(new MapperException("timed out while waiting for a dynamic mapping update")); } + }, clusterState -> { + var indexMetadata = clusterState.metadata().index(primary.shardId().getIndex()); + return indexMetadata == null || (indexMetadata.mapping() != null && indexMetadata.getMappingVersion() != initialMappingVersion); }), listener, executor(primary), postWriteRefresh, postWriteAction, documentParsingProvider); } @@ -185,7 +188,7 @@ public static void performOnPrimary( UpdateHelper updateHelper, LongSupplier nowInMillisSupplier, MappingUpdatePerformer mappingUpdater, - Consumer> waitForMappingUpdate, + ObjLongConsumer> waitForMappingUpdate, ActionListener> listener, Executor executor ) { @@ -210,7 +213,7 @@ public static void performOnPrimary( UpdateHelper updateHelper, LongSupplier nowInMillisSupplier, MappingUpdatePerformer mappingUpdater, - Consumer> waitForMappingUpdate, + ObjLongConsumer> waitForMappingUpdate, ActionListener> listener, Executor executor, @Nullable PostWriteRefresh postWriteRefresh, @@ -309,7 +312,7 @@ static boolean executeBulkItemRequest( UpdateHelper updateHelper, LongSupplier nowInMillisSupplier, MappingUpdatePerformer mappingUpdater, - Consumer> waitForMappingUpdate, + ObjLongConsumer> waitForMappingUpdate, ActionListener itemDoneListener, DocumentParsingProvider documentParsingProvider ) throws Exception { @@ -359,7 +362,8 @@ static boolean executeBulkItemRequest( ); } else { final IndexRequest request = context.getRequestToExecute(); - DocumentSizeObserver documentSizeObserver = getDocumentSizeObserver(documentParsingProvider, request); + + DocumentSizeObserver documentSizeObserver = documentParsingProvider.newDocumentSizeObserver(request); context.setDocumentSizeObserver(documentSizeObserver); final SourceToParse sourceToParse = new SourceToParse( @@ -379,83 +383,80 @@ static boolean executeBulkItemRequest( request.getAutoGeneratedTimestamp(), request.isRetry() ); - + if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { + return handleMappingUpdateRequired( + context, + mappingUpdater, + waitForMappingUpdate, + itemDoneListener, + primary, + result, + version, + updateResult + ); + } } - if 
(result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { - - try { - Optional mergedSource = Optional.ofNullable( - primary.mapperService() - .merge( - MapperService.SINGLE_MAPPING_NAME, - new CompressedXContent(result.getRequiredMappingUpdate()), - MapperService.MergeReason.MAPPING_AUTO_UPDATE_PREFLIGHT - ) - ).map(DocumentMapper::mappingSource); - Optional previousSource = Optional.ofNullable(primary.mapperService().documentMapper()) - .map(DocumentMapper::mappingSource); + onComplete(result, context, updateResult); + return true; + } - if (mergedSource.equals(previousSource)) { - context.resetForNoopMappingUpdateRetry(primary.mapperService().mappingVersion()); - return true; - } - } catch (Exception e) { - logger.info(() -> format("%s mapping update rejected by primary", primary.shardId()), e); - assert result.getId() != null; - onComplete(exceptionToResult(e, primary, isDelete, version, result.getId()), context, updateResult); + private static boolean handleMappingUpdateRequired( + BulkPrimaryExecutionContext context, + MappingUpdatePerformer mappingUpdater, + ObjLongConsumer> waitForMappingUpdate, + ActionListener itemDoneListener, + IndexShard primary, + Engine.Result result, + long version, + UpdateHelper.Result updateResult + ) { + final var mapperService = primary.mapperService(); + final long initialMappingVersion = mapperService.mappingVersion(); + try { + CompressedXContent mergedSource = mapperService.merge( + MapperService.SINGLE_MAPPING_NAME, + new CompressedXContent(result.getRequiredMappingUpdate()), + MapperService.MergeReason.MAPPING_AUTO_UPDATE_PREFLIGHT + ).mappingSource(); + final DocumentMapper existingDocumentMapper = mapperService.documentMapper(); + if (existingDocumentMapper != null && mergedSource.equals(existingDocumentMapper.mappingSource())) { + context.resetForNoopMappingUpdateRetry(mapperService.mappingVersion()); return true; } + } catch (Exception e) { + logger.info(() -> format("%s mapping update rejected by primary", primary.shardId()), e); + assert result.getId() != null; + onComplete(exceptionToResult(e, primary, false, version, result.getId()), context, updateResult); + return true; + } - mappingUpdater.updateMappings(result.getRequiredMappingUpdate(), primary.shardId(), new ActionListener<>() { - @Override - public void onResponse(Void v) { - context.markAsRequiringMappingUpdate(); - waitForMappingUpdate.accept(ActionListener.runAfter(new ActionListener<>() { - @Override - public void onResponse(Void v) { - assert context.requiresWaitingForMappingUpdate(); - context.resetForMappingUpdateRetry(); - } - - @Override - public void onFailure(Exception e) { - context.failOnMappingUpdate(e); - } - }, () -> itemDoneListener.onResponse(null))); - } + mappingUpdater.updateMappings(result.getRequiredMappingUpdate(), primary.shardId(), new ActionListener<>() { + @Override + public void onResponse(Void v) { + context.markAsRequiringMappingUpdate(); + waitForMappingUpdate.accept(ActionListener.runAfter(new ActionListener<>() { + @Override + public void onResponse(Void v) { + assert context.requiresWaitingForMappingUpdate(); + context.resetForMappingUpdateRetry(); + } - @Override - public void onFailure(Exception e) { - onComplete(exceptionToResult(e, primary, isDelete, version, result.getId()), context, updateResult); - // Requesting mapping update failed, so we don't have to wait for a cluster state update - assert context.isInitial(); - itemDoneListener.onResponse(null); - } - }); - return false; - } else { - onComplete(result, context, 
updateResult); - } - return true; - } + @Override + public void onFailure(Exception e) { + context.failOnMappingUpdate(e); + } + }, () -> itemDoneListener.onResponse(null)), initialMappingVersion); + } - /** - * Creates a new document size observer - * @param documentParsingProvider a provider to create a new observer. - * @param request an index request to provide information about bytes being already parsed. - * @return a Fixed version of DocumentSizeObserver if parsing already happened (in IngestService, UpdateHelper) - * and there is a value to be reported >0 - * It would be pre-populated with information about how many bytes were already parsed - * or a noop instance if parsed bytes in IngestService/UpdateHelper was 0 (like when empty doc or script in update) - * or return a new DocumentSizeObserver that will be used when parsing. - */ - private static DocumentSizeObserver getDocumentSizeObserver(DocumentParsingProvider documentParsingProvider, IndexRequest request) { - if (request.getNormalisedBytesParsed() > 0) { - return documentParsingProvider.newFixedSizeDocumentObserver(request.getNormalisedBytesParsed()); - } else if (request.getNormalisedBytesParsed() == 0) { - return DocumentSizeObserver.EMPTY_INSTANCE; - } // request.getNormalisedBytesParsed() -1, meaning normalisedBytesParsed isn't set as parsing wasn't done yet - return documentParsingProvider.newDocumentSizeObserver(); + @Override + public void onFailure(Exception e) { + onComplete(exceptionToResult(e, primary, false, version, result.getId()), context, updateResult); + // Requesting mapping update failed, so we don't have to wait for a cluster state update + assert context.isInitial(); + itemDoneListener.onResponse(null); + } + }); + return false; } private static Engine.Result exceptionToResult(Exception e, IndexShard primary, boolean isDelete, long version, String id) { diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java index f0f950ca324bf..95c1c0ce05d89 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java @@ -10,16 +10,13 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.ingest.SimulateIndexResponse; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.client.internal.node.NodeClient; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.index.IndexingPressure; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.ingest.IngestService; @@ -28,60 +25,53 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.Map; -import java.util.Set; import java.util.concurrent.Executor; -public class TransportSimulateBulkAction extends TransportBulkAction { +/** + * This action simulates bulk indexing data. 
Pipelines are executed for all indices that the request routes to, but no data is actually + * indexed and no state is changed. Unlike TransportBulkAction, this does not push the work out to the nodes where the shards live (since + * shards are not actually modified). + */ +public class TransportSimulateBulkAction extends TransportAbstractBulkAction { + @Inject public TransportSimulateBulkAction( ThreadPool threadPool, TransportService transportService, ClusterService clusterService, IngestService ingestService, - FeatureService featureService, - NodeClient client, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, IndexingPressure indexingPressure, SystemIndices systemIndices ) { super( SimulateBulkAction.INSTANCE, + transportService, + actionFilters, SimulateBulkRequest::new, threadPool, - transportService, clusterService, ingestService, - featureService, - client, - actionFilters, - indexNameExpressionResolver, indexingPressure, systemIndices, System::nanoTime ); } - /* - * This overrides indexData in TransportBulkAction in order to _not_ actually create any indices or index any data. Instead, each - * request gets a corresponding CREATE response, using information from the request. - */ @Override - protected void createMissingIndicesAndIndexData( + protected void doInternalExecute( Task task, BulkRequest bulkRequest, Executor executor, ActionListener listener, - Map indicesToAutoCreate, - Set dataStreamsToRollover, - Set failureStoresToBeRolledOver, - long startTime + long relativeStartTime ) { final AtomicArray responses = new AtomicArray<>(bulkRequest.requests.size()); for (int i = 0; i < bulkRequest.requests.size(); i++) { - DocWriteRequest request = bulkRequest.requests.get(i); - assert request instanceof IndexRequest; // This action is only ever called with IndexRequests + DocWriteRequest docRequest = bulkRequest.requests.get(i); + assert docRequest instanceof IndexRequest : "TransportSimulateBulkAction should only ever be called with IndexRequests"; + IndexRequest request = (IndexRequest) docRequest; + responses.set( i, BulkItemResponse.success( @@ -91,15 +81,17 @@ protected void createMissingIndicesAndIndexData( request.id(), request.index(), request.version(), - ((IndexRequest) request).source(), - ((IndexRequest) request).getContentType(), - ((IndexRequest) request).getExecutedPipelines(), + request.source(), + request.getContentType(), + request.getExecutedPipelines(), null ) ) ); } - listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTime))); + listener.onResponse( + new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(relativeStartTime)) + ); } /* @@ -111,4 +103,10 @@ protected IngestService getIngestService(BulkRequest request) { IngestService rawIngestService = super.getIngestService(request); return new SimulateIngestService(rawIngestService, request); } + + @Override + protected boolean shouldStoreFailure(String indexName, Metadata metadata, long time) { + // A simulate bulk request should not change any persistent state in the system, so we never write to the failure store + return false; + } } diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 794a3f38b56bb..5463f9fec4d2a 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ 
b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -23,7 +23,6 @@ import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.routing.IndexRouting; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -147,6 +146,7 @@ public class IndexRequest extends ReplicatedWriteRequest implement */ private Object rawTimestamp; private long normalisedBytesParsed = -1; + private boolean originatesFromUpdateByScript; public IndexRequest(StreamInput in) throws IOException { this(null, in); @@ -165,10 +165,8 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); pipeline = readPipelineName(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_5_0)) { - finalPipeline = readPipelineName(in); - isPipelineResolved = in.readBoolean(); - } + finalPipeline = readPipelineName(in); + isPipelineResolved = in.readBoolean(); isRetry = in.readBoolean(); autoGeneratedTimestamp = in.readLong(); if (in.readBoolean()) { @@ -179,14 +177,8 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio } ifSeqNo = in.readZLong(); ifPrimaryTerm = in.readVLong(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - requireAlias = in.readBoolean(); - } else { - requireAlias = false; - } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_13_0)) { - dynamicTemplates = in.readMap(StreamInput::readString); - } + requireAlias = in.readBoolean(); + dynamicTemplates = in.readMap(StreamInput::readString); if (in.getTransportVersion().onOrAfter(PIPELINES_HAVE_RUN_FIELD_ADDED) && in.getTransportVersion().before(TransportVersions.V_8_13_0)) { in.readBoolean(); @@ -206,6 +198,12 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio } else { requireDataStream = false; } + + if (in.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN)) { + originatesFromUpdateByScript = in.readBoolean(); + } else { + originatesFromUpdateByScript = false; + } } public IndexRequest() { @@ -737,12 +735,8 @@ private void writeBody(StreamOutput out) throws IOException { out.writeLong(version); out.writeByte(versionType.getValue()); out.writeOptionalString(pipeline); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_5_0)) { - out.writeOptionalString(finalPipeline); - } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_5_0)) { - out.writeBoolean(isPipelineResolved); - } + out.writeOptionalString(finalPipeline); + out.writeBoolean(isPipelineResolved); out.writeBoolean(isRetry); out.writeLong(autoGeneratedTimestamp); if (contentType != null) { @@ -753,21 +747,8 @@ private void writeBody(StreamOutput out) throws IOException { } out.writeZLong(ifSeqNo); out.writeVLong(ifPrimaryTerm); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - out.writeBoolean(requireAlias); - } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_13_0)) { - out.writeMap(dynamicTemplates, StreamOutput::writeString); - } else { - if (dynamicTemplates.isEmpty() == false) { - throw new IllegalArgumentException( - Strings.format( - "[dynamic_templates] parameter requires all nodes on %s or later", - TransportVersions.V_7_13_0.toReleaseVersion() - ) - ); - } - } + 
out.writeBoolean(requireAlias); + out.writeMap(dynamicTemplates, StreamOutput::writeString); if (out.getTransportVersion().onOrAfter(PIPELINES_HAVE_RUN_FIELD_ADDED) && out.getTransportVersion().before(TransportVersions.V_8_13_0)) { out.writeBoolean(normalisedBytesParsed != -1L); @@ -783,6 +764,10 @@ private void writeBody(StreamOutput out) throws IOException { out.writeBoolean(requireDataStream); out.writeZLong(normalisedBytesParsed); } + + if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN)) { + out.writeBoolean(originatesFromUpdateByScript); + } } @Override @@ -985,4 +970,13 @@ public List getExecutedPipelines() { return Collections.unmodifiableList(executedPipelines); } } + + public IndexRequest setOriginatesFromUpdateByScript(boolean originatesFromUpdateByScript) { + this.originatesFromUpdateByScript = originatesFromUpdateByScript; + return this; + } + + public boolean originatesFromUpdateByScript() { + return this.originatesFromUpdateByScript; + } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java index 5ed449667fe57..7ad81154691c0 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java @@ -84,6 +84,7 @@ protected void releaseIrrelevantSearchContext(SearchPhaseResult searchPhaseResul && context.getRequest().scroll() == null && (context.isPartOfPointInTime(phaseResult.getContextId()) == false)) { try { + context.getLogger().trace("trying to release search context [{}]", phaseResult.getContextId()); SearchShardTarget shardTarget = phaseResult.getSearchShardTarget(); Transport.Connection connection = context.getConnection(shardTarget.getClusterAlias(), shardTarget.getNodeId()); context.sendReleaseSearchContext( diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 55c754545cbbe..82c498c64e1c9 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.lucene.grouping.TopFieldGroups; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchHit; @@ -301,11 +302,13 @@ private static Sort checkSameSortTypes(Collection results, SortField[] } private static SortField.Type getType(SortField sortField) { - if (sortField instanceof SortedNumericSortField) { - return ((SortedNumericSortField) sortField).getNumericType(); - } - if (sortField instanceof SortedSetSortField) { + if (sortField instanceof SortedNumericSortField sf) { + return sf.getNumericType(); + } else if (sortField instanceof SortedSetSortField) { return SortField.Type.STRING; + } else if (sortField.getComparatorSource() instanceof IndexFieldData.XFieldComparatorSource cmp) { + // This can occur if the sort field wasn't rewritten by Lucene#rewriteMergeSortField because all search shards are local. 
+ return cmp.reducedType(); } else { return sortField.getType(); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index e1fe6eac7e9c1..514e8d10eeca1 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -17,20 +17,16 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.mapper.SourceLoader; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.search.Scroll; -import org.elasticsearch.search.SearchService; import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.rescore.RescorerBuilder; import org.elasticsearch.search.sort.FieldSortBuilder; -import org.elasticsearch.search.sort.ShardDocSortField; import org.elasticsearch.search.sort.SortBuilder; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.tasks.TaskId; @@ -324,124 +320,15 @@ public void writeTo(StreamOutput out) throws IOException { public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; boolean scroll = scroll() != null; + + if (source != null) { + validationException = source.validate(validationException, scroll); + } if (scroll) { - if (source != null) { - if (source.trackTotalHitsUpTo() != null && source.trackTotalHitsUpTo() != SearchContext.TRACK_TOTAL_HITS_ACCURATE) { - validationException = addValidationError( - "disabling [track_total_hits] is not allowed in a scroll context", - validationException - ); - } - if (source.from() > 0) { - validationException = addValidationError("using [from] is not allowed in a scroll context", validationException); - } - if (source.size() == 0) { - validationException = addValidationError("[size] cannot be [0] in a scroll context", validationException); - } - if (source.rescores() != null && source.rescores().isEmpty() == false) { - validationException = addValidationError("using [rescore] is not allowed in a scroll context", validationException); - } - if (CollectionUtils.isEmpty(source.searchAfter()) == false) { - validationException = addValidationError("[search_after] cannot be used in a scroll context", validationException); - } - if (source.collapse() != null) { - validationException = addValidationError("cannot use `collapse` in a scroll context", validationException); - } - } if (requestCache != null && requestCache) { validationException = addValidationError("[request_cache] cannot be used in a scroll context", validationException); } } - if (source != null) { - if (source.slice() != null) { - if (source.pointInTimeBuilder() == null && (scroll == false)) { - validationException = addValidationError( - "[slice] can only be used with [scroll] or [point-in-time] requests", - validationException - ); - } - } - if (source.from() > 0 && CollectionUtils.isEmpty(source.searchAfter()) == false) { - validationException = addValidationError( - "[from] parameter must be set to 0 when [search_after] is 
used", - validationException - ); - } - if (source.storedFields() != null) { - if (source.storedFields().fetchFields() == false) { - if (source.fetchSource() != null && source.fetchSource().fetchSource()) { - validationException = addValidationError( - "[stored_fields] cannot be disabled if [_source] is requested", - validationException - ); - } - if (source.fetchFields() != null) { - validationException = addValidationError( - "[stored_fields] cannot be disabled when using the [fields] option", - validationException - ); - } - - } - } - if (source.subSearches().size() >= 2 && source.rankBuilder() == null) { - validationException = addValidationError("[sub_searches] requires [rank]", validationException); - } - if (source.aggregations() != null) { - validationException = source.aggregations().validate(validationException); - } - if (source.rankBuilder() != null) { - int size = source.size() == -1 ? SearchService.DEFAULT_SIZE : source.size(); - if (size == 0) { - validationException = addValidationError("[rank] requires [size] greater than [0]", validationException); - } - if (size > source.rankBuilder().rankWindowSize()) { - validationException = addValidationError( - "[rank] requires [rank_window_size: " - + source.rankBuilder().rankWindowSize() - + "]" - + " be greater than or equal to [size: " - + size - + "]", - validationException - ); - } - int queryCount = source.subSearches().size() + source.knnSearch().size(); - if (source.rankBuilder().isCompoundBuilder() && queryCount < 2) { - validationException = addValidationError( - "[rank] requires a minimum of [2] result sets using a combination of sub searches and/or knn searches", - validationException - ); - } - if (scroll) { - validationException = addValidationError("[rank] cannot be used in a scroll context", validationException); - } - if (source.rescores() != null && source.rescores().isEmpty() == false) { - validationException = addValidationError("[rank] cannot be used with [rescore]", validationException); - } - if (source.sorts() != null && source.sorts().isEmpty() == false) { - validationException = addValidationError("[rank] cannot be used with [sort]", validationException); - } - if (source.collapse() != null) { - validationException = addValidationError("[rank] cannot be used with [collapse]", validationException); - } - if (source.suggest() != null && source.suggest().getSuggestions().isEmpty() == false) { - validationException = addValidationError("[rank] cannot be used with [suggest]", validationException); - } - if (source.highlighter() != null) { - validationException = addValidationError("[rank] cannot be used with [highlighter]", validationException); - } - if (source.pointInTimeBuilder() != null) { - validationException = addValidationError("[rank] cannot be used with [point in time]", validationException); - } - } - if (source.rescores() != null) { - for (@SuppressWarnings("rawtypes") - RescorerBuilder rescoreBuilder : source.rescores()) { - validationException = rescoreBuilder.validate(this, validationException); - } - } - } if (pointInTimeBuilder() != null) { if (scroll) { validationException = addValidationError("using [point in time] is not allowed in a scroll context", validationException); @@ -461,16 +348,6 @@ public ActionRequestValidationException validate() { if (preference() != null) { validationException = addValidationError("[preference] cannot be used with point in time", validationException); } - } else if (source != null && source.sorts() != null) { - for (SortBuilder sortBuilder : source.sorts()) { 
- if (sortBuilder instanceof FieldSortBuilder - && ShardDocSortField.NAME.equals(((FieldSortBuilder) sortBuilder).getFieldName())) { - validationException = addValidationError( - "[" + FieldSortBuilder.SHARD_DOC_FIELD_NAME + "] sort field cannot be used without [point in time]", - validationException - ); - } - } } if (minCompatibleShardNode() != null) { if (isCcsMinimizeRoundtrips()) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 399a4ad526537..9713d804ddc13 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -8,6 +8,8 @@ package org.elasticsearch.action.search; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.IndicesRequest; @@ -108,6 +110,8 @@ public class SearchTransportService { */ public static final String QUERY_CAN_MATCH_NODE_NAME = "indices:data/read/search[can_match][n]"; + private static final Logger logger = LogManager.getLogger(SearchTransportService.class); + private final TransportService transportService; private final NodeClient client; private final BiFunction< @@ -442,6 +446,7 @@ public static void registerRequestHandler( SearchTransportAPMMetrics searchTransportMetrics ) { final TransportRequestHandler freeContextHandler = (request, channel, task) -> { + logger.trace("releasing search context [{}]", request.id()); boolean freed = searchService.freeReaderContext(request.id()); channel.sendResponse(new SearchFreeContextResponse(freed)); }; diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index f8d30786aca34..6d2b67a1e0f55 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -49,6 +49,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.util.FeatureFlag; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -121,6 +122,8 @@ public class TransportSearchAction extends HandledTransportAction SHARD_COUNT_LIMIT_SETTING = Setting.longSetting( "action.search.shard_count.limit", @@ -292,24 +295,43 @@ public long buildTookInMillis() { @Override protected void doExecute(Task task, SearchRequest searchRequest, ActionListener listener) { - ActionListener loggingAndMetrics = listener.delegateFailureAndWrap((l, searchResponse) -> { - searchResponseMetrics.recordTookTime(searchResponse.getTookInMillis()); - if (searchResponse.getShardFailures() != null && searchResponse.getShardFailures().length > 0) { - // Deduplicate failures by exception message and index - ShardOperationFailedException[] groupedFailures = ExceptionsHelper.groupBy(searchResponse.getShardFailures()); - for (ShardOperationFailedException f : groupedFailures) { - boolean causeHas500Status = false; - if (f.getCause() != null) { - causeHas500Status = 
ExceptionsHelper.status(f.getCause()).getStatus() >= 500; - } - if ((f.status().getStatus() >= 500 || causeHas500Status) - && ExceptionsHelper.isNodeOrShardUnavailableTypeException(f.getCause()) == false) { - logger.warn("TransportSearchAction shard failure (partial results response)", f); + ActionListener loggingAndMetrics = new ActionListener<>() { + @Override + public void onResponse(SearchResponse searchResponse) { + try { + searchResponseMetrics.recordTookTime(searchResponse.getTookInMillis()); + SearchResponseMetrics.ResponseCountTotalStatus responseCountTotalStatus = + SearchResponseMetrics.ResponseCountTotalStatus.SUCCESS; + if (searchResponse.getShardFailures() != null && searchResponse.getShardFailures().length > 0) { + // Deduplicate failures by exception message and index + ShardOperationFailedException[] groupedFailures = ExceptionsHelper.groupBy(searchResponse.getShardFailures()); + for (ShardOperationFailedException f : groupedFailures) { + boolean causeHas500Status = false; + if (f.getCause() != null) { + causeHas500Status = ExceptionsHelper.status(f.getCause()).getStatus() >= 500; + } + if ((f.status().getStatus() >= 500 || causeHas500Status) + && ExceptionsHelper.isNodeOrShardUnavailableTypeException(f.getCause()) == false) { + logger.warn("TransportSearchAction shard failure (partial results response)", f); + responseCountTotalStatus = SearchResponseMetrics.ResponseCountTotalStatus.PARTIAL_FAILURE; + } + } } + listener.onResponse(searchResponse); + // increment after the delegated onResponse to ensure we don't + // record both a success and a failure if there is an exception + searchResponseMetrics.incrementResponseCount(responseCountTotalStatus); + } catch (Exception e) { + onFailure(e); } } - l.onResponse(searchResponse); - }); + + @Override + public void onFailure(Exception e) { + searchResponseMetrics.incrementResponseCount(SearchResponseMetrics.ResponseCountTotalStatus.FAILURE); + listener.onFailure(e); + } + }; executeRequest((SearchTask) task, searchRequest, loggingAndMetrics, AsyncSearchActionProvider::new); } @@ -689,6 +711,7 @@ static SearchResponseMerger createSearchResponseMerger( } /** + * Collect remote search shards that we need to search for potential matches. * Used for ccs_minimize_roundtrips=false */ static void collectSearchShards( @@ -966,6 +989,10 @@ static BiFunction getRemoteClusterNodeLookup(Map< }; } + /** + * Produce a list of {@link SearchShardIterator}s from the set of responses from remote clusters. + * Used for ccs_minimize_roundtrips=false. + */ static List getRemoteShardsIterator( Map searchShardsResponses, Map remoteIndicesByCluster, @@ -1063,6 +1090,9 @@ private static boolean checkAllRemotePITShardsWereReturnedBySearchShards( .allMatch(searchContextIdForNode -> searchContextIdForNode.getClusterAlias() == null); } + /** + * If any of the indices we are searching are frozen, issue deprecation warning. + */ void frozenIndexCheck(ResolvedIndices resolvedIndices) { List frozenIndices = new ArrayList<>(); Map indexMetadataMap = resolvedIndices.getConcreteLocalIndicesMetadata(); @@ -1082,6 +1112,10 @@ void frozenIndexCheck(ResolvedIndices resolvedIndices) { } } + /** + * Execute search locally and for all given remote shards. + * Used when minimize_roundtrips=false or for local search. 
+ */ private void executeSearch( SearchTask task, SearchTimeProvider timeProvider, @@ -1478,6 +1512,11 @@ private static void failIfOverShardCountLimit(ClusterService clusterService, int } } + /** + * {@link ActionListener} suitable for collecting cross-cluster responses. + * @param Response type we're getting as intermediate per-cluster results. + * @param Response type that the final listener expects. + */ abstract static class CCSActionListener implements ActionListener { protected final String clusterAlias; protected final boolean skipUnavailable; @@ -1511,6 +1550,9 @@ public final void onResponse(Response response) { maybeFinish(); } + /** + * Specific listener type will implement this method to process its specific partial response. + */ abstract void innerOnResponse(Response response); @Override @@ -1648,6 +1690,10 @@ static List getLocalLocalShardsIteratorFromPointInTime( return iterators; } + /** + * Create a list of {@link SearchShardIterator}s for the local indices we are searching. + * This resolves aliases and index expressions. + */ List getLocalShardsIterator( ClusterState clusterState, SearchRequest searchRequest, diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java index c1c43310b0e11..d60033786abeb 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java @@ -55,20 +55,39 @@ public TransportSearchScrollAction( @Override protected void doExecute(Task task, SearchScrollRequest request, ActionListener listener) { - ActionListener loggingAndMetrics = listener.delegateFailureAndWrap((l, searchResponse) -> { - searchResponseMetrics.recordTookTime(searchResponse.getTookInMillis()); - if (searchResponse.getShardFailures() != null && searchResponse.getShardFailures().length > 0) { - ShardOperationFailedException[] groupedFailures = ExceptionsHelper.groupBy(searchResponse.getShardFailures()); - for (ShardOperationFailedException f : groupedFailures) { - Throwable cause = f.getCause() == null ? f : f.getCause(); - if (ExceptionsHelper.status(cause).getStatus() >= 500 - && ExceptionsHelper.isNodeOrShardUnavailableTypeException(cause) == false) { - logger.warn("TransportSearchScrollAction shard failure (partial results response)", f); + ActionListener loggingAndMetrics = new ActionListener<>() { + @Override + public void onResponse(SearchResponse searchResponse) { + try { + searchResponseMetrics.recordTookTime(searchResponse.getTookInMillis()); + SearchResponseMetrics.ResponseCountTotalStatus responseCountTotalStatus = + SearchResponseMetrics.ResponseCountTotalStatus.SUCCESS; + if (searchResponse.getShardFailures() != null && searchResponse.getShardFailures().length > 0) { + ShardOperationFailedException[] groupedFailures = ExceptionsHelper.groupBy(searchResponse.getShardFailures()); + for (ShardOperationFailedException f : groupedFailures) { + Throwable cause = f.getCause() == null ? 
f : f.getCause(); + if (ExceptionsHelper.status(cause).getStatus() >= 500 + && ExceptionsHelper.isNodeOrShardUnavailableTypeException(cause) == false) { + logger.warn("TransportSearchScrollAction shard failure (partial results response)", f); + responseCountTotalStatus = SearchResponseMetrics.ResponseCountTotalStatus.PARTIAL_FAILURE; + } + } } + listener.onResponse(searchResponse); + // increment after the delegated onResponse to ensure we don't + // record both a success and a failure if there is an exception + searchResponseMetrics.incrementResponseCount(responseCountTotalStatus); + } catch (Exception e) { + onFailure(e); } } - l.onResponse(searchResponse); - }); + + @Override + public void onFailure(Exception e) { + searchResponseMetrics.incrementResponseCount(SearchResponseMetrics.ResponseCountTotalStatus.FAILURE); + listener.onFailure(e); + } + }; try { ParsedScrollId scrollId = parseScrollId(request.scrollId()); Runnable action = switch (scrollId.getType()) { diff --git a/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java b/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java index c52c9ba1264db..47fcd43f0d238 100644 --- a/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java +++ b/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java @@ -178,32 +178,19 @@ public T actionGet(long timeout, TimeUnit unit) { * Return the result of this future, similarly to {@link FutureUtils#get} with a zero timeout except that this method ignores the * interrupted status of the calling thread. *
<p>
- * As with {@link FutureUtils#get}, if the future completed exceptionally with a {@link RuntimeException} then this method throws that - * exception, but if the future completed exceptionally with an exception that is not a {@link RuntimeException} then this method throws - * an {@link UncategorizedExecutionException} whose cause is an {@link ExecutionException} whose cause is the completing exception. + * If the future completed exceptionally then this method throws an {@link ExecutionException} whose cause is the completing exception. *
<p>
* It is not valid to call this method if the future is incomplete. * * @return the result of this future, if it has been completed successfully. - * @throws RuntimeException if this future was completed exceptionally, wrapping checked exceptions as described above. + * @throws ExecutionException if this future was completed exceptionally. * @throws CancellationException if this future was cancelled. + * @throws IllegalStateException if this future is incomplete. */ - public T result() { + public T result() throws ExecutionException { return sync.result(); } - /** - * Return the result of this future, if it has been completed successfully, or unwrap and throw the exception with which it was - * completed exceptionally. It is not valid to call this method if the future is incomplete. - */ - public T actionResult() { - try { - return result(); - } catch (ElasticsearchException e) { - throw unwrapEsException(e); - } - } - /** *
<p>
Following the contract of {@link AbstractQueuedSynchronizer} we create a * private subclass to hold the synchronizer. This synchronizer is used to @@ -217,7 +204,7 @@ public T actionResult() { * RUNNING to COMPLETING, that thread will then set the result of the * computation, and only then transition to COMPLETED or CANCELLED. *
<p>
- * We don't use the integer argument passed between acquire methods so we + * We don't use the integer argument passed between acquire methods, so we * pass around a -1 everywhere. */ static final class Sync extends AbstractQueuedSynchronizer { @@ -302,24 +289,9 @@ private V getValue() throws CancellationException, ExecutionException { } } - V result() { - final int state = getState(); - switch (state) { - case COMPLETED: - if (exception instanceof RuntimeException runtimeException) { - throw runtimeException; - } else if (exception != null) { - throw new UncategorizedExecutionException("Failed execution", new ExecutionException(exception)); - } else { - return value; - } - case CANCELLED: - throw new CancellationException("Task was cancelled."); - default: - final var message = "Error, synchronizer in invalid state: " + state; - assert false : message; - throw new IllegalStateException(message); - } + V result() throws CancellationException, ExecutionException { + assert isDone() : "Error, synchronizer in invalid state: " + getState(); + return getValue(); } /** @@ -358,7 +330,7 @@ boolean cancel() { } /** - * Implementation of completing a task. Either {@code v} or {@code t} will + * Implementation of completing a task. Either {@code v} or {@code e} will * be set but not both. The {@code finalState} is the state to change to * from {@link #RUNNING}. If the state is not in the RUNNING state we * return {@code false} after waiting for the state to be set to a valid @@ -379,7 +351,11 @@ private boolean complete(@Nullable V v, @Nullable Exception e, int finalState) { } else if (getState() == COMPLETING) { // If some other thread is currently completing the future, block until // they are done so we can guarantee completion. - acquireShared(-1); + // Don't use acquire here, to prevent false-positive deadlock detection + // when multiple threads from the same pool are completing the future + while (isDone() == false) { + Thread.onSpinWait(); + } } return doCompletion; } diff --git a/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java b/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java index 1dec470f8c140..41949c7ce3c22 100644 --- a/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java @@ -31,8 +31,12 @@ import java.util.concurrent.Executor; /** - * An {@link ActionListener} to which other {@link ActionListener} instances can subscribe, such that when this listener is completed it - * fans-out its result to the subscribed listeners. + * An {@link ActionListener} to which other {@link ActionListener} instances can subscribe, such that when this listener is + * completed it fans-out its result to the subscribed listeners. + *
<p>
+ * If this listener is complete, {@link #addListener} completes the subscribing listener immediately + * with the result with which this listener was completed. Otherwise, the subscribing listener is retained + * and completed when this listener is completed. *
<p>
* Exceptions are passed to subscribed listeners without modification. {@link ListenableActionFuture} and {@link ListenableFuture} are child * classes that provide additional exception handling. diff --git a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java index 1b3dca31689e2..269ebd80fb36a 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.support.master; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -41,6 +42,13 @@ public abstract class MasterNodeRequest + * Note that in production this is only used for sending the request out, so there's no need to preserve other marker interfaces such + * as {@link org.elasticsearch.action.IndicesRequest} or {@link org.elasticsearch.action.IndicesRequest.Replaceable} on the wrapped request. + * The receiving node will deserialize a request without a wrapper, with the correct interfaces and the appropriate master term stored + * directly in {@link MasterNodeRequest#masterTerm()}. However in tests sometimes we want to intercept the request as it's being sent, for + * which it may be necessary to use the test utility {@code MasterNodeRequestHelper#unwrapTermOverride} to remove the wrapper and access the + * inner request. + */ +class TermOverridingMasterNodeRequest extends TransportRequest { + + private static final Logger logger = LogManager.getLogger(TermOverridingMasterNodeRequest.class); + + final MasterNodeRequest request; + final long newMasterTerm; + + TermOverridingMasterNodeRequest(MasterNodeRequest request, long newMasterTerm) { + assert request.masterTerm() <= newMasterTerm; + this.request = request; + this.newMasterTerm = newMasterTerm; + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return unsupported(); + } + + @Override + public String getDescription() { + return request.getDescription(); + } + + @Override + public void remoteAddress(InetSocketAddress remoteAddress) { + unsupported(); + } + + @Override + public InetSocketAddress remoteAddress() { + return unsupported(); + } + + @Override + public void incRef() { + request.incRef(); + } + + @Override + public boolean tryIncRef() { + return request.tryIncRef(); + } + + @Override + public boolean decRef() { + return request.decRef(); + } + + @Override + public boolean hasReferences() { + return request.hasReferences(); + } + + @Override + public void setParentTask(String parentTaskNode, long parentTaskId) { + unsupported(); + } + + @Override + public void setParentTask(TaskId taskId) { + unsupported(); + } + + @Override + public TaskId getParentTask() { + return request.getParentTask(); + } + + @Override + public void setRequestId(long requestId) { + request.setRequestId(requestId); + } + + @Override + public long getRequestId() { + return request.getRequestId(); + } + + @Override + public final void writeTo(StreamOutput out) throws IOException { + request.writeTo(new TermOverridingStreamOutput(out, newMasterTerm)); + } + + @Override + public String toString() { + return Strings.format("TermOverridingMasterNodeRequest[newMasterTerm=%d in %s]", newMasterTerm, request); 
+ } + + private static T unsupported() { + final var exception = new UnsupportedOperationException("TermOverridingMasterNodeRequest is only for outbound requests"); + logger.error("TermOverridingMasterNodeRequest is only for outbound requests", exception); + assert false : exception; + throw exception; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/support/master/TermOverridingStreamOutput.java b/server/src/main/java/org/elasticsearch/action/support/master/TermOverridingStreamOutput.java new file mode 100644 index 0000000000000..45cf52bfbe208 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/support/master/TermOverridingStreamOutput.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.support.master; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Wrapper around a {@link StreamOutput} for use when sending a {@link MasterNodeRequest} to another node, exposing the {@link + * MasterNodeRequest#masterTerm()} to send out over the wire. + */ +class TermOverridingStreamOutput extends StreamOutput { + + private final StreamOutput delegate; + final long masterTerm; + + TermOverridingStreamOutput(StreamOutput delegate, long masterTerm) { + this.delegate = delegate; + this.masterTerm = masterTerm; + } + + @Override + public void writeByte(byte b) throws IOException { + delegate.writeByte(b); + } + + @Override + public void writeBytes(byte[] b, int offset, int length) throws IOException { + delegate.writeBytes(b, offset, length); + } + + @Override + public void flush() throws IOException { + delegate.flush(); + } + + @Override + public void close() throws IOException { + delegate.close(); + } + + @Override + public TransportVersion getTransportVersion() { + return delegate.getTransportVersion(); + } + + @Override + public void setTransportVersion(TransportVersion version) { + assert false : version; + delegate.setTransportVersion(version); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java index e88ebbdc07688..0cbbdb0792890 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java @@ -240,13 +240,20 @@ protected void doStart(ClusterState clusterState) { if (nodes.getMasterNode() == null) { logger.debug("no known master node, scheduling a retry"); retryOnNextState(currentStateVersion, null); + } else if (clusterState.term() < request.masterTerm()) { + logger.debug( + "request routed to master in term [{}] but local term is [{}], waiting for local term bump", + request.masterTerm(), + clusterState.term() + ); + retry(currentStateVersion, null, cs -> request.masterTerm() <= cs.term()); } else { DiscoveryNode masterNode = nodes.getMasterNode(); logger.trace("forwarding request [{}] to master [{}]", actionName, masterNode); transportService.sendRequest( masterNode, actionName, - request, + new 
TermOverridingMasterNodeRequest(request, clusterState.term()), new ActionListenerResponseHandler<>(listener, responseReader, executor) { @Override public void handleException(final TransportException exp) { diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 04ba462523f5f..b38a067e8b316 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -189,7 +189,10 @@ public void onFailure(Exception e) { logger.trace("[{}] op [{}] post replication actions failed for [{}]", primary.routingEntry().shardId(), opType, request); // TODO: fail shard? This will otherwise have the local / global checkpoint info lagging, or possibly have replicas // go out of sync with the primary - finishAsFailed(e); + // We update the checkpoints since a refresh might fail but the operations could be safely persisted, in the case that the + // fsync failed the local checkpoint won't advance and the engine will be marked as failed when the next indexing operation + // is appended into the translog. + updateCheckPoints(primary.routingEntry(), primary::localCheckpoint, primary::globalCheckpoint, () -> finishAsFailed(e)); } }); } diff --git a/server/src/main/java/org/elasticsearch/action/synonyms/AbstractSynonymsPagedResultAction.java b/server/src/main/java/org/elasticsearch/action/synonyms/AbstractSynonymsPagedResultAction.java index f1ebafe28be7c..e234fd35eebc2 100644 --- a/server/src/main/java/org/elasticsearch/action/synonyms/AbstractSynonymsPagedResultAction.java +++ b/server/src/main/java/org/elasticsearch/action/synonyms/AbstractSynonymsPagedResultAction.java @@ -81,6 +81,11 @@ private static ActionRequestValidationException validatePositiveInt( ) { if (value < 0) { validationException = addValidationError("[" + paramName + "] must be a positive integer", validationException); + } else if (value > MAX_SYNONYMS_RESULTS) { + validationException = addValidationError( + "[" + paramName + "] must be less than or equal to " + MAX_SYNONYMS_RESULTS, + validationException + ); } return validationException; diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index 0738e2cb111bb..6b54654d7fbe9 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -181,7 +181,7 @@ static String calculateRouting(GetResult getResult, @Nullable IndexRequest updat Result prepareUpdateIndexRequest(ShardId shardId, UpdateRequest request, GetResult getResult, boolean detectNoop) { final IndexRequest currentRequest = request.doc(); final String routing = calculateRouting(getResult, currentRequest); - final DocumentSizeObserver documentSizeObserver = documentParsingProvider.newDocumentSizeObserver(); + final DocumentSizeObserver documentSizeObserver = documentParsingProvider.newDocumentSizeObserver(request); final Tuple> sourceAndContent = XContentHelper.convertToMap(getResult.internalSourceRef(), true); final XContentType updateSourceContentType = sourceAndContent.v1(); final Map updatedSourceAsMap = sourceAndContent.v2(); @@ -218,7 +218,7 @@ Result prepareUpdateIndexRequest(ShardId shardId, UpdateRequest request, GetResu return new Result(update, 
DocWriteResponse.Result.NOOP, updatedSourceAsMap, updateSourceContentType); } else { String index = request.index(); - final IndexRequest finalIndexRequest = new IndexRequest(index).id(request.id()) + IndexRequest finalIndexRequest = new IndexRequest(index).id(request.id()) .routing(routing) .source(updatedSourceAsMap, updateSourceContentType) .setIfSeqNo(getResult.getSeqNo()) @@ -227,6 +227,7 @@ Result prepareUpdateIndexRequest(ShardId shardId, UpdateRequest request, GetResu .timeout(request.timeout()) .setRefreshPolicy(request.getRefreshPolicy()) .setNormalisedBytesParsed(documentSizeObserver.normalisedBytesParsed()); + return new Result(finalIndexRequest, DocWriteResponse.Result.UPDATED, updatedSourceAsMap, updateSourceContentType); } } @@ -261,7 +262,7 @@ Result prepareUpdateScriptRequest(ShardId shardId, UpdateRequest request, GetRes switch (operation) { case INDEX -> { String index = request.index(); - final IndexRequest indexRequest = new IndexRequest(index).id(request.id()) + IndexRequest indexRequest = new IndexRequest(index).id(request.id()) .routing(routing) .source(updatedSourceAsMap, updateSourceContentType) .setIfSeqNo(getResult.getSeqNo()) @@ -269,7 +270,7 @@ Result prepareUpdateScriptRequest(ShardId shardId, UpdateRequest request, GetRes .waitForActiveShards(request.waitForActiveShards()) .timeout(request.timeout()) .setRefreshPolicy(request.getRefreshPolicy()) - .noParsedBytesToReport(); + .setOriginatesFromUpdateByScript(true); return new Result(indexRequest, DocWriteResponse.Result.UPDATED, updatedSourceAsMap, updateSourceContentType); } case DELETE -> { diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index 2cd5258bf4376..211daf2369d99 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -157,11 +157,7 @@ public UpdateRequest(@Nullable ShardId shardId, StreamInput in) throws IOExcepti ifPrimaryTerm = in.readVLong(); detectNoop = in.readBoolean(); scriptedUpsert = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - requireAlias = in.readBoolean(); - } else { - requireAlias = false; - } + requireAlias = in.readBoolean(); } public UpdateRequest(String index, String id) { @@ -728,20 +724,18 @@ private void doWrite(StreamOutput out, boolean thin) throws IOException { } out.writeVInt(retryOnConflict); refreshPolicy.writeTo(out); - if (doc == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - // make sure the basics are set - doc.index(index); - doc.id(id); - if (thin) { - doc.writeThin(out); - } else { - doc.writeTo(out); - } - } + writeIndexRequest(out, thin, doc); out.writeOptionalWriteable(fetchSourceContext); + writeIndexRequest(out, thin, upsertRequest); + out.writeBoolean(docAsUpsert); + out.writeZLong(ifSeqNo); + out.writeVLong(ifPrimaryTerm); + out.writeBoolean(detectNoop); + out.writeBoolean(scriptedUpsert); + out.writeBoolean(requireAlias); + } + + private void writeIndexRequest(StreamOutput out, boolean thin, IndexRequest upsertRequest) throws IOException { if (upsertRequest == null) { out.writeBoolean(false); } else { @@ -755,14 +749,6 @@ private void doWrite(StreamOutput out, boolean thin) throws IOException { upsertRequest.writeTo(out); } } - out.writeBoolean(docAsUpsert); - out.writeZLong(ifSeqNo); - out.writeVLong(ifPrimaryTerm); - out.writeBoolean(detectNoop); - 
out.writeBoolean(scriptedUpsert); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - out.writeBoolean(requireAlias); - } } @Override diff --git a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java index a60262ff4a097..84811362c08e6 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java @@ -584,7 +584,7 @@ public BootstrapCheckResult check(BootstrapContext context) { // visible for testing boolean isSystemCallFilterInstalled() { - return Natives.isSystemCallFilterInstalled(); + return NativeAccess.instance().getExecSandboxState() != NativeAccess.ExecSandboxState.NONE; } @Override @@ -608,7 +608,7 @@ public BootstrapCheckResult check(BootstrapContext context) { // visible for testing boolean isSystemCallFilterInstalled() { - return Natives.isSystemCallFilterInstalled(); + return NativeAccess.instance().getExecSandboxState() != NativeAccess.ExecSandboxState.NONE; } // visible for testing diff --git a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java index f8ad9dd59650c..005375bf38540 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java @@ -27,16 +27,6 @@ public final class BootstrapInfo { /** no instantiation */ private BootstrapInfo() {} - /** - * Returns true if we successfully loaded native libraries. - *

- * If this returns false, then native operations such as locking - * memory did not work. - */ - public static boolean isNativesAvailable() { - return Natives.JNA_AVAILABLE; - } - /** * Returns true if we were able to lock the process's address space. */ @@ -44,13 +34,6 @@ public static boolean isMemoryLocked() { return NativeAccess.instance().isMemoryLocked(); } - /** - * Returns true if system call filter is installed (supported systems only) - */ - public static boolean isSystemCallFilterInstalled() { - return Natives.isSystemCallFilterInstalled(); - } - /** * Returns information about the console (tty) attached to the server process, or {@code null} * if no console is attached. diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index 082e1dd9257e0..be1220da6b1c4 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -19,7 +19,6 @@ import org.elasticsearch.ReleaseVersions; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.common.ReferenceDocs; -import org.elasticsearch.common.filesystem.FileSystemNatives; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.logging.LogConfigurator; import org.elasticsearch.common.network.IfConfig; @@ -293,7 +292,7 @@ static void initializeNatives(final Path tmpFile, final boolean mlockAll, final * * TODO: should we fail hard here if system call filters fail to install, or remain lenient in non-production environments? */ - Natives.tryInstallSystemCallFilter(tmpFile); + nativeAccess.tryInstallExecSandbox(); } // mlockall if requested @@ -316,18 +315,8 @@ static void initializeNatives(final Path tmpFile, final boolean mlockAll, final } } - // force remainder of JNA to be loaded (if available). - try { - JNAKernel32Library.getInstance(); - } catch (Exception ignored) { - // we've already logged this. - } - // init lucene random seed. it will use /dev/urandom where available: StringHelper.randomId(); - - // init filesystem natives - FileSystemNatives.init(); } static void initializeProbes() { diff --git a/server/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java b/server/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java deleted file mode 100644 index 01d9a122138f1..0000000000000 --- a/server/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.bootstrap; - -import com.sun.jna.IntegerType; -import com.sun.jna.Native; -import com.sun.jna.NativeLong; -import com.sun.jna.Pointer; -import com.sun.jna.Structure; -import com.sun.jna.WString; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.lucene.util.Constants; - -import java.util.Arrays; -import java.util.List; - -/** - * Library for Windows/Kernel32 - */ -final class JNAKernel32Library { - - private static final Logger logger = LogManager.getLogger(JNAKernel32Library.class); - - // Native library instance must be kept around for the same reason. - private static final class Holder { - private static final JNAKernel32Library instance = new JNAKernel32Library(); - } - - private JNAKernel32Library() { - if (Constants.WINDOWS) { - try { - Native.register("kernel32"); - logger.debug("windows/Kernel32 library loaded"); - } catch (NoClassDefFoundError e) { - logger.warn("JNA not found. native methods and handlers will be disabled."); - } catch (UnsatisfiedLinkError e) { - logger.warn("unable to link Windows/Kernel32 library. native methods and handlers will be disabled."); - } - } - } - - static JNAKernel32Library getInstance() { - return Holder.instance; - } - - /** - * Memory protection constraints - * - * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366786%28v=vs.85%29.aspx - */ - public static final int PAGE_NOACCESS = 0x0001; - public static final int PAGE_GUARD = 0x0100; - public static final int MEM_COMMIT = 0x1000; - - /** - * Contains information about a range of pages in the virtual address space of a process. - * The VirtualQuery and VirtualQueryEx functions use this structure. - * - * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366775%28v=vs.85%29.aspx - */ - public static class MemoryBasicInformation extends Structure { - public Pointer BaseAddress; - public Pointer AllocationBase; - public NativeLong AllocationProtect; - public SizeT RegionSize; - public NativeLong State; - public NativeLong Protect; - public NativeLong Type; - - @Override - protected List getFieldOrder() { - return Arrays.asList("BaseAddress", "AllocationBase", "AllocationProtect", "RegionSize", "State", "Protect", "Type"); - } - } - - public static class SizeT extends IntegerType { - - // JNA requires this no-arg constructor to be public, - // otherwise it fails to register kernel32 library - public SizeT() { - this(0); - } - - SizeT(long value) { - super(Native.SIZE_T_SIZE, value); - } - - } - - /** - * Locks the specified region of the process's virtual address space into physical - * memory, ensuring that subsequent access to the region will not incur a page fault. - * - * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366895%28v=vs.85%29.aspx - * - * @param address A pointer to the base address of the region of pages to be locked. - * @param size The size of the region to be locked, in bytes. - * @return true if the function succeeds - */ - native boolean VirtualLock(Pointer address, SizeT size); - - /** - * Retrieves information about a range of pages within the virtual address space of a specified process. - * - * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366907%28v=vs.85%29.aspx - * - * @param handle A handle to the process whose memory information is queried. - * @param address A pointer to the base address of the region of pages to be queried. 
- * @param memoryInfo A pointer to a structure in which information about the specified page range is returned. - * @param length The size of the buffer pointed to by the memoryInfo parameter, in bytes. - * @return the actual number of bytes returned in the information buffer. - */ - native int VirtualQueryEx(Pointer handle, Pointer address, MemoryBasicInformation memoryInfo, int length); - - /** - * Sets the minimum and maximum working set sizes for the specified process. - * - * https://msdn.microsoft.com/en-us/library/windows/desktop/ms686234%28v=vs.85%29.aspx - * - * @param handle A handle to the process whose working set sizes is to be set. - * @param minSize The minimum working set size for the process, in bytes. - * @param maxSize The maximum working set size for the process, in bytes. - * @return true if the function succeeds. - */ - native boolean SetProcessWorkingSetSize(Pointer handle, SizeT minSize, SizeT maxSize); - - /** - * Retrieves a pseudo handle for the current process. - * - * https://msdn.microsoft.com/en-us/library/windows/desktop/ms683179%28v=vs.85%29.aspx - * - * @return a pseudo handle to the current process. - */ - native Pointer GetCurrentProcess(); - - /** - * Closes an open object handle. - * - * https://msdn.microsoft.com/en-us/library/windows/desktop/ms724211%28v=vs.85%29.aspx - * - * @param handle A valid handle to an open object. - * @return true if the function succeeds. - */ - native boolean CloseHandle(Pointer handle); - - /** - * Retrieves the short path form of the specified path. See - * {@code GetShortPathName}. - * - * @param lpszLongPath the path string - * @param lpszShortPath a buffer to receive the short name - * @param cchBuffer the size of the buffer - * @return the length of the string copied into {@code lpszShortPath}, otherwise zero for failure - */ - native int GetShortPathNameW(WString lpszLongPath, char[] lpszShortPath, int cchBuffer); - - /** - * Creates or opens a new job object - * - * https://msdn.microsoft.com/en-us/library/windows/desktop/ms682409%28v=vs.85%29.aspx - * - * @param jobAttributes security attributes - * @param name job name - * @return job handle if the function succeeds - */ - native Pointer CreateJobObjectW(Pointer jobAttributes, String name); - - /** - * Associates a process with an existing job - * - * https://msdn.microsoft.com/en-us/library/windows/desktop/ms681949%28v=vs.85%29.aspx - * - * @param job job handle - * @param process process handle - * @return true if the function succeeds - */ - native boolean AssignProcessToJobObject(Pointer job, Pointer process); - - /** - * Basic limit information for a job object - * - * https://msdn.microsoft.com/en-us/library/windows/desktop/ms684147%28v=vs.85%29.aspx - */ - public static class JOBOBJECT_BASIC_LIMIT_INFORMATION extends Structure implements Structure.ByReference { - public long PerProcessUserTimeLimit; - public long PerJobUserTimeLimit; - public int LimitFlags; - public SizeT MinimumWorkingSetSize; - public SizeT MaximumWorkingSetSize; - public int ActiveProcessLimit; - public Pointer Affinity; - public int PriorityClass; - public int SchedulingClass; - - @Override - protected List getFieldOrder() { - return Arrays.asList( - "PerProcessUserTimeLimit", - "PerJobUserTimeLimit", - "LimitFlags", - "MinimumWorkingSetSize", - "MaximumWorkingSetSize", - "ActiveProcessLimit", - "Affinity", - "PriorityClass", - "SchedulingClass" - ); - } - } - - /** - * Constant for JOBOBJECT_BASIC_LIMIT_INFORMATION in Query/Set InformationJobObject - */ - static final int 
JOBOBJECT_BASIC_LIMIT_INFORMATION_CLASS = 2; - - /** - * Constant for LimitFlags, indicating a process limit has been set - */ - static final int JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 8; - - /** - * Get job limit and state information - * - * https://msdn.microsoft.com/en-us/library/windows/desktop/ms684925%28v=vs.85%29.aspx - * - * @param job job handle - * @param infoClass information class constant - * @param info pointer to information structure - * @param infoLength size of information structure - * @param returnLength length of data written back to structure (or null if not wanted) - * @return true if the function succeeds - */ - native boolean QueryInformationJobObject(Pointer job, int infoClass, Pointer info, int infoLength, Pointer returnLength); - - /** - * Set job limit and state information - * - * https://msdn.microsoft.com/en-us/library/windows/desktop/ms686216%28v=vs.85%29.aspx - * - * @param job job handle - * @param infoClass information class constant - * @param info pointer to information structure - * @param infoLength size of information structure - * @return true if the function succeeds - */ - native boolean SetInformationJobObject(Pointer job, int infoClass, Pointer info, int infoLength); -} diff --git a/server/src/main/java/org/elasticsearch/bootstrap/JNANatives.java b/server/src/main/java/org/elasticsearch/bootstrap/JNANatives.java deleted file mode 100644 index ba4e90ee2c6c1..0000000000000 --- a/server/src/main/java/org/elasticsearch/bootstrap/JNANatives.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.bootstrap; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - -import java.nio.file.Path; - -/** - * This class performs the actual work with JNA and library bindings to call native methods. It should only be used after - * we are sure that the JNA classes are available to the JVM - */ -class JNANatives { - - /** no instantiation */ - private JNANatives() {} - - private static final Logger logger = LogManager.getLogger(JNANatives.class); - - // Set to true, in case native system call filter install was successful - static boolean LOCAL_SYSTEM_CALL_FILTER = false; - // Set to true, in case policy can be applied to all threads of the process (even existing ones) - // otherwise they are only inherited for new threads (ES app threads) - static boolean LOCAL_SYSTEM_CALL_FILTER_ALL = false; - - static void tryInstallSystemCallFilter(Path tmpFile) { - try { - int ret = SystemCallFilter.init(tmpFile); - LOCAL_SYSTEM_CALL_FILTER = true; - if (ret == 1) { - LOCAL_SYSTEM_CALL_FILTER_ALL = true; - } - } catch (Exception e) { - // this is likely to happen unless the kernel is newish, its a best effort at the moment - // so we log stacktrace at debug for now... 
- if (logger.isDebugEnabled()) { - logger.debug("unable to install syscall filter", e); - } - logger.warn("unable to install syscall filter: ", e); - } - } - -} diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Natives.java b/server/src/main/java/org/elasticsearch/bootstrap/Natives.java deleted file mode 100644 index c792d1e0bfad0..0000000000000 --- a/server/src/main/java/org/elasticsearch/bootstrap/Natives.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.bootstrap; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.ReferenceDocs; - -import java.lang.invoke.MethodHandles; -import java.nio.file.Path; -import java.util.Locale; - -/** - * The Natives class is a wrapper class that checks if the classes necessary for calling native methods are available on - * startup. If they are not available, this class will avoid calling code that loads these classes. - */ -final class Natives { - /** no instantiation */ - private Natives() {} - - private static final Logger logger = LogManager.getLogger(Natives.class); - - // marker to determine if the JNA class files are available to the JVM - static final boolean JNA_AVAILABLE; - - static { - boolean v = false; - try { - // load one of the main JNA classes to see if the classes are available. this does not ensure that all native - // libraries are available, only the ones necessary by JNA to function - MethodHandles.publicLookup().ensureInitialized(com.sun.jna.Native.class); - v = true; - } catch (IllegalAccessException e) { - throw new AssertionError(e); - } catch (UnsatisfiedLinkError e) { - logger.warn( - String.format( - Locale.ROOT, - "unable to load JNA native support library, native methods will be disabled. 
See %s", - ReferenceDocs.EXECUTABLE_JNA_TMPDIR - ), - e - ); - } - JNA_AVAILABLE = v; - } - - static void tryInstallSystemCallFilter(Path tmpFile) { - if (JNA_AVAILABLE == false) { - logger.warn("cannot install system call filter because JNA is not available"); - return; - } - JNANatives.tryInstallSystemCallFilter(tmpFile); - } - - static boolean isSystemCallFilterInstalled() { - if (JNA_AVAILABLE == false) { - return false; - } - return JNANatives.LOCAL_SYSTEM_CALL_FILTER; - } - -} diff --git a/server/src/main/java/org/elasticsearch/bootstrap/PolicyUtil.java b/server/src/main/java/org/elasticsearch/bootstrap/PolicyUtil.java index b9574f1a29ae8..444ec49c70407 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/PolicyUtil.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/PolicyUtil.java @@ -294,8 +294,8 @@ public String getProperty(String key) { + " in policy file [" + policyFile + "]" - + "\nAvailable codebases: " - + codebaseProperties.keySet() + + "\nAvailable codebases: \n " + + String.join("\n ", codebaseProperties.keySet().stream().sorted().toList()) ); } return policy; diff --git a/server/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java b/server/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java deleted file mode 100644 index 0ab855d1d5f3a..0000000000000 --- a/server/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java +++ /dev/null @@ -1,641 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.bootstrap; - -import com.sun.jna.Library; -import com.sun.jna.Memory; -import com.sun.jna.Native; -import com.sun.jna.NativeLong; -import com.sun.jna.Pointer; -import com.sun.jna.Structure; -import com.sun.jna.ptr.PointerByReference; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.lucene.util.Constants; -import org.elasticsearch.core.IOUtils; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -/** - * Installs a system call filter to block process execution. - *

- * This is supported on Linux, Solaris, FreeBSD, OpenBSD, Mac OS X, and Windows.
- * <p>
- * On Linux it currently supports amd64 and i386 architectures, requires Linux kernel 3.5 or above, and requires
- * {@code CONFIG_SECCOMP} and {@code CONFIG_SECCOMP_FILTER} compiled into the kernel.
- * <p>
- * On Linux BPF Filters are installed using either {@code seccomp(2)} (3.17+) or {@code prctl(2)} (3.5+). {@code seccomp(2)}
- * is preferred, as it allows filters to be applied to any existing threads in the process, and one motivation
- * here is to protect against bugs in the JVM. Otherwise, code will fall back to the {@code prctl(2)} method
- * which will at least protect elasticsearch application threads.
- * <p>
- * Linux BPF filters will return {@code EACCES} (Access Denied) for the following system calls:
- * <ul>
- *   <li>{@code execve}</li>
- *   <li>{@code fork}</li>
- *   <li>{@code vfork}</li>
- *   <li>{@code execveat}</li>
- * </ul>
- * <p>

- * On Solaris 10 or higher, the following privileges are dropped with {@code priv_set(3C)}:
- * <ul>
- *   <li>{@code PRIV_PROC_FORK}</li>
- *   <li>{@code PRIV_PROC_EXEC}</li>
- * </ul>
- * <p>

- * On BSD systems, process creation is restricted with {@code setrlimit(RLIMIT_NPROC)}.
- * <p>
- * On Mac OS X Leopard or above, a custom {@code sandbox(7)} ("Seatbelt") profile is installed that
- * denies the following rules:
- * <ul>
- *   <li>{@code process-fork}</li>
- *   <li>{@code process-exec}</li>
- * </ul>
- * <p>

- * On Windows, process creation is restricted with {@code SetInformationJobObject/ActiveProcessLimit}.
- * <p>
- * This is not intended as a sandbox. It is another level of security, mostly intended to annoy - * security researchers and make their lives more difficult in achieving "remote execution" exploits. - * @see - * http://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt - * @see - * https://reverse.put.as/wp-content/uploads/2011/06/The-Apple-Sandbox-BHDC2011-Paper.pdf - * @see - * https://docs.oracle.com/cd/E23824_01/html/821-1456/prbac-2.html - */ -// not an example of how to write code!!! -final class SystemCallFilter { - private static final Logger logger = LogManager.getLogger(SystemCallFilter.class); - - // Linux implementation, based on seccomp(2) or prctl(2) with bpf filtering - - /** Access to non-standard Linux libc methods */ - interface LinuxLibrary extends Library { - /** - * maps to prctl(2) - */ - int prctl(int option, NativeLong arg2, NativeLong arg3, NativeLong arg4, NativeLong arg5); - - /** - * used to call seccomp(2), its too new... - * this is the only way, DON'T use it on some other architecture unless you know wtf you are doing - */ - NativeLong syscall(NativeLong number, Object... args); - } - - // null if unavailable or something goes wrong. - private static final LinuxLibrary linux_libc; - - static { - LinuxLibrary lib = null; - if (Constants.LINUX) { - try { - lib = Native.loadLibrary("c", LinuxLibrary.class); - } catch (UnsatisfiedLinkError e) { - logger.warn("unable to link C library. native methods (seccomp) will be disabled.", e); - } - } - linux_libc = lib; - } - - /** the preferred method is seccomp(2), since we can apply to all threads of the process */ - static final int SECCOMP_SET_MODE_FILTER = 1; // since Linux 3.17 - static final int SECCOMP_FILTER_FLAG_TSYNC = 1; // since Linux 3.17 - - /** otherwise, we can use prctl(2), which will at least protect ES application threads */ - static final int PR_GET_NO_NEW_PRIVS = 39; // since Linux 3.5 - static final int PR_SET_NO_NEW_PRIVS = 38; // since Linux 3.5 - static final int PR_GET_SECCOMP = 21; // since Linux 2.6.23 - static final int PR_SET_SECCOMP = 22; // since Linux 2.6.23 - static final long SECCOMP_MODE_FILTER = 2; // since Linux Linux 3.5 - - /** corresponds to struct sock_filter */ - static final class SockFilter { - short code; // insn - byte jt; // number of insn to jump (skip) if true - byte jf; // number of insn to jump (skip) if false - int k; // additional data - - SockFilter(short code, byte jt, byte jf, int k) { - this.code = code; - this.jt = jt; - this.jf = jf; - this.k = k; - } - } - - /** corresponds to struct sock_fprog */ - public static final class SockFProg extends Structure implements Structure.ByReference { - public short len; // number of filters - public Pointer filter; // filters - - SockFProg(SockFilter filters[]) { - len = (short) filters.length; - // serialize struct sock_filter * explicitly, its less confusing than the JNA magic we would need - Memory filter = new Memory(len * 8); - ByteBuffer bbuf = filter.getByteBuffer(0, len * 8); - bbuf.order(ByteOrder.nativeOrder()); // little endian - for (SockFilter f : filters) { - bbuf.putShort(f.code); - bbuf.put(f.jt); - bbuf.put(f.jf); - bbuf.putInt(f.k); - } - this.filter = filter; - } - - @Override - protected List getFieldOrder() { - return Arrays.asList("len", "filter"); - } - } - - // BPF "macros" and constants - static final int BPF_LD = 0x00; - static final int BPF_W = 0x00; - static final int BPF_ABS = 0x20; - static final int BPF_JMP = 0x05; - static final int BPF_JEQ = 0x10; - static final int BPF_JGE = 0x30; - 
static final int BPF_JGT = 0x20; - static final int BPF_RET = 0x06; - static final int BPF_K = 0x00; - - static SockFilter BPF_STMT(int code, int k) { - return new SockFilter((short) code, (byte) 0, (byte) 0, k); - } - - static SockFilter BPF_JUMP(int code, int k, int jt, int jf) { - return new SockFilter((short) code, (byte) jt, (byte) jf, k); - } - - static final int SECCOMP_RET_ERRNO = 0x00050000; - static final int SECCOMP_RET_DATA = 0x0000FFFF; - static final int SECCOMP_RET_ALLOW = 0x7FFF0000; - - // some errno constants for error checking/handling - static final int EACCES = 0x0D; - static final int EFAULT = 0x0E; - static final int EINVAL = 0x16; - static final int ENOSYS = 0x26; - - // offsets that our BPF checks - // check with offsetof() when adding a new arch, move to Arch if different. - static final int SECCOMP_DATA_NR_OFFSET = 0x00; - static final int SECCOMP_DATA_ARCH_OFFSET = 0x04; - - record Arch( - int audit, // AUDIT_ARCH_XXX constant from linux/audit.h - int limit, // syscall limit (necessary for blacklisting on amd64, to ban 32-bit syscalls) - int fork, // __NR_fork - int vfork, // __NR_vfork - int execve, // __NR_execve - int execveat, // __NR_execveat - int seccomp // __NR_seccomp - ) {} - - /** supported architectures map keyed by os.arch */ - private static final Map ARCHITECTURES; - static { - ARCHITECTURES = Map.of( - "amd64", - new Arch(0xC000003E, 0x3FFFFFFF, 57, 58, 59, 322, 317), - "aarch64", - new Arch(0xC00000B7, 0xFFFFFFFF, 1079, 1071, 221, 281, 277) - ); - } - - /** invokes prctl() from linux libc library */ - private static int linux_prctl(int option, long arg2, long arg3, long arg4, long arg5) { - return linux_libc.prctl(option, new NativeLong(arg2), new NativeLong(arg3), new NativeLong(arg4), new NativeLong(arg5)); - } - - /** invokes syscall() from linux libc library */ - private static long linux_syscall(long number, Object... args) { - return linux_libc.syscall(new NativeLong(number), args).longValue(); - } - - /** try to install our BPF filters via seccomp() or prctl() to block execution */ - private static int linuxImpl() { - // first be defensive: we can give nice errors this way, at the very least. - // also, some of these security features get backported to old versions, checking kernel version here is a big no-no! - final Arch arch = ARCHITECTURES.get(Constants.OS_ARCH); - boolean supported = Constants.LINUX && arch != null; - if (supported == false) { - throw new UnsupportedOperationException("seccomp unavailable: '" + Constants.OS_ARCH + "' architecture unsupported"); - } - - // we couldn't link methods, could be some really ancient kernel (e.g. < 2.1.57) or some bug - if (linux_libc == null) { - throw new UnsupportedOperationException( - "seccomp unavailable: could not link methods. requires kernel 3.5+ " - + "with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in" - ); - } - - // try to check system calls really are who they claim - // you never know (e.g. 
https://chromium.googlesource.com/chromium/src.git/+/master/sandbox/linux/seccomp-bpf/sandbox_bpf.cc#57) - final int bogusArg = 0xf7a46a5c; - - // test seccomp(BOGUS) - long ret = linux_syscall(arch.seccomp, bogusArg); - if (ret != -1) { - throw new UnsupportedOperationException("seccomp unavailable: seccomp(BOGUS_OPERATION) returned " + ret); - } else { - int errno = Native.getLastError(); - switch (errno) { - case ENOSYS: - break; // ok - case EINVAL: - break; // ok - default: - throw new UnsupportedOperationException("seccomp(BOGUS_OPERATION): " + JNACLibrary.strerror(errno)); - } - } - - // test seccomp(VALID, BOGUS) - ret = linux_syscall(arch.seccomp, SECCOMP_SET_MODE_FILTER, bogusArg); - if (ret != -1) { - throw new UnsupportedOperationException("seccomp unavailable: seccomp(SECCOMP_SET_MODE_FILTER, BOGUS_FLAG) returned " + ret); - } else { - int errno = Native.getLastError(); - switch (errno) { - case ENOSYS: - break; // ok - case EINVAL: - break; // ok - default: - throw new UnsupportedOperationException("seccomp(SECCOMP_SET_MODE_FILTER, BOGUS_FLAG): " + JNACLibrary.strerror(errno)); - } - } - - // test prctl(BOGUS) - ret = linux_prctl(bogusArg, 0, 0, 0, 0); - if (ret != -1) { - throw new UnsupportedOperationException("seccomp unavailable: prctl(BOGUS_OPTION) returned " + ret); - } else { - int errno = Native.getLastError(); - switch (errno) { - case ENOSYS: - break; // ok - case EINVAL: - break; // ok - default: - throw new UnsupportedOperationException("prctl(BOGUS_OPTION): " + JNACLibrary.strerror(errno)); - } - } - - // now just normal defensive checks - - // check for GET_NO_NEW_PRIVS - switch (linux_prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0)) { - case 0: - break; // not yet set - case 1: - break; // already set by caller - default: - int errno = Native.getLastError(); - if (errno == EINVAL) { - // friendly error, this will be the typical case for an old kernel - throw new UnsupportedOperationException( - "seccomp unavailable: requires kernel 3.5+ with" + " CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in" - ); - } else { - throw new UnsupportedOperationException("prctl(PR_GET_NO_NEW_PRIVS): " + JNACLibrary.strerror(errno)); - } - } - // check for SECCOMP - switch (linux_prctl(PR_GET_SECCOMP, 0, 0, 0, 0)) { - case 0: - break; // not yet set - case 2: - break; // already in filter mode by caller - default: - int errno = Native.getLastError(); - if (errno == EINVAL) { - throw new UnsupportedOperationException( - "seccomp unavailable: CONFIG_SECCOMP not compiled into kernel," - + " CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed" - ); - } else { - throw new UnsupportedOperationException("prctl(PR_GET_SECCOMP): " + JNACLibrary.strerror(errno)); - } - } - // check for SECCOMP_MODE_FILTER - if (linux_prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, 0, 0, 0) != 0) { - int errno = Native.getLastError(); - switch (errno) { - case EFAULT: - break; // available - case EINVAL: - throw new UnsupportedOperationException( - "seccomp unavailable: CONFIG_SECCOMP_FILTER not" - + " compiled into kernel, CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed" - ); - default: - throw new UnsupportedOperationException("prctl(PR_SET_SECCOMP): " + JNACLibrary.strerror(errno)); - } - } - - // ok, now set PR_SET_NO_NEW_PRIVS, needed to be able to set a seccomp filter as ordinary user - if (linux_prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) { - throw new UnsupportedOperationException("prctl(PR_SET_NO_NEW_PRIVS): " + JNACLibrary.strerror(Native.getLastError())); - } - - // check it worked - if 
(linux_prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0) != 1) { - throw new UnsupportedOperationException( - "seccomp filter did not really succeed: prctl(PR_GET_NO_NEW_PRIVS): " + JNACLibrary.strerror(Native.getLastError()) - ); - } - - // BPF installed to check arch, limit, then syscall. - // See https://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt for details. - SockFilter insns[] = { - /* 1 */ BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_ARCH_OFFSET), // - /* 2 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.audit, 0, 7), // if (arch != audit) goto fail; - /* 3 */ BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_NR_OFFSET), // - /* 4 */ BPF_JUMP(BPF_JMP + BPF_JGT + BPF_K, arch.limit, 5, 0), // if (syscall > LIMIT) goto fail; - /* 5 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.fork, 4, 0), // if (syscall == FORK) goto fail; - /* 6 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.vfork, 3, 0), // if (syscall == VFORK) goto fail; - /* 7 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.execve, 2, 0), // if (syscall == EXECVE) goto fail; - /* 8 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.execveat, 1, 0), // if (syscall == EXECVEAT) goto fail; - /* 9 */ BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ALLOW), // pass: return OK; - /* 10 */ BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ERRNO | (EACCES & SECCOMP_RET_DATA)), // fail: return EACCES; - }; - // seccomp takes a long, so we pass it one explicitly to keep the JNA simple - SockFProg prog = new SockFProg(insns); - prog.write(); - long pointer = Pointer.nativeValue(prog.getPointer()); - - int method = 1; - // install filter, if this works, after this there is no going back! - // first try it with seccomp(SECCOMP_SET_MODE_FILTER), falling back to prctl() - if (linux_syscall(arch.seccomp, SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, new NativeLong(pointer)) != 0) { - method = 0; - int errno1 = Native.getLastError(); - if (logger.isDebugEnabled()) { - logger.debug( - "seccomp(SECCOMP_SET_MODE_FILTER): {}, falling back to prctl(PR_SET_SECCOMP)...", - JNACLibrary.strerror(errno1) - ); - } - if (linux_prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, pointer, 0, 0) != 0) { - int errno2 = Native.getLastError(); - throw new UnsupportedOperationException( - "seccomp(SECCOMP_SET_MODE_FILTER): " - + JNACLibrary.strerror(errno1) - + ", prctl(PR_SET_SECCOMP): " - + JNACLibrary.strerror(errno2) - ); - } - } - - // now check that the filter was really installed, we should be in filter mode. - if (linux_prctl(PR_GET_SECCOMP, 0, 0, 0, 0) != 2) { - throw new UnsupportedOperationException( - "seccomp filter installation did not really succeed. seccomp(PR_GET_SECCOMP): " - + JNACLibrary.strerror(Native.getLastError()) - ); - } - - logger.debug("Linux seccomp filter installation successful, threads: [{}]", method == 1 ? "all" : "app"); - return method; - } - - // OS X implementation via sandbox(7) - - /** Access to non-standard OS X libc methods */ - interface MacLibrary extends Library { - /** - * maps to sandbox_init(3), since Leopard - */ - int sandbox_init(String profile, long flags, PointerByReference errorbuf); - - /** - * releases memory when an error occurs during initialization (e.g. syntax bug) - */ - void sandbox_free_error(Pointer errorbuf); - } - - // null if unavailable, or something goes wrong. - private static final MacLibrary libc_mac; - - static { - MacLibrary lib = null; - if (Constants.MAC_OS_X) { - try { - lib = Native.loadLibrary("c", MacLibrary.class); - } catch (UnsatisfiedLinkError e) { - logger.warn("unable to link C library. 
native methods (seatbelt) will be disabled.", e); - } - } - libc_mac = lib; - } - - /** The only supported flag... */ - static final int SANDBOX_NAMED = 1; - /** Allow everything except process fork and execution */ - static final String SANDBOX_RULES = "(version 1) (allow default) (deny process-fork) (deny process-exec)"; - - /** try to install our custom rule profile into sandbox_init() to block execution */ - private static void macImpl(Path tmpFile) throws IOException { - // first be defensive: we can give nice errors this way, at the very least. - boolean supported = Constants.MAC_OS_X; - if (supported == false) { - throw new IllegalStateException("bug: should not be trying to initialize seatbelt for an unsupported OS"); - } - - // we couldn't link methods, could be some really ancient OS X (< Leopard) or some bug - if (libc_mac == null) { - throw new UnsupportedOperationException("seatbelt unavailable: could not link methods. requires Leopard or above."); - } - - // write rules to a temporary file, which will be passed to sandbox_init() - Path rules = Files.createTempFile(tmpFile, "es", "sb"); - Files.write(rules, Collections.singleton(SANDBOX_RULES)); - - boolean success = false; - try { - PointerByReference errorRef = new PointerByReference(); - int ret = libc_mac.sandbox_init(rules.toAbsolutePath().toString(), SANDBOX_NAMED, errorRef); - // if sandbox_init() fails, add the message from the OS (e.g. syntax error) and free the buffer - if (ret != 0) { - Pointer errorBuf = errorRef.getValue(); - RuntimeException e = new UnsupportedOperationException("sandbox_init(): " + errorBuf.getString(0)); - libc_mac.sandbox_free_error(errorBuf); - throw e; - } - logger.debug("OS X seatbelt initialization successful"); - success = true; - } finally { - if (success) { - Files.delete(rules); - } else { - IOUtils.deleteFilesIgnoringExceptions(rules); - } - } - } - - // Solaris implementation via priv_set(3C) - - /** Access to non-standard Solaris libc methods */ - interface SolarisLibrary extends Library { - /** - * see priv_set(3C), a convenience method for setppriv(2). - */ - int priv_set(int op, String which, String... privs); - } - - // null if unavailable, or something goes wrong. - private static final SolarisLibrary libc_solaris; - - static { - SolarisLibrary lib = null; - if (Constants.SUN_OS) { - try { - lib = Native.loadLibrary("c", SolarisLibrary.class); - } catch (UnsatisfiedLinkError e) { - logger.warn("unable to link C library. native methods (priv_set) will be disabled.", e); - } - } - libc_solaris = lib; - } - - // constants for priv_set(2) - static final int PRIV_OFF = 1; - static final String PRIV_ALLSETS = null; - // see privileges(5) for complete list of these - static final String PRIV_PROC_FORK = "proc_fork"; - static final String PRIV_PROC_EXEC = "proc_exec"; - - static void solarisImpl() { - // first be defensive: we can give nice errors this way, at the very least. - boolean supported = Constants.SUN_OS; - if (supported == false) { - throw new IllegalStateException("bug: should not be trying to initialize priv_set for an unsupported OS"); - } - - // we couldn't link methods, could be some really ancient Solaris or some bug - if (libc_solaris == null) { - throw new UnsupportedOperationException("priv_set unavailable: could not link methods. 
requires Solaris 10+"); - } - - // drop a null-terminated list of privileges - if (libc_solaris.priv_set(PRIV_OFF, PRIV_ALLSETS, PRIV_PROC_FORK, PRIV_PROC_EXEC, null) != 0) { - throw new UnsupportedOperationException("priv_set unavailable: priv_set(): " + JNACLibrary.strerror(Native.getLastError())); - } - - logger.debug("Solaris priv_set initialization successful"); - } - - // BSD implementation via setrlimit(2) - - // TODO: add OpenBSD to Lucene Constants - // TODO: JNA doesn't have netbsd support, but this mechanism should work there too. - static final boolean OPENBSD = Constants.OS_NAME.startsWith("OpenBSD"); - - // not a standard limit, means something different on linux, etc! - static final int RLIMIT_NPROC = 7; - - static void bsdImpl() { - boolean supported = Constants.FREE_BSD || OPENBSD || Constants.MAC_OS_X; - if (supported == false) { - throw new IllegalStateException("bug: should not be trying to initialize RLIMIT_NPROC for an unsupported OS"); - } - - JNACLibrary.Rlimit limit = new JNACLibrary.Rlimit(); - limit.rlim_cur.setValue(0); - limit.rlim_max.setValue(0); - if (JNACLibrary.setrlimit(RLIMIT_NPROC, limit) != 0) { - throw new UnsupportedOperationException("RLIMIT_NPROC unavailable: " + JNACLibrary.strerror(Native.getLastError())); - } - - logger.debug("BSD RLIMIT_NPROC initialization successful"); - } - - // windows impl via job ActiveProcessLimit - - static void windowsImpl() { - if (Constants.WINDOWS == false) { - throw new IllegalStateException("bug: should not be trying to initialize ActiveProcessLimit for an unsupported OS"); - } - - JNAKernel32Library lib = JNAKernel32Library.getInstance(); - - // create a new Job - Pointer job = lib.CreateJobObjectW(null, null); - if (job == null) { - throw new UnsupportedOperationException("CreateJobObject: " + Native.getLastError()); - } - - try { - // retrieve the current basic limits of the job - int clazz = JNAKernel32Library.JOBOBJECT_BASIC_LIMIT_INFORMATION_CLASS; - JNAKernel32Library.JOBOBJECT_BASIC_LIMIT_INFORMATION limits = new JNAKernel32Library.JOBOBJECT_BASIC_LIMIT_INFORMATION(); - limits.write(); - if (lib.QueryInformationJobObject(job, clazz, limits.getPointer(), limits.size(), null) == false) { - throw new UnsupportedOperationException("QueryInformationJobObject: " + Native.getLastError()); - } - limits.read(); - // modify the number of active processes to be 1 (exactly the one process we will add to the job). - limits.ActiveProcessLimit = 1; - limits.LimitFlags = JNAKernel32Library.JOB_OBJECT_LIMIT_ACTIVE_PROCESS; - limits.write(); - if (lib.SetInformationJobObject(job, clazz, limits.getPointer(), limits.size()) == false) { - throw new UnsupportedOperationException("SetInformationJobObject: " + Native.getLastError()); - } - // assign ourselves to the job - if (lib.AssignProcessToJobObject(job, lib.GetCurrentProcess()) == false) { - throw new UnsupportedOperationException("AssignProcessToJobObject: " + Native.getLastError()); - } - } finally { - lib.CloseHandle(job); - } - - logger.debug("Windows ActiveProcessLimit initialization successful"); - } - - /** - * Attempt to drop the capability to execute for the process. - *

- * This is best effort and OS and architecture dependent. It may throw any Throwable. - * @return 0 if we can do this for application threads, 1 for the entire process - */ - static int init(Path tmpFile) throws Exception { - if (Constants.LINUX) { - return linuxImpl(); - } else if (Constants.MAC_OS_X) { - // try to enable both mechanisms if possible - bsdImpl(); - macImpl(tmpFile); - return 1; - } else if (Constants.SUN_OS) { - solarisImpl(); - return 1; - } else if (Constants.FREE_BSD || OPENBSD) { - bsdImpl(); - return 1; - } else if (Constants.WINDOWS) { - windowsImpl(); - return 1; - } else { - throw new UnsupportedOperationException("syscall filtering not supported for OS: '" + Constants.OS_NAME + "'"); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index f1fa1e4d1cb72..f9294210e0a6a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -46,6 +46,7 @@ import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContent; @@ -232,6 +233,27 @@ public ClusterState( this.minVersions = blocks.hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) ? new CompatibilityVersions(TransportVersions.MINIMUM_COMPATIBLE, Map.of()) // empty map because cluster state is unknown : CompatibilityVersions.minimumVersions(compatibilityVersions.values()); + + assert compatibilityVersions.isEmpty() + || blocks.hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) + || assertEventIngestedIsUnknownInMixedClusters(metadata, this.minVersions); + } + + private boolean assertEventIngestedIsUnknownInMixedClusters(Metadata metadata, CompatibilityVersions compatibilityVersions) { + if (compatibilityVersions.transportVersion().before(TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE) + && metadata != null + && metadata.indices() != null) { + for (IndexMetadata indexMetadata : metadata.indices().values()) { + assert indexMetadata.getEventIngestedRange() == IndexLongFieldRange.UNKNOWN + : "event.ingested range should be UNKNOWN but is " + + indexMetadata.getEventIngestedRange() + + " for index: " + + indexMetadata.getIndex() + + " minTransportVersion: " + + compatibilityVersions.transportVersion(); + } + } + return true; } private static boolean assertConsistentRoutingNodes( diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 532a33d07b25d..b6fb370991a93 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -27,6 +27,8 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoryOperation; import org.elasticsearch.repositories.RepositoryShardId; @@ -58,6 +60,8 @@ */ public class SnapshotsInProgress extends AbstractNamedDiffable implements Custom { + private static final Logger 
logger = LogManager.getLogger(SnapshotsInProgress.class); + public static final SnapshotsInProgress EMPTY = new SnapshotsInProgress(Map.of(), Set.of()); public static final String TYPE = "snapshots"; @@ -207,6 +211,17 @@ public Map> obsoleteGenerations(String r // We moved from a non-null generation successful generation to a different non-null successful generation // so the original generation is clearly obsolete because it was in-flight before and is now unreferenced everywhere. obsoleteGenerations.computeIfAbsent(repositoryShardId, ignored -> new HashSet<>()).add(oldStatus.generation()); + logger.debug( + """ + Marking shard generation [{}] file for cleanup. The finalized shard generation is now [{}], for shard \ + snapshot [{}] with shard ID [{}] on node [{}] + """, + oldStatus.generation(), + newStatus.generation(), + entry.snapshot(), + repositoryShardId.shardId(), + oldStatus.nodeId() + ); } } } @@ -441,7 +456,9 @@ public SnapshotsInProgress withUpdatedNodeIdsForRemoval(ClusterState clusterStat updatedNodeIdsForRemoval.addAll(nodeIdsMarkedForRemoval); // remove any nodes which are no longer marked for shutdown if they have no running shard snapshots - updatedNodeIdsForRemoval.removeAll(getObsoleteNodeIdsForRemoval(nodeIdsMarkedForRemoval)); + var restoredNodeIds = getObsoleteNodeIdsForRemoval(nodeIdsMarkedForRemoval); + updatedNodeIdsForRemoval.removeAll(restoredNodeIds); + logger.debug("Resuming shard snapshots on nodes [{}]", restoredNodeIds); if (updatedNodeIdsForRemoval.equals(nodesIdsForRemoval)) { return this; @@ -469,19 +486,26 @@ private static Set getNodesIdsMarkedForRemoval(ClusterState clusterState return result; } + /** + * Identifies any nodes that are no longer marked for removal AND have no running shard snapshots. + * @param latestNodeIdsMarkedForRemoval the current nodes marked for removal in the cluster state. + */ private Set getObsoleteNodeIdsForRemoval(Set latestNodeIdsMarkedForRemoval) { - final var obsoleteNodeIdsForRemoval = new HashSet<>(nodesIdsForRemoval); - obsoleteNodeIdsForRemoval.removeIf(latestNodeIdsMarkedForRemoval::contains); - if (obsoleteNodeIdsForRemoval.isEmpty()) { + // Find any nodes no longer marked for removal. + final var nodeIdsNoLongerMarkedForRemoval = new HashSet<>(nodesIdsForRemoval); + nodeIdsNoLongerMarkedForRemoval.removeIf(latestNodeIdsMarkedForRemoval::contains); + if (nodeIdsNoLongerMarkedForRemoval.isEmpty()) { return Set.of(); } + // If any nodes have INIT state shard snapshots, then the node's snapshots are not concurrency safe to resume yet. All shard + // snapshots on a newly revived node (no longer marked for shutdown) must finish moving to paused before any can resume. 
for (final var byRepo : entries.values()) { for (final var entry : byRepo.entries()) { if (entry.state() == State.STARTED && entry.hasShardsInInitState()) { for (final var shardSnapshotStatus : entry.shards().values()) { if (shardSnapshotStatus.state() == ShardState.INIT) { - obsoleteNodeIdsForRemoval.remove(shardSnapshotStatus.nodeId()); - if (obsoleteNodeIdsForRemoval.isEmpty()) { + nodeIdsNoLongerMarkedForRemoval.remove(shardSnapshotStatus.nodeId()); + if (nodeIdsNoLongerMarkedForRemoval.isEmpty()) { return Set.of(); } } @@ -489,7 +513,7 @@ private Set getObsoleteNodeIdsForRemoval(Set latestNodeIdsMarked } } } - return obsoleteNodeIdsForRemoval; + return nodeIdsNoLongerMarkedForRemoval; } public boolean nodeIdsForRemovalChanged(SnapshotsInProgress other) { @@ -616,6 +640,9 @@ public record ShardSnapshotStatus( "missing index" ); + /** + * Initializes status with state {@link ShardState#INIT}. + */ public ShardSnapshotStatus(String nodeId, ShardGeneration generation) { this(nodeId, ShardState.INIT, generation); } diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index 51fca588699e2..a01383b3eaa93 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -12,6 +12,8 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ResultDeduplicator; import org.elasticsearch.action.support.ChannelActionListener; @@ -543,9 +545,10 @@ public void shardStarted( final long primaryTerm, final String message, final ShardLongFieldRange timestampRange, + final ShardLongFieldRange eventIngestedRange, final ActionListener listener ) { - shardStarted(shardRouting, primaryTerm, message, timestampRange, listener, clusterService.state()); + shardStarted(shardRouting, primaryTerm, message, timestampRange, eventIngestedRange, listener, clusterService.state()); } public void shardStarted( @@ -553,11 +556,19 @@ public void shardStarted( final long primaryTerm, final String message, final ShardLongFieldRange timestampRange, + final ShardLongFieldRange eventIngestedRange, final ActionListener listener, final ClusterState currentState ) { remoteShardStateUpdateDeduplicator.executeOnce( - new StartedShardEntry(shardRouting.shardId(), shardRouting.allocationId().getId(), primaryTerm, message, timestampRange), + new StartedShardEntry( + shardRouting.shardId(), + shardRouting.allocationId().getId(), + primaryTerm, + message, + timestampRange, + eventIngestedRange + ), listener, (req, l) -> sendShardAction(SHARD_STARTED_ACTION_NAME, currentState, req, l) ); @@ -585,6 +596,14 @@ public void messageReceived(StartedShardEntry request, TransportChannel channel, } } + /** + * Holder of the pair of time ranges needed in cluster state - one for @timestamp, the other for 'event.ingested'. + * Since 'event.ingested' was added well after @timestamp, it can be UNKNOWN when @timestamp range is present. 
+ * @param timestampRange range for @timestamp + * @param eventIngestedRange range for event.ingested + */ + record ClusterStateTimeRanges(IndexLongFieldRange timestampRange, IndexLongFieldRange eventIngestedRange) {} + public static class ShardStartedClusterStateTaskExecutor implements ClusterStateTaskExecutor { private final AllocationService allocationService; private final RerouteService rerouteService; @@ -599,37 +618,42 @@ public ClusterState execute(BatchExecutionContext batchE List> tasksToBeApplied = new ArrayList<>(); List shardRoutingsToBeApplied = new ArrayList<>(batchExecutionContext.taskContexts().size()); Set seenShardRoutings = new HashSet<>(); // to prevent duplicates - final Map updatedTimestampRanges = new HashMap<>(); + final Map updatedTimestampRanges = new HashMap<>(); final ClusterState initialState = batchExecutionContext.initialState(); for (var taskContext : batchExecutionContext.taskContexts()) { final var task = taskContext.getTask(); - StartedShardEntry entry = task.getEntry(); - final ShardRouting matched = initialState.getRoutingTable().getByAllocationId(entry.shardId, entry.allocationId); + StartedShardEntry startedShardEntry = task.getEntry(); + final ShardRouting matched = initialState.getRoutingTable() + .getByAllocationId(startedShardEntry.shardId, startedShardEntry.allocationId); if (matched == null) { // tasks that correspond to non-existent shards are marked as successful. The reason is that we resend shard started // events on every cluster state publishing that does not contain the shard as started yet. This means that old stale // requests might still be in flight even after the shard has already been started or failed on the master. We just // ignore these requests for now. - logger.debug("{} ignoring shard started task [{}] (shard does not exist anymore)", entry.shardId, entry); + logger.debug( + "{} ignoring shard started task [{}] (shard does not exist anymore)", + startedShardEntry.shardId, + startedShardEntry + ); taskContext.success(task::onSuccess); } else { - if (matched.primary() && entry.primaryTerm > 0) { - final IndexMetadata indexMetadata = initialState.metadata().index(entry.shardId.getIndex()); + if (matched.primary() && startedShardEntry.primaryTerm > 0) { + final IndexMetadata indexMetadata = initialState.metadata().index(startedShardEntry.shardId.getIndex()); assert indexMetadata != null; - final long currentPrimaryTerm = indexMetadata.primaryTerm(entry.shardId.id()); - if (currentPrimaryTerm != entry.primaryTerm) { - assert currentPrimaryTerm > entry.primaryTerm + final long currentPrimaryTerm = indexMetadata.primaryTerm(startedShardEntry.shardId.id()); + if (currentPrimaryTerm != startedShardEntry.primaryTerm) { + assert currentPrimaryTerm > startedShardEntry.primaryTerm : "received a primary term with a higher term than in the " + "current cluster state (received [" - + entry.primaryTerm + + startedShardEntry.primaryTerm + "] but current is [" + currentPrimaryTerm + "])"; logger.debug( "{} ignoring shard started task [{}] (primary term {} does not match current term {})", - entry.shardId, - entry, - entry.primaryTerm, + startedShardEntry.shardId, + startedShardEntry, + startedShardEntry.primaryTerm, currentPrimaryTerm ); taskContext.success(task::onSuccess); @@ -637,12 +661,12 @@ public ClusterState execute(BatchExecutionContext batchE } } if (matched.initializing() == false) { - assert matched.active() : "expected active shard routing for task " + entry + " but found " + matched; + assert matched.active() : "expected active 
shard routing for task " + startedShardEntry + " but found " + matched; // same as above, this might have been a stale in-flight request, so we just ignore. logger.debug( "{} ignoring shard started task [{}] (shard exists but is not initializing: {})", - entry.shardId, - entry, + startedShardEntry.shardId, + startedShardEntry, matched ); taskContext.success(task::onSuccess); @@ -651,32 +675,66 @@ public ClusterState execute(BatchExecutionContext batchE if (seenShardRoutings.contains(matched)) { logger.trace( "{} ignoring shard started task [{}] (already scheduled to start {})", - entry.shardId, - entry, + startedShardEntry.shardId, + startedShardEntry, matched ); tasksToBeApplied.add(taskContext); } else { - logger.debug("{} starting shard {} (shard started task: [{}])", entry.shardId, matched, entry); + logger.debug( + "{} starting shard {} (shard started task: [{}])", + startedShardEntry.shardId, + matched, + startedShardEntry + ); tasksToBeApplied.add(taskContext); shardRoutingsToBeApplied.add(matched); seenShardRoutings.add(matched); - // expand the timestamp range recorded in the index metadata if needed - final Index index = entry.shardId.getIndex(); - IndexLongFieldRange currentTimestampMillisRange = updatedTimestampRanges.get(index); + // expand the timestamp range(s) recorded in the index metadata if needed + final Index index = startedShardEntry.shardId.getIndex(); + ClusterStateTimeRanges clusterStateTimeRanges = updatedTimestampRanges.get(index); + IndexLongFieldRange currentTimestampMillisRange = clusterStateTimeRanges == null + ? null + : clusterStateTimeRanges.timestampRange(); + IndexLongFieldRange currentEventIngestedMillisRange = clusterStateTimeRanges == null + ? null + : clusterStateTimeRanges.eventIngestedRange(); + final IndexMetadata indexMetadata = initialState.metadata().index(index); if (currentTimestampMillisRange == null) { currentTimestampMillisRange = indexMetadata.getTimestampRange(); } - final IndexLongFieldRange newTimestampMillisRange; - newTimestampMillisRange = currentTimestampMillisRange.extendWithShardRange( - entry.shardId.id(), + if (currentEventIngestedMillisRange == null) { + currentEventIngestedMillisRange = indexMetadata.getEventIngestedRange(); + } + + final IndexLongFieldRange newTimestampMillisRange = currentTimestampMillisRange.extendWithShardRange( + startedShardEntry.shardId.id(), indexMetadata.getNumberOfShards(), - entry.timestampRange + startedShardEntry.timestampRange ); - if (newTimestampMillisRange != currentTimestampMillisRange) { - updatedTimestampRanges.put(index, newTimestampMillisRange); + /* + * Only track 'event.ingested' range this if the cluster state min transport version is on/after the version + * where we added 'event.ingested'. If we don't do that, we will have different cluster states on different + * nodes because we can't send this data over the wire to older nodes. 
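// Illustrative sketch (not part of the change): the gating rule explained in the comment above, as a
// standalone helper. The method name is an assumption; the types (IndexLongFieldRange,
// ShardLongFieldRange, TransportVersion/TransportVersions) are the ones already used in this file.
static IndexLongFieldRange extendEventIngestedRange(
    IndexLongFieldRange currentRange,
    int shardId,
    int numberOfShards,
    ShardLongFieldRange shardEventIngestedRange,
    TransportVersion minClusterTransportVersion
) {
    if (minClusterTransportVersion.before(TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE)) {
        // an older node could not deserialize this range, so keep it UNKNOWN cluster-wide
        return IndexLongFieldRange.UNKNOWN;
    }
    return currentRange.extendWithShardRange(shardId, numberOfShards, shardEventIngestedRange);
}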
+ */ + IndexLongFieldRange newEventIngestedMillisRange = IndexLongFieldRange.UNKNOWN; + TransportVersion minTransportVersion = batchExecutionContext.initialState().getMinTransportVersion(); + if (minTransportVersion.onOrAfter(TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE)) { + newEventIngestedMillisRange = currentEventIngestedMillisRange.extendWithShardRange( + startedShardEntry.shardId.id(), + indexMetadata.getNumberOfShards(), + startedShardEntry.eventIngestedRange + ); + } + + if (newTimestampMillisRange != currentTimestampMillisRange + || newEventIngestedMillisRange != currentEventIngestedMillisRange) { + updatedTimestampRanges.put( + index, + new ClusterStateTimeRanges(newTimestampMillisRange, newEventIngestedMillisRange) + ); } } } @@ -690,10 +748,12 @@ public ClusterState execute(BatchExecutionContext batchE if (updatedTimestampRanges.isEmpty() == false) { final Metadata.Builder metadataBuilder = Metadata.builder(maybeUpdatedState.metadata()); - for (Map.Entry updatedTimestampRangeEntry : updatedTimestampRanges.entrySet()) { + for (Map.Entry updatedTimeRangesEntry : updatedTimestampRanges.entrySet()) { + ClusterStateTimeRanges timeRanges = updatedTimeRangesEntry.getValue(); metadataBuilder.put( - IndexMetadata.builder(metadataBuilder.getSafe(updatedTimestampRangeEntry.getKey())) - .timestampRange(updatedTimestampRangeEntry.getValue()) + IndexMetadata.builder(metadataBuilder.getSafe(updatedTimeRangesEntry.getKey())) + .timestampRange(timeRanges.timestampRange()) + .eventIngestedRange(timeRanges.eventIngestedRange(), maybeUpdatedState.getMinTransportVersion()) ); } maybeUpdatedState = ClusterState.builder(maybeUpdatedState).metadata(metadataBuilder).build(); @@ -725,6 +785,15 @@ private static boolean assertStartedIndicesHaveCompleteTimestampRanges(ClusterSt + clusterState.metadata().index(cursor.getKey()).getTimestampRange() + " for " + cursor.getValue().prettyPrint(); + + assert cursor.getValue().allPrimaryShardsActive() == false + || clusterState.metadata().index(cursor.getKey()).getEventIngestedRange().isComplete() + : "index [" + + cursor.getKey() + + "] should have complete event.ingested range, but got " + + clusterState.metadata().index(cursor.getKey()).getEventIngestedRange() + + " for " + + cursor.getValue().prettyPrint(); } return true; } @@ -748,6 +817,7 @@ public static class StartedShardEntry extends TransportRequest { final long primaryTerm; final String message; final ShardLongFieldRange timestampRange; + final ShardLongFieldRange eventIngestedRange; StartedShardEntry(StreamInput in) throws IOException { super(in); @@ -756,6 +826,11 @@ public static class StartedShardEntry extends TransportRequest { primaryTerm = in.readVLong(); this.message = in.readString(); this.timestampRange = ShardLongFieldRange.readFrom(in); + if (in.getTransportVersion().onOrAfter(TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE)) { + this.eventIngestedRange = ShardLongFieldRange.readFrom(in); + } else { + this.eventIngestedRange = ShardLongFieldRange.UNKNOWN; + } } public StartedShardEntry( @@ -763,13 +838,15 @@ public StartedShardEntry( final String allocationId, final long primaryTerm, final String message, - final ShardLongFieldRange timestampRange + final ShardLongFieldRange timestampRange, + final ShardLongFieldRange eventIngestedRange ) { this.shardId = shardId; this.allocationId = allocationId; this.primaryTerm = primaryTerm; this.message = message; this.timestampRange = timestampRange; + this.eventIngestedRange = eventIngestedRange; } @Override @@ -780,6 +857,9 @@ 
public void writeTo(StreamOutput out) throws IOException { out.writeVLong(primaryTerm); out.writeString(message); timestampRange.writeTo(out); + if (out.getTransportVersion().onOrAfter(TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE)) { + eventIngestedRange.writeTo(out); + } } @Override @@ -802,12 +882,13 @@ public boolean equals(Object o) { && shardId.equals(that.shardId) && allocationId.equals(that.allocationId) && message.equals(that.message) - && timestampRange.equals(that.timestampRange); + && timestampRange.equals(that.timestampRange) + && eventIngestedRange.equals(that.eventIngestedRange); } @Override public int hashCode() { - return Objects.hash(shardId, allocationId, primaryTerm, message, timestampRange); + return Objects.hash(shardId, allocationId, primaryTerm, message, timestampRange, eventIngestedRange); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java index 6e4968c8f359a..c496ccccd9c10 100644 --- a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java +++ b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java @@ -38,6 +38,11 @@ public ClusterBlockException(StreamInput in) throws IOException { this.blocks = in.readCollectionAsImmutableSet(ClusterBlock::new); } + @Override + public Throwable fillInStackTrace() { + return this; // this exception doesn't imply a bug, no need for a stack trace + } + @Override protected void writeTo(StreamOutput out, Writer nestedExceptionsWriter) throws IOException { super.writeTo(out, nestedExceptionsWriter); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreDefinition.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreDefinition.java index e4143f5fe4f35..0927c3d91a71f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreDefinition.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreDefinition.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexSettings; @@ -27,6 +28,13 @@ public class DataStreamFailureStoreDefinition { public static final Settings DATA_STREAM_FAILURE_STORE_SETTINGS; public static final CompressedXContent DATA_STREAM_FAILURE_STORE_MAPPING; + public static final int FAILURE_STORE_DEFINITION_VERSION = 1; + public static final Setting FAILURE_STORE_DEFINITION_VERSION_SETTING = Setting.intSetting( + "index.failure_store.version", + 0, + Setting.Property.IndexScope + ); + static { DATA_STREAM_FAILURE_STORE_SETTINGS = Settings.builder() // Always start with the hidden settings for a backing index. @@ -36,6 +44,7 @@ public class DataStreamFailureStoreDefinition { // meant for the backing indices only. 
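// Hypothetical usage sketch (not part of the change): the new index.failure_store.version setting makes
// it possible to detect failure store indices created from an older definition. The helper below is an
// illustration only, not an API introduced by this diff.
static boolean createdFromOlderFailureStoreDefinition(Settings indexSettings) {
    return DataStreamFailureStoreDefinition.FAILURE_STORE_DEFINITION_VERSION_SETTING.get(indexSettings)
        < DataStreamFailureStoreDefinition.FAILURE_STORE_DEFINITION_VERSION;
}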
.putNull(IndexSettings.DEFAULT_PIPELINE.getKey()) .putNull(IndexSettings.FINAL_PIPELINE.getKey()) + .put(FAILURE_STORE_DEFINITION_VERSION_SETTING.getKey(), FAILURE_STORE_DEFINITION_VERSION) .build(); try { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index 64809c963cb6d..2b65a68e8d43c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -137,6 +137,9 @@ public class IndexMetadata implements Diffable, ToXContentFragmen EnumSet.of(ClusterBlockLevel.WRITE) ); + // 'event.ingested' (part of Elastic Common Schema) range is tracked in cluster state, along with @timestamp + public static final String EVENT_INGESTED_FIELD_NAME = "event.ingested"; + @Nullable public String getDownsamplingInterval() { return settings.get(IndexMetadata.INDEX_DOWNSAMPLE_INTERVAL_KEY); @@ -535,8 +538,10 @@ public Iterator> settings() { static final String KEY_MAPPINGS_HASH = "mappings_hash"; static final String KEY_ALIASES = "aliases"; static final String KEY_ROLLOVER_INFOS = "rollover_info"; + static final String KEY_MAPPINGS_UPDATED_VERSION = "mappings_updated_version"; static final String KEY_SYSTEM = "system"; static final String KEY_TIMESTAMP_RANGE = "timestamp_range"; + static final String KEY_EVENT_INGESTED_RANGE = "event_ingested_range"; public static final String KEY_PRIMARY_TERMS = "primary_terms"; public static final String KEY_STATS = "stats"; @@ -594,6 +599,7 @@ public Iterator> settings() { private final DiscoveryNodeFilters initialRecoveryFilters; private final IndexVersion indexCreatedVersion; + private final IndexVersion mappingsUpdatedVersion; private final IndexVersion indexCompatibilityVersion; private final ActiveShardCount waitForActiveShards; @@ -601,7 +607,10 @@ public Iterator> settings() { private final boolean isSystem; private final boolean isHidden; + // range for the @timestamp field for the Index private final IndexLongFieldRange timestampRange; + // range for the event.ingested field for the Index + private final IndexLongFieldRange eventIngestedRange; private final int priority; @@ -659,6 +668,7 @@ private IndexMetadata( final DiscoveryNodeFilters includeFilters, final DiscoveryNodeFilters excludeFilters, final IndexVersion indexCreatedVersion, + final IndexVersion mappingsUpdatedVersion, final int routingNumShards, final int routingPartitionSize, final List routingPaths, @@ -667,6 +677,7 @@ private IndexMetadata( final boolean isSystem, final boolean isHidden, final IndexLongFieldRange timestampRange, + final IndexLongFieldRange eventIngestedRange, final int priority, final long creationDate, final boolean ignoreDiskWatermarks, @@ -689,6 +700,7 @@ private IndexMetadata( this.version = version; assert mappingVersion >= 0 : mappingVersion; this.mappingVersion = mappingVersion; + this.mappingsUpdatedVersion = mappingsUpdatedVersion; assert settingsVersion >= 0 : settingsVersion; this.settingsVersion = settingsVersion; assert aliasesVersion >= 0 : aliasesVersion; @@ -720,6 +732,7 @@ private IndexMetadata( assert isHidden == INDEX_HIDDEN_SETTING.get(settings); this.isHidden = isHidden; this.timestampRange = timestampRange; + this.eventIngestedRange = eventIngestedRange; this.priority = priority; this.creationDate = creationDate; this.ignoreDiskWatermarks = ignoreDiskWatermarks; @@ -767,6 +780,7 @@ IndexMetadata withMappingMetadata(MappingMetadata mapping) 
{ this.includeFilters, this.excludeFilters, this.indexCreatedVersion, + this.mappingsUpdatedVersion, this.routingNumShards, this.routingPartitionSize, this.routingPaths, @@ -775,6 +789,7 @@ IndexMetadata withMappingMetadata(MappingMetadata mapping) { this.isSystem, this.isHidden, this.timestampRange, + this.eventIngestedRange, this.priority, this.creationDate, this.ignoreDiskWatermarks, @@ -826,6 +841,7 @@ public IndexMetadata withInSyncAllocationIds(int shardId, Set inSyncSet) this.includeFilters, this.excludeFilters, this.indexCreatedVersion, + this.mappingsUpdatedVersion, this.routingNumShards, this.routingPartitionSize, this.routingPaths, @@ -834,6 +850,7 @@ public IndexMetadata withInSyncAllocationIds(int shardId, Set inSyncSet) this.isSystem, this.isHidden, this.timestampRange, + this.eventIngestedRange, this.priority, this.creationDate, this.ignoreDiskWatermarks, @@ -883,6 +900,7 @@ public IndexMetadata withIncrementedPrimaryTerm(int shardId) { this.includeFilters, this.excludeFilters, this.indexCreatedVersion, + this.mappingsUpdatedVersion, this.routingNumShards, this.routingPartitionSize, this.routingPaths, @@ -891,6 +909,7 @@ public IndexMetadata withIncrementedPrimaryTerm(int shardId) { this.isSystem, this.isHidden, this.timestampRange, + this.eventIngestedRange, this.priority, this.creationDate, this.ignoreDiskWatermarks, @@ -912,13 +931,24 @@ public IndexMetadata withIncrementedPrimaryTerm(int shardId) { } /** - * @param timestampRange new timestamp range + * @param timestampRange new @timestamp range + * @param eventIngestedRange new 'event.ingested' range + * @param minClusterTransportVersion minimum transport version used between nodes of this cluster * @return copy of this instance with updated timestamp range */ - public IndexMetadata withTimestampRange(IndexLongFieldRange timestampRange) { - if (timestampRange.equals(this.timestampRange)) { + public IndexMetadata withTimestampRanges( + IndexLongFieldRange timestampRange, + IndexLongFieldRange eventIngestedRange, + TransportVersion minClusterTransportVersion + ) { + if (timestampRange.equals(this.timestampRange) && eventIngestedRange.equals(this.eventIngestedRange)) { return this; } + IndexLongFieldRange allowedEventIngestedRange = eventIngestedRange; + // remove this check when the EVENT_INGESTED_RANGE_IN_CLUSTER_STATE version is removed + if (minClusterTransportVersion.before(TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE)) { + allowedEventIngestedRange = IndexLongFieldRange.UNKNOWN; + } return new IndexMetadata( this.index, this.version, @@ -940,6 +970,7 @@ public IndexMetadata withTimestampRange(IndexLongFieldRange timestampRange) { this.includeFilters, this.excludeFilters, this.indexCreatedVersion, + this.mappingsUpdatedVersion, this.routingNumShards, this.routingPartitionSize, this.routingPaths, @@ -948,6 +979,7 @@ public IndexMetadata withTimestampRange(IndexLongFieldRange timestampRange) { this.isSystem, this.isHidden, timestampRange, + allowedEventIngestedRange, this.priority, this.creationDate, this.ignoreDiskWatermarks, @@ -993,6 +1025,7 @@ public IndexMetadata withIncrementedVersion() { this.includeFilters, this.excludeFilters, this.indexCreatedVersion, + this.mappingsUpdatedVersion, this.routingNumShards, this.routingPartitionSize, this.routingPaths, @@ -1001,6 +1034,7 @@ public IndexMetadata withIncrementedVersion() { this.isSystem, this.isHidden, this.timestampRange, + this.eventIngestedRange, this.priority, this.creationDate, this.ignoreDiskWatermarks, @@ -1037,6 +1071,10 @@ public long 
getMappingVersion() { return mappingVersion; } + public IndexVersion getMappingsUpdatedVersion() { + return mappingsUpdatedVersion; + } + public long getSettingsVersion() { return settingsVersion; } @@ -1347,6 +1385,10 @@ public IndexLongFieldRange getTimestampRange() { return timestampRange; } + public IndexLongFieldRange getEventIngestedRange() { + return eventIngestedRange; + } + /** * @return whether this index has a time series timestamp range */ @@ -1497,8 +1539,14 @@ private static class IndexMetadataDiff implements Diff { private final Diff> customData; private final Diff>> inSyncAllocationIds; private final Diff> rolloverInfos; + private final IndexVersion mappingsUpdatedVersion; private final boolean isSystem; + + // range for the @timestamp field for the Index private final IndexLongFieldRange timestampRange; + // range for the event.ingested field for the Index + private final IndexLongFieldRange eventIngestedRange; + private final IndexMetadataStats stats; private final Double indexWriteLoadForecast; private final Long shardSizeInBytesForecast; @@ -1534,8 +1582,10 @@ private static class IndexMetadataDiff implements Diff { DiffableUtils.StringSetValueSerializer.getInstance() ); rolloverInfos = DiffableUtils.diff(before.rolloverInfos, after.rolloverInfos, DiffableUtils.getStringKeySerializer()); + mappingsUpdatedVersion = after.mappingsUpdatedVersion; isSystem = after.isSystem; timestampRange = after.timestampRange; + eventIngestedRange = after.eventIngestedRange; stats = after.stats; indexWriteLoadForecast = after.writeLoadForecast; shardSizeInBytesForecast = after.shardSizeInBytesForecast; @@ -1594,6 +1644,11 @@ private static class IndexMetadataDiff implements Diff { DiffableUtils.getStringKeySerializer(), ROLLOVER_INFO_DIFF_VALUE_READER ); + if (in.getTransportVersion().onOrAfter(TransportVersions.INDEX_METADATA_MAPPINGS_UPDATED_VERSION)) { + mappingsUpdatedVersion = IndexVersion.readVersion(in); + } else { + mappingsUpdatedVersion = IndexVersions.ZERO; + } if (in.getTransportVersion().onOrAfter(SYSTEM_INDEX_FLAG_ADDED)) { isSystem = in.readBoolean(); } else { @@ -1609,6 +1664,11 @@ private static class IndexMetadataDiff implements Diff { indexWriteLoadForecast = null; shardSizeInBytesForecast = null; } + if (in.getTransportVersion().onOrAfter(TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE)) { + eventIngestedRange = IndexLongFieldRange.readFrom(in); + } else { + eventIngestedRange = IndexLongFieldRange.UNKNOWN; + } } @Override @@ -1638,6 +1698,9 @@ public void writeTo(StreamOutput out) throws IOException { customData.writeTo(out); inSyncAllocationIds.writeTo(out); rolloverInfos.writeTo(out); + if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_METADATA_MAPPINGS_UPDATED_VERSION)) { + IndexVersion.writeVersion(mappingsUpdatedVersion, out); + } if (out.getTransportVersion().onOrAfter(SYSTEM_INDEX_FLAG_ADDED)) { out.writeBoolean(isSystem); } @@ -1647,6 +1710,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalDouble(indexWriteLoadForecast); out.writeOptionalLong(shardSizeInBytesForecast); } + if (out.getTransportVersion().onOrAfter(TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE)) { + eventIngestedRange.writeTo(out); + } else { + assert eventIngestedRange == IndexLongFieldRange.UNKNOWN + : "eventIngestedRange should be UNKNOWN until all nodes are on the new version but is " + eventIngestedRange; + } } @Override @@ -1667,6 +1736,7 @@ public IndexMetadata apply(IndexMetadata part) { builder.mapping = mappings.apply( 
ImmutableOpenMap.builder(1).fPut(MapperService.SINGLE_MAPPING_NAME, part.mapping).build() ).get(MapperService.SINGLE_MAPPING_NAME); + builder.mappingsUpdatedVersion = mappingsUpdatedVersion; builder.inferenceFields.putAllFromMap(inferenceFields.apply(part.inferenceFields)); builder.aliases.putAllFromMap(aliases.apply(part.aliases)); builder.customMetadata.putAllFromMap(customData.apply(part.customData)); @@ -1674,6 +1744,7 @@ public IndexMetadata apply(IndexMetadata part) { builder.rolloverInfos.putAllFromMap(rolloverInfos.apply(part.rolloverInfos)); builder.system(isSystem); builder.timestampRange(timestampRange); + builder.eventIngestedRange(eventIngestedRange); builder.stats(stats); builder.indexWriteLoadForecast(indexWriteLoadForecast); builder.shardSizeInBytesForecast(shardSizeInBytesForecast); @@ -1738,6 +1809,9 @@ public static IndexMetadata readFrom(StreamInput in, @Nullable Function DiffableUtils.StringSetValueSerializer.getInstance().write(v, o) ); out.writeCollection(rolloverInfos.values()); + if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_METADATA_MAPPINGS_UPDATED_VERSION)) { + IndexVersion.writeVersion(mappingsUpdatedVersion, out); + } if (out.getTransportVersion().onOrAfter(SYSTEM_INDEX_FLAG_ADDED)) { out.writeBoolean(isSystem); } @@ -1797,6 +1879,12 @@ public void writeTo(StreamOutput out, boolean mappingsAsHash) throws IOException out.writeOptionalDouble(writeLoadForecast); out.writeOptionalLong(shardSizeInBytesForecast); } + if (out.getTransportVersion().onOrAfter(TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE)) { + eventIngestedRange.writeTo(out); + } else { + assert eventIngestedRange == IndexLongFieldRange.UNKNOWN + : "eventIngestedRange should be UNKNOWN until all nodes are on the new version but is " + eventIngestedRange; + } } @Override @@ -1835,6 +1923,7 @@ public static class Builder { private long[] primaryTerms = null; private Settings settings = Settings.EMPTY; private MappingMetadata mapping; + private IndexVersion mappingsUpdatedVersion = IndexVersion.current(); private final ImmutableOpenMap.Builder inferenceFields; private final ImmutableOpenMap.Builder aliases; private final ImmutableOpenMap.Builder customMetadata; @@ -1843,6 +1932,7 @@ public static class Builder { private Integer routingNumShards; private boolean isSystem; private IndexLongFieldRange timestampRange = IndexLongFieldRange.NO_SHARDS; + private IndexLongFieldRange eventIngestedRange = IndexLongFieldRange.NO_SHARDS; private LifecycleExecutionState lifecycleExecutionState = LifecycleExecutionState.EMPTY_STATE; private IndexMetadataStats stats = null; private Double indexWriteLoadForecast = null; @@ -1873,9 +1963,11 @@ public Builder(IndexMetadata indexMetadata) { this.customMetadata = ImmutableOpenMap.builder(indexMetadata.customData); this.routingNumShards = indexMetadata.routingNumShards; this.inSyncAllocationIds = new HashMap<>(indexMetadata.inSyncAllocationIds); + this.mappingsUpdatedVersion = indexMetadata.mappingsUpdatedVersion; this.rolloverInfos = ImmutableOpenMap.builder(indexMetadata.rolloverInfos); this.isSystem = indexMetadata.isSystem; this.timestampRange = indexMetadata.timestampRange; + this.eventIngestedRange = indexMetadata.eventIngestedRange; this.lifecycleExecutionState = indexMetadata.lifecycleExecutionState; this.stats = indexMetadata.stats; this.indexWriteLoadForecast = indexMetadata.writeLoadForecast; @@ -1964,6 +2056,11 @@ public Builder putMapping(MappingMetadata mappingMd) { return this; } + public Builder mappingsUpdatedVersion(IndexVersion 
indexVersion) { + this.mappingsUpdatedVersion = indexVersion; + return this; + } + public Builder state(State state) { this.state = state; return this; @@ -2094,6 +2191,29 @@ public Builder timestampRange(IndexLongFieldRange timestampRange) { return this; } + // only for use within this class file where minClusterTransportVersion is not known (e.g., IndexMetadataDiff.apply) + Builder eventIngestedRange(IndexLongFieldRange eventIngestedRange) { + assert eventIngestedRange != null : "eventIngestedRange cannot be null"; + this.eventIngestedRange = eventIngestedRange; + return this; + } + + public Builder eventIngestedRange(IndexLongFieldRange eventIngestedRange, TransportVersion minClusterTransportVersion) { + assert eventIngestedRange != null : "eventIngestedRange cannot be null"; + assert minClusterTransportVersion != null || eventIngestedRange == IndexLongFieldRange.UNKNOWN + : "eventIngestedRange must be UNKNOWN when minClusterTransportVersion is null, but minClusterTransportVersion: " + + minClusterTransportVersion + + "; eventIngestedRange = " + + eventIngestedRange; + if (minClusterTransportVersion != null + && minClusterTransportVersion.before(TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE)) { + this.eventIngestedRange = IndexLongFieldRange.UNKNOWN; + } else { + this.eventIngestedRange = eventIngestedRange; + } + return this; + } + public Builder stats(IndexMetadataStats stats) { this.stats = stats; return this; @@ -2266,6 +2386,7 @@ IndexMetadata build(boolean repair) { } } + assert eventIngestedRange != null : "eventIngestedRange must be set (non-null) when building IndexMetadata"; final boolean isSearchableSnapshot = SearchableSnapshotsSettings.isSearchableSnapshotStore(settings); String indexModeString = settings.get(IndexSettings.MODE.getKey()); final IndexMode indexMode = indexModeString != null ? 
IndexMode.fromString(indexModeString.toLowerCase(Locale.ROOT)) : null; @@ -2291,6 +2412,7 @@ IndexMetadata build(boolean repair) { includeFilters, excludeFilters, indexCreatedVersion, + mappingsUpdatedVersion, getRoutingNumShards(), routingPartitionSize, routingPaths, @@ -2299,6 +2421,7 @@ IndexMetadata build(boolean repair) { isSystem, INDEX_HIDDEN_SETTING.get(settings), timestampRange, + eventIngestedRange, IndexMetadata.INDEX_PRIORITY_SETTING.get(settings), settings.getAsLong(SETTING_CREATION_DATE, -1L), DiskThresholdDecider.SETTING_IGNORE_DISK_WATERMARKS.get(settings), @@ -2421,12 +2544,18 @@ public static void toXContent(IndexMetadata indexMetadata, XContentBuilder build rolloverInfo.toXContent(builder, params); } builder.endObject(); + + builder.field(KEY_MAPPINGS_UPDATED_VERSION, indexMetadata.mappingsUpdatedVersion); builder.field(KEY_SYSTEM, indexMetadata.isSystem); builder.startObject(KEY_TIMESTAMP_RANGE); indexMetadata.timestampRange.toXContent(builder, params); builder.endObject(); + builder.startObject(KEY_EVENT_INGESTED_RANGE); + indexMetadata.eventIngestedRange.toXContent(builder, params); + builder.endObject(); + if (indexMetadata.stats != null) { builder.startObject(KEY_STATS); indexMetadata.stats.toXContent(builder, params); @@ -2465,7 +2594,8 @@ public static IndexMetadata fromXContent(XContentParser parser, Map builder.mappingsUpdatedVersion(IndexVersion.fromId(parser.intValue())); case KEY_WRITE_LOAD_FORECAST -> builder.indexWriteLoadForecast(parser.doubleValue()); case KEY_SHARD_SIZE_FORECAST -> builder.shardSizeInBytesForecast(parser.longValue()); default -> throw new IllegalArgumentException("Unexpected field [" + currentFieldName + "]"); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index e25c12d0c2ad7..905c3078b3c9c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -522,7 +522,7 @@ public Metadata withLastCommittedValues( /** * Creates a copy of this instance updated with the given {@link IndexMetadata} that must only contain changes to primary terms * and in-sync allocation ids relative to the existing entries. This method is only used by - * {@link org.elasticsearch.cluster.routing.allocation.IndexMetadataUpdater#applyChanges(Metadata, RoutingTable)}. + * {@link org.elasticsearch.cluster.routing.allocation.IndexMetadataUpdater#applyChanges(Metadata, RoutingTable, TransportVersion)}. * @param updates map of index name to {@link IndexMetadata}. 
* @return updated metadata instance */ diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index 52642e1de8ac9..b5ee0ebd7e387 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -12,6 +12,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; @@ -65,6 +67,7 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.indices.IndexCreationException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidIndexNameException; @@ -468,6 +471,8 @@ private ClusterState applyCreateIndexWithTemporaryService( final ActionListener rerouteListener ) throws Exception { // create the index here (on the master) to validate it can be created, as well as adding the mapping + assert indicesService.hasIndex(temporaryIndexMeta.getIndex()) == false + : Strings.format("Index [%s] already exists", temporaryIndexMeta.getIndex().getName()); return indicesService.withTempIndexService(temporaryIndexMeta, indexService -> { try { updateIndexMappingsAndBuildSortOrder(indexService, request, mappings, sourceMetadata); @@ -487,7 +492,8 @@ private ClusterState applyCreateIndexWithTemporaryService( temporaryIndexMeta.getSettings(), temporaryIndexMeta.getRoutingNumShards(), sourceMetadata, - temporaryIndexMeta.isSystem() + temporaryIndexMeta.isSystem(), + currentState.getMinTransportVersion() ); } catch (Exception e) { logger.info("failed to build index metadata [{}]", request.index()); @@ -1257,10 +1263,15 @@ static IndexMetadata buildIndexMetadata( Settings indexSettings, int routingNumShards, @Nullable IndexMetadata sourceMetadata, - boolean isSystem + boolean isSystem, + TransportVersion minClusterTransportVersion ) { IndexMetadata.Builder indexMetadataBuilder = createIndexMetadataBuilder(indexName, sourceMetadata, indexSettings, routingNumShards); indexMetadataBuilder.system(isSystem); + if (minClusterTransportVersion.before(TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE)) { + // promote to UNKNOWN for older versions since they don't know how to handle event.ingested in cluster state + indexMetadataBuilder.eventIngestedRange(IndexLongFieldRange.UNKNOWN, minClusterTransportVersion); + } // now, update the mappings with the actual source Map mappingsMetadata = new HashMap<>(); DocumentMapper docMapper = documentMapperSupplier.get(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java index 34f71d315f97a..be6d6f3ef1e53 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java @@ -886,6 +886,7 @@ static Tuple> closeRoutingTable( final IndexMetadata.Builder updatedMetadata = IndexMetadata.builder(indexMetadata).state(IndexMetadata.State.CLOSE); metadata.put( updatedMetadata.timestampRange(IndexLongFieldRange.NO_SHARDS) + .eventIngestedRange(IndexLongFieldRange.NO_SHARDS, currentState.getMinTransportVersion()) .settingsVersion(indexMetadata.getSettingsVersion() + 1) .settings(Settings.builder().put(indexMetadata.getSettings()).put(VERIFIED_BEFORE_CLOSE_SETTING.getKey(), true)) ); @@ -1132,6 +1133,7 @@ private ClusterState openIndices(final Index[] indices, final ClusterState curre .settingsVersion(indexMetadata.getSettingsVersion() + 1) .settings(updatedSettings) .timestampRange(IndexLongFieldRange.NO_SHARDS) + .eventIngestedRange(IndexLongFieldRange.NO_SHARDS, currentState.getMinTransportVersion()) .build(); // The index might be closed because we couldn't import it due to an old incompatible diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index 6d99874fd2edb..e9658e71f895e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -48,6 +48,7 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.index.mapper.RoutingFieldMapper; +import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.indices.IndexTemplateMissingException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidIndexTemplateException; @@ -713,7 +714,9 @@ private void validateIndexTemplateV2(String name, ComposableIndexTemplate indexT ) ); } - // Then apply settings resolved from templates: + // Then apply setting from component templates: + finalSettings.put(combinedSettings); + // Then finally apply settings resolved from index template: finalSettings.put(finalTemplate.map(Template::settings).orElse(Settings.EMPTY)); var templateToValidate = indexTemplate.toBuilder() @@ -1652,7 +1655,12 @@ private static void validateCompositeTemplate( final ClusterState stateWithIndex = ClusterState.builder(stateWithTemplate) .metadata( Metadata.builder(stateWithTemplate.metadata()) - .put(IndexMetadata.builder(temporaryIndexName).settings(finalResolvedSettings)) + .put( + IndexMetadata.builder(temporaryIndexName) + // necessary to pass asserts in ClusterState constructor + .eventIngestedRange(IndexLongFieldRange.UNKNOWN, state.getMinTransportVersion()) + .settings(finalResolvedSettings) + ) .build() ) .build(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java index 4e714b96f64c7..4ed18489c44b0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java @@ -27,6 +27,7 @@ import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.DocumentMapper; import 
org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperService.MergeReason; @@ -199,7 +200,8 @@ private static ClusterState applyRequest( indexMetadataBuilder.putInferenceFields(docMapper.mappers().inferenceFields()); } if (updatedMapping) { - indexMetadataBuilder.mappingVersion(1 + indexMetadataBuilder.mappingVersion()); + indexMetadataBuilder.mappingVersion(1 + indexMetadataBuilder.mappingVersion()) + .mappingsUpdatedVersion(IndexVersion.current()); } /* * This implicitly increments the index metadata version and builds the index metadata. This means that we need to have diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java index 9dbbdd597a4ce..13c2fabd6b3df 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java @@ -29,6 +29,7 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; @@ -246,6 +247,7 @@ static void prepareBackingIndex( imb.settings(settingsUpdate.build()) .settingsVersion(im.getSettingsVersion() + 1) .mappingVersion(im.getMappingVersion() + 1) + .mappingsUpdatedVersion(IndexVersion.current()) .putMapping(new MappingMetadata(mapper)); b.put(imb); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java index fb2fcf1a02ad0..6f15eb5f6e49d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java @@ -151,13 +151,12 @@ private abstract static class IdAndRoutingOnly extends IndexRouting { @Override public void process(IndexRequest indexRequest) { - if ("".equals(indexRequest.id())) { - throw new IllegalArgumentException("if _id is specified it must not be empty"); - } - // generate id if not already provided - if (indexRequest.id() == null) { + final String id = indexRequest.id(); + if (id == null) { indexRequest.autoGenerateId(); + } else if (id.isEmpty()) { + throw new IllegalArgumentException("if _id is specified it must not be empty"); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetadataUpdater.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetadataUpdater.java index e8231f8c09387..98885acd127e2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetadataUpdater.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetadataUpdater.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.routing.allocation; import org.apache.logging.log4j.Logger; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; @@ -104,9 +105,10 @@ public void relocationCompleted(ShardRouting removedRelocationSource) { * * @param oldMetadata {@link Metadata} object from before the routing 
nodes was changed. * @param newRoutingTable {@link RoutingTable} object after routing changes were applied. + * @param minClusterTransportVersion minimum TransportVersion used between nodes of this cluster * @return adapted {@link Metadata}, potentially the original one if no change was needed. */ - public Metadata applyChanges(Metadata oldMetadata, RoutingTable newRoutingTable) { + public Metadata applyChanges(Metadata oldMetadata, RoutingTable newRoutingTable, TransportVersion minClusterTransportVersion) { Map>> changesGroupedByIndex = shardChanges.entrySet() .stream() .collect(Collectors.groupingBy(e -> e.getKey().getIndex())); @@ -119,7 +121,14 @@ public Metadata applyChanges(Metadata oldMetadata, RoutingTable newRoutingTable) for (Map.Entry shardEntry : indexChanges.getValue()) { ShardId shardId = shardEntry.getKey(); Updates updates = shardEntry.getValue(); - updatedIndexMetadata = updateInSyncAllocations(newRoutingTable, oldIndexMetadata, updatedIndexMetadata, shardId, updates); + updatedIndexMetadata = updateInSyncAllocations( + newRoutingTable, + oldIndexMetadata, + updatedIndexMetadata, + shardId, + updates, + minClusterTransportVersion + ); updatedIndexMetadata = updates.increaseTerm ? updatedIndexMetadata.withIncrementedPrimaryTerm(shardId.id()) : updatedIndexMetadata; @@ -140,7 +149,8 @@ private static IndexMetadata updateInSyncAllocations( IndexMetadata oldIndexMetadata, IndexMetadata updatedIndexMetadata, ShardId shardId, - Updates updates + Updates updates, + TransportVersion minClusterTransportVersion ) { assert Sets.haveEmptyIntersection(updates.addedAllocationIds, updates.removedAllocationIds) : "allocation ids cannot be both added and removed in the same allocation round, added ids: " @@ -167,10 +177,13 @@ private static IndexMetadata updateInSyncAllocations( updatedIndexMetadata = updatedIndexMetadata.withInSyncAllocationIds(shardId.id(), Set.of()); } else { final String allocationId; + if (recoverySource == RecoverySource.ExistingStoreRecoverySource.FORCE_STALE_PRIMARY_INSTANCE) { allocationId = RecoverySource.ExistingStoreRecoverySource.FORCED_ALLOCATION_ID; - updatedIndexMetadata = updatedIndexMetadata.withTimestampRange( - updatedIndexMetadata.getTimestampRange().removeShard(shardId.id(), oldIndexMetadata.getNumberOfShards()) + updatedIndexMetadata = updatedIndexMetadata.withTimestampRanges( + updatedIndexMetadata.getTimestampRange().removeShard(shardId.id(), oldIndexMetadata.getNumberOfShards()), + updatedIndexMetadata.getEventIngestedRange().removeShard(shardId.id(), oldIndexMetadata.getNumberOfShards()), + minClusterTransportVersion ); } else { assert recoverySource instanceof RecoverySource.SnapshotRecoverySource diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java index 382e49135ea8d..af5f8cd7bd8c6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java @@ -339,7 +339,7 @@ public RoutingChangesObserver changes() { * Returns updated {@link Metadata} based on the changes that were made to the routing nodes */ public Metadata updateMetadataWithRoutingChanges(RoutingTable newRoutingTable) { - Metadata metadata = indexMetadataUpdater.applyChanges(metadata(), newRoutingTable); + Metadata metadata = indexMetadataUpdater.applyChanges(metadata(), newRoutingTable, 
clusterState.getMinTransportVersion()); return resizeSourceIndexUpdater.applyChanges(metadata, newRoutingTable); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 193a1558c857a..411143b1aef9d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -68,7 +68,7 @@ *

even shard count across nodes (weighted by cluster.routing.allocation.balance.shard)
 * spread shards of the same index across different nodes (weighted by cluster.routing.allocation.balance.index)
 * even write load of the data streams write indices across nodes (weighted by cluster.routing.allocation.balance.write_load)
- * even disk usage across nodes (weighted by cluster.routing.allocation.balance.write_load)
+ * even disk usage across nodes (weighted by cluster.routing.allocation.balance.disk_usage)
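// Illustrative sketch (not the actual WeightFunction in this file): a node's weight as a linear
// combination of the four dimensions listed above, each scaled by its
// cluster.routing.allocation.balance.* factor. Method and parameter names are assumptions.
static float nodeWeight(
    float shardBalanceFactor, float shardCountDeviation,
    float indexBalanceFactor, float indexShardCountDeviation,
    float writeLoadBalanceFactor, float writeLoadDeviation,
    float diskUsageBalanceFactor, float diskUsageDeviation
) {
    return shardBalanceFactor * shardCountDeviation
        + indexBalanceFactor * indexShardCountDeviation
        + writeLoadBalanceFactor * writeLoadDeviation
        + diskUsageBalanceFactor * diskUsageDeviation;
}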
  • * * The sensitivity of the algorithm is defined by cluster.routing.allocation.balance.threshold. * Allocator takes into account constraints set by {@code AllocationDeciders} when allocating and balancing shards. diff --git a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java index 2cac6ddb159bc..770ed4d213c55 100644 --- a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java +++ b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java @@ -73,6 +73,10 @@ public enum ReferenceDocs { UNASSIGNED_SHARDS, EXECUTABLE_JNA_TMPDIR, NETWORK_THREADING_MODEL, + ALLOCATION_EXPLAIN_API, + NETWORK_BINDING_AND_PUBLISHING, + SNAPSHOT_REPOSITORY_ANALYSIS, + S3_COMPATIBLE_REPOSITORIES, // this comment keeps the ';' on the next line so every entry above has a trailing ',' which makes the diff for adding new links cleaner ; diff --git a/server/src/main/java/org/elasticsearch/common/compress/Compressor.java b/server/src/main/java/org/elasticsearch/common/compress/Compressor.java index 239f168306a94..400653a69a9be 100644 --- a/server/src/main/java/org/elasticsearch/common/compress/Compressor.java +++ b/server/src/main/java/org/elasticsearch/common/compress/Compressor.java @@ -26,7 +26,16 @@ public interface Compressor { */ default StreamInput threadLocalStreamInput(InputStream in) throws IOException { // wrap stream in buffer since InputStreamStreamInput doesn't do any buffering itself but does a lot of small reads - return new InputStreamStreamInput(new BufferedInputStream(threadLocalInputStream(in), DeflateCompressor.BUFFER_SIZE)); + return new InputStreamStreamInput(new BufferedInputStream(threadLocalInputStream(in), DeflateCompressor.BUFFER_SIZE) { + @Override + public int read() throws IOException { + // override read to avoid synchronized single byte reads now that JEP374 removed biased locking + if (pos >= count) { + return super.read(); + } + return buf[pos++] & 0xFF; + } + }); } /** diff --git a/server/src/main/java/org/elasticsearch/common/filesystem/FileSystemNatives.java b/server/src/main/java/org/elasticsearch/common/filesystem/FileSystemNatives.java deleted file mode 100644 index 00502d64b3896..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/filesystem/FileSystemNatives.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.common.filesystem; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.lucene.util.Constants; - -import java.nio.file.Path; -import java.util.OptionalLong; - -/** - * This class provides utility methods for calling some native methods related to filesystems. 
- */ -public final class FileSystemNatives { - - private static final Logger logger = LogManager.getLogger(FileSystemNatives.class); - - @FunctionalInterface - interface Provider { - OptionalLong allocatedSizeInBytes(Path path); - } - - private static final Provider NOOP_FILE_SYSTEM_NATIVES_PROVIDER = path -> OptionalLong.empty(); - private static final Provider JNA_PROVIDER = loadJnaProvider(); - - private static Provider loadJnaProvider() { - try { - // load one of the main JNA classes to see if the classes are available. this does not ensure that all native - // libraries are available, only the ones necessary by JNA to function - Class.forName("com.sun.jna.Native"); - if (Constants.WINDOWS) { - return WindowsFileSystemNatives.getInstance(); - } else if (Constants.LINUX && Constants.JRE_IS_64BIT) { - return LinuxFileSystemNatives.getInstance(); - } - } catch (ClassNotFoundException e) { - logger.warn("JNA not found. FileSystemNatives methods will be disabled.", e); - } catch (LinkageError e) { - logger.warn("unable to load JNA native support library, FileSystemNatives methods will be disabled.", e); - } - return NOOP_FILE_SYSTEM_NATIVES_PROVIDER; - } - - private FileSystemNatives() {} - - public static void init() { - assert JNA_PROVIDER != null; - } - - /** - * Returns the number of allocated bytes on disk for a given file. - * - * @param path the path to the file - * @return an {@link OptionalLong} that contains the number of allocated bytes on disk for the file. The optional is empty is the - * allocated size of the file failed be retrieved using native methods - */ - public static OptionalLong allocatedSizeInBytes(Path path) { - return JNA_PROVIDER.allocatedSizeInBytes(path); - } - -} diff --git a/server/src/main/java/org/elasticsearch/common/filesystem/LinuxFileSystemNatives.java b/server/src/main/java/org/elasticsearch/common/filesystem/LinuxFileSystemNatives.java deleted file mode 100644 index b40fb5c2e145b..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/filesystem/LinuxFileSystemNatives.java +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.common.filesystem; - -import com.sun.jna.LastErrorException; -import com.sun.jna.Native; -import com.sun.jna.Platform; -import com.sun.jna.Structure; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.lucene.util.Constants; - -import java.nio.file.Files; -import java.nio.file.Path; -import java.time.Instant; -import java.util.OptionalLong; - -import static org.elasticsearch.core.Strings.format; - -/** - * {@link FileSystemNatives.Provider} implementation for Linux x86-64bits - */ -final class LinuxFileSystemNatives implements FileSystemNatives.Provider { - - private static final Logger logger = LogManager.getLogger(LinuxFileSystemNatives.class); - - private static final LinuxFileSystemNatives INSTANCE = new LinuxFileSystemNatives(); - - /** st_blocks field indicates the number of blocks allocated to the file, 512-byte units **/ - private static final long ST_BLOCKS_UNIT = 512L; - - /** - * Version of the `struct stat' data structure. 
- * - * To allow the `struct stat' structure bits to vary without changing shared library major version number, the `stat' function is often - * an inline wrapper around `xstat' which takes a leading version-number argument designating the data structure and bits used. - * - * In glibc this version is defined in bits/stat.h (or bits/struct_stat.h in glibc 2.33, or bits/xstatver.h in more recent versions). - * - * For x86-64 the _STAT_VER used is: - * # define _STAT_VER_LINUX 1 - * # define _STAT_VER _STAT_VER_LINUX - * - * For other architectures the _STAT_VER used is: - * # define _STAT_VER_LINUX 0 - * # define _STAT_VER _STAT_VER_LINUX - **/ - private static int loadStatVersion() { - return "aarch64".equalsIgnoreCase(Constants.OS_ARCH) ? 0 : 1; - } - - private static final int STAT_VER = loadStatVersion(); - - private LinuxFileSystemNatives() { - assert Constants.LINUX : Constants.OS_NAME; - assert Constants.JRE_IS_64BIT : Constants.OS_ARCH; - try { - Native.register(XStatLibrary.class, Platform.C_LIBRARY_NAME); - logger.debug("C library loaded"); - } catch (LinkageError e) { - logger.warn("unable to link C library. native methods and handlers will be disabled.", e); - throw e; - } - } - - static LinuxFileSystemNatives getInstance() { - return INSTANCE; - } - - public static class XStatLibrary { - public static native int __xstat(int version, String path, Stat stats) throws LastErrorException; - } - - /** - * Retrieves the actual number of bytes of disk storage used to store a specified file. - * - * @param path the path to the file - * @return an {@link OptionalLong} that contains the number of allocated bytes on disk for the file, or empty if the size is invalid - */ - @Override - public OptionalLong allocatedSizeInBytes(Path path) { - assert Files.isRegularFile(path) : path; - try { - final Stat stats = new Stat(); - final int rc = XStatLibrary.__xstat(STAT_VER, path.toString(), stats); - if (logger.isTraceEnabled()) { - logger.trace("executing native method __xstat() returned {} with error code [{}] for file [{}]", stats, rc, path); - } - return OptionalLong.of(stats.st_blocks * ST_BLOCKS_UNIT); - } catch (LastErrorException e) { - logger.warn( - () -> format( - "error when executing native method __xstat(int vers, const char *name, struct stat *buf) for file [%s]", - path - ), - e - ); - } - return OptionalLong.empty(); - } - - @Structure.FieldOrder( - { - "st_dev", - "st_ino", - "st_nlink", - "st_mode", - "st_uid", - "st_gid", - "__pad0", - "st_rdev", - "st_size", - "st_blksize", - "st_blocks", - "st_atim", - "st_mtim", - "st_ctim", - "__glibc_reserved0", - "__glibc_reserved1", - "__glibc_reserved2" } - ) - public static class Stat extends Structure { - - /** - * The stat structure varies across architectures in the glibc and kernel source codes. For example some fields might be ordered - * differently and/or some padding bytes might be present between some fields. - * - * The struct implemented here refers to the Linux x86 architecture in the glibc source files: - * - glibc version 2.23: sysdeps/unix/sysv/linux/x86/bits/stat.h - * - glibc version 2.33: sysdeps/unix/sysv/linux/x86/bits/struct_stat.h - * - * The following command is useful to compile the stat struct on a given system: - * echo "#include <sys/stat.h>" | gcc -xc - -E -dD | grep -ve '^$' | grep -A23 '^struct stat' - */ - public long st_dev; // __dev_t st_dev; /* Device. */ - public long st_ino; // __ino_t st_ino; /* File serial number. */ - public long st_nlink; // __nlink_t st_nlink; /* Link count. 
*/ - public int st_mode; // __mode_t st_mode; /* File mode. */ - public int st_uid; // __uid_t st_uid; /* User ID of the file's owner. */ - public int st_gid; // __gid_t st_gid; /* Group ID of the file's group. */ - public int __pad0; - public long st_rdev; // __dev_t st_rdev; /* Device number, if device. */ - public long st_size; // __off_t st_size; /* Size of file, in bytes. */ - public long st_blksize; // __blksize_t st_blksize; /* Optimal block size for I/O. */ - public long st_blocks; // __blkcnt_t st_blocks; /* Number 512-byte blocks allocated. */ - public Time st_atim; // struct timespec st_atim; /* Time of last access. */ - public Time st_mtim; // struct timespec st_mtim; /* Time of last modification. */ - public Time st_ctim; // struct timespec st_ctim; /* Time of last status change. */ - public long __glibc_reserved0; // __syscall_slong_t - public long __glibc_reserved1; // __syscall_slong_t - public long __glibc_reserved2; // __syscall_slong_t - - @Override - public String toString() { - return "[st_dev=" - + st_dev - + ", st_ino=" - + st_ino - + ", st_nlink=" - + st_nlink - + ", st_mode=" - + st_mode - + ", st_uid=" - + st_uid - + ", st_gid=" - + st_gid - + ", st_rdev=" - + st_rdev - + ", st_size=" - + st_size - + ", st_blksize=" - + st_blksize - + ", st_blocks=" - + st_blocks - + ", st_atim=" - + Instant.ofEpochSecond(st_atim.tv_sec, st_atim.tv_nsec) - + ", st_mtim=" - + Instant.ofEpochSecond(st_mtim.tv_sec, st_mtim.tv_nsec) - + ", st_ctim=" - + Instant.ofEpochSecond(st_ctim.tv_sec, st_ctim.tv_nsec) - + ']'; - } - } - - @Structure.FieldOrder({ "tv_sec", "tv_nsec" }) - public static class Time extends Structure { - public long tv_sec; - public long tv_nsec; - } -} diff --git a/server/src/main/java/org/elasticsearch/common/filesystem/WindowsFileSystemNatives.java b/server/src/main/java/org/elasticsearch/common/filesystem/WindowsFileSystemNatives.java deleted file mode 100644 index 4fe219bfc774d..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/filesystem/WindowsFileSystemNatives.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.common.filesystem; - -import com.sun.jna.Native; -import com.sun.jna.WString; -import com.sun.jna.ptr.IntByReference; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.lucene.util.Constants; - -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.OptionalLong; - -/** - * {@link FileSystemNatives.Provider} implementation for Windows/Kernel32 - */ -final class WindowsFileSystemNatives implements FileSystemNatives.Provider { - - private static final Logger logger = LogManager.getLogger(WindowsFileSystemNatives.class); - - private static final WindowsFileSystemNatives INSTANCE = new WindowsFileSystemNatives(); - - private static final int INVALID_FILE_SIZE = -1; - private static final int NO_ERROR = 0; - - private WindowsFileSystemNatives() { - assert Constants.WINDOWS : Constants.OS_NAME; - try { - Native.register("kernel32"); - logger.debug("windows/Kernel32 library loaded"); - } catch (LinkageError e) { - logger.warn("unable to link Windows/Kernel32 library. 
native methods and handlers will be disabled.", e); - throw e; - } - } - - static WindowsFileSystemNatives getInstance() { - return INSTANCE; - } - - /** - * Retrieves the actual number of bytes of disk storage used to store a specified file. - * - * https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getcompressedfilesizew - * - * @param lpFileName the path string - * @param lpFileSizeHigh pointer to high-order DWORD for compressed file size (or null if not needed) - * @return the low-order DWORD for compressed file siz - */ - private native int GetCompressedFileSizeW(WString lpFileName, IntByReference lpFileSizeHigh); - - /** - * Retrieves the actual number of bytes of disk storage used to store a specified file. If the file is located on a volume that supports - * compression and the file is compressed, the value obtained is the compressed size of the specified file. If the file is located on a - * volume that supports sparse files and the file is a sparse file, the value obtained is the sparse size of the specified file. - * - * This method uses Win32 DLL native method {@link #GetCompressedFileSizeW(WString, IntByReference)}. - * - * @param path the path to the file - * @return an {@link OptionalLong} that contains the number of allocated bytes on disk for the file, or empty if the size is invalid - */ - public OptionalLong allocatedSizeInBytes(Path path) { - assert Files.isRegularFile(path) : path; - final WString fileName = new WString("\\\\?\\" + path); - final IntByReference lpFileSizeHigh = new IntByReference(); - - final int lpFileSizeLow = GetCompressedFileSizeW(fileName, lpFileSizeHigh); - if (lpFileSizeLow == INVALID_FILE_SIZE) { - final int err = Native.getLastError(); - if (err != NO_ERROR) { - logger.warn("error [{}] when executing native method GetCompressedFileSizeW for file [{}]", err, path); - return OptionalLong.empty(); - } - } - - // convert lpFileSizeLow to unsigned long and combine with signed/shifted lpFileSizeHigh - final long allocatedSize = (((long) lpFileSizeHigh.getValue()) << Integer.SIZE) | Integer.toUnsignedLong(lpFileSizeLow); - if (logger.isTraceEnabled()) { - logger.trace( - "executing native method GetCompressedFileSizeW returned [high={}, low={}, allocated={}] for file [{}]", - lpFileSizeHigh, - lpFileSizeLow, - allocatedSize, - path - ); - } - return OptionalLong.of(allocatedSize); - } -} diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutput.java index 7be964fc1be39..c4857a8b85ea3 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutput.java @@ -32,7 +32,9 @@ public class RecyclerBytesStreamOutput extends BytesStream implements Releasable { static final VarHandle VH_BE_INT = MethodHandles.byteArrayViewVarHandle(int[].class, ByteOrder.BIG_ENDIAN); + static final VarHandle VH_LE_INT = MethodHandles.byteArrayViewVarHandle(int[].class, ByteOrder.LITTLE_ENDIAN); static final VarHandle VH_BE_LONG = MethodHandles.byteArrayViewVarHandle(long[].class, ByteOrder.BIG_ENDIAN); + static final VarHandle VH_LE_LONG = MethodHandles.byteArrayViewVarHandle(long[].class, ByteOrder.LITTLE_ENDIAN); private final ArrayList> pages = new ArrayList<>(); private final Recycler recycler; @@ -106,6 +108,17 @@ public void writeInt(int i) throws IOException { } } + @Override + public void writeIntLE(int i) 
throws IOException { + if (4 > (pageSize - currentPageOffset)) { + super.writeIntLE(i); + } else { + BytesRef currentPage = pages.get(pageIndex).v(); + VH_LE_INT.set(currentPage.bytes, currentPage.offset + currentPageOffset, i); + currentPageOffset += 4; + } + } + @Override public void writeLong(long i) throws IOException { if (8 > (pageSize - currentPageOffset)) { @@ -117,6 +130,17 @@ public void writeLong(long i) throws IOException { } } + @Override + public void writeLongLE(long i) throws IOException { + if (8 > (pageSize - currentPageOffset)) { + super.writeLongLE(i); + } else { + BytesRef currentPage = pages.get(pageIndex).v(); + VH_LE_LONG.set(currentPage.bytes, currentPage.offset + currentPageOffset, i); + currentPageOffset += 8; + } + } + @Override public void writeWithSizePrefix(Writeable writeable) throws IOException { // TODO: do this without copying the bytes from tmp by calling writeBytes and just use the pages in tmp directly through diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 833e7f27852c8..c245498333c94 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -190,6 +190,15 @@ public void writeInt(int i) throws IOException { writeBytes(buffer, 0, 4); } + /** + * Writes an int as four bytes, least significant bytes first. + */ + public void writeIntLE(int i) throws IOException { + final byte[] buffer = scratch.get(); + ByteUtils.writeIntLE(i, buffer, 0); + writeBytes(buffer, 0, 4); + } + /** * Writes an int in a variable-length format. Writes between one and * five bytes. Smaller values take fewer bytes. Negative numbers @@ -243,6 +252,15 @@ public void writeLong(long i) throws IOException { writeBytes(buffer, 0, 8); } + /** + * Writes a long as eight bytes. + */ + public void writeLongLE(long i) throws IOException { + final byte[] buffer = scratch.get(); + ByteUtils.writeLongLE(i, buffer, 0); + writeBytes(buffer, 0, 8); + } + /** * Writes a non-negative long in a variable-length format. Writes between one and ten bytes. Smaller values take fewer bytes. 
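For context on the writeIntLE/writeLongLE additions above: a little-endian write simply emits the least significant byte first, which is what lets the VarHandle fast path in RecyclerBytesStreamOutput and the ByteUtils-based fallback in StreamOutput agree on the same byte layout. A minimal standalone sketch of that layout (putIntLE/putLongLE are illustrative names, not the actual helpers):

    // Encodes values least-significant-byte first, mirroring the LE write paths added above.
    static void putIntLE(int value, byte[] buffer, int offset) {
        buffer[offset]     = (byte) value;
        buffer[offset + 1] = (byte) (value >>> 8);
        buffer[offset + 2] = (byte) (value >>> 16);
        buffer[offset + 3] = (byte) (value >>> 24);
    }

    static void putLongLE(long value, byte[] buffer, int offset) {
        for (int i = 0; i < 8; i++) {
            buffer[offset + i] = (byte) (value >>> (8 * i));
        }
    }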
Negative * numbers use ten bytes and trip assertions (if running in tests) so prefer {@link #writeLong(long)} or {@link #writeZLong(long)} for @@ -442,6 +460,10 @@ public void writeDouble(double v) throws IOException { writeLong(Double.doubleToLongBits(v)); } + public void writeDoubleLE(double v) throws IOException { + writeLongLE(Double.doubleToLongBits(v)); + } + public void writeOptionalDouble(@Nullable Double v) throws IOException { if (v == null) { writeBoolean(false); diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index f3eff9ae8838c..fe6616cb4fb8e 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -7,6 +7,7 @@ */ package org.elasticsearch.common.settings; +import org.elasticsearch.cluster.metadata.DataStreamFailureStoreDefinition; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.cluster.routing.UnassignedInfo; @@ -179,6 +180,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.LIFECYCLE_PARSE_ORIGINATION_DATE_SETTING, IndexSettings.TIME_SERIES_ES87TSDB_CODEC_ENABLED_SETTING, IndexSettings.PREFER_ILM_SETTING, + DataStreamFailureStoreDefinition.FAILURE_STORE_DEFINITION_VERSION_SETTING, // validate that built-in similarities don't get redefined Setting.groupSetting("index.similarity.", (s) -> { diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java index 55c421b87196d..ae8f8cb28da11 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java @@ -44,6 +44,7 @@ import static java.time.temporal.ChronoField.MONTH_OF_YEAR; import static java.time.temporal.ChronoField.NANO_OF_SECOND; import static java.time.temporal.ChronoField.SECOND_OF_MINUTE; +import static org.elasticsearch.common.util.ArrayUtils.prepend; public class DateFormatters { @@ -202,7 +203,11 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p new JavaTimeDateTimePrinter(STRICT_DATE_OPTIONAL_TIME_PRINTER), JAVA_TIME_PARSERS_ONLY ? new DateTimeParser[] { javaTimeParser } - : new DateTimeParser[] { new Iso8601DateTimeParser(Set.of(), false).withLocale(Locale.ROOT), javaTimeParser } + : new DateTimeParser[] { + new Iso8601DateTimeParser(Set.of(), false, null, DecimalSeparator.BOTH, TimezonePresence.OPTIONAL).withLocale( + Locale.ROOT + ), + javaTimeParser } ); } @@ -266,7 +271,13 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p JAVA_TIME_PARSERS_ONLY ? new DateTimeParser[] { javaTimeParser } : new DateTimeParser[] { - new Iso8601DateTimeParser(Set.of(HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), true).withLocale(Locale.ROOT), + new Iso8601DateTimeParser( + Set.of(HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), + true, + null, + DecimalSeparator.BOTH, + TimezonePresence.OPTIONAL + ).withLocale(Locale.ROOT), javaTimeParser } ); } @@ -316,7 +327,11 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p new JavaTimeDateTimePrinter(STRICT_DATE_OPTIONAL_TIME_PRINTER), JAVA_TIME_PARSERS_ONLY ? 
new DateTimeParser[] { javaTimeParser } - : new DateTimeParser[] { new Iso8601DateTimeParser(Set.of(), false).withLocale(Locale.ROOT), javaTimeParser } + : new DateTimeParser[] { + new Iso8601DateTimeParser(Set.of(), false, null, DecimalSeparator.BOTH, TimezonePresence.OPTIONAL).withLocale( + Locale.ROOT + ), + javaTimeParser } ); } @@ -739,24 +754,53 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p /* * A strict formatter that formats or parses a year and a month, such as '2011-12'. */ - private static final DateFormatter STRICT_YEAR_MONTH = newDateFormatter( - "strict_year_month", - new DateTimeFormatterBuilder().appendValue(ChronoField.YEAR, 4, 4, SignStyle.EXCEEDS_PAD) + private static final DateFormatter STRICT_YEAR_MONTH; + static { + DateTimeFormatter javaTimeFormatter = new DateTimeFormatterBuilder().appendValue(ChronoField.YEAR, 4, 4, SignStyle.EXCEEDS_PAD) .appendLiteral("-") .appendValue(MONTH_OF_YEAR, 2, 2, SignStyle.NOT_NEGATIVE) .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) - ); + .withResolverStyle(ResolverStyle.STRICT); + DateTimeParser javaTimeParser = new JavaTimeDateTimeParser(javaTimeFormatter); + + STRICT_YEAR_MONTH = new JavaDateFormatter( + "strict_year_month", + new JavaTimeDateTimePrinter(javaTimeFormatter), + JAVA_TIME_PARSERS_ONLY + ? new DateTimeParser[] { javaTimeParser } + : new DateTimeParser[] { + new Iso8601DateTimeParser( + Set.of(MONTH_OF_YEAR), + false, + MONTH_OF_YEAR, + DecimalSeparator.BOTH, + TimezonePresence.FORBIDDEN + ).withLocale(Locale.ROOT), + javaTimeParser } + ); + } /* * A strict formatter that formats or parses a year, such as '2011'. */ - private static final DateFormatter STRICT_YEAR = newDateFormatter( - "strict_year", - new DateTimeFormatterBuilder().appendValue(ChronoField.YEAR, 4, 4, SignStyle.EXCEEDS_PAD) + private static final DateFormatter STRICT_YEAR; + static { + DateTimeFormatter javaTimeFormatter = new DateTimeFormatterBuilder().appendValue(ChronoField.YEAR, 4, 4, SignStyle.EXCEEDS_PAD) .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) - ); + .withResolverStyle(ResolverStyle.STRICT); + DateTimeParser javaTimeParser = new JavaTimeDateTimeParser(javaTimeFormatter); + + STRICT_YEAR = new JavaDateFormatter( + "strict_year", + new JavaTimeDateTimePrinter(javaTimeFormatter), + JAVA_TIME_PARSERS_ONLY + ? new DateTimeParser[] { javaTimeParser } + : new DateTimeParser[] { + new Iso8601DateTimeParser(Set.of(), false, ChronoField.YEAR, DecimalSeparator.BOTH, TimezonePresence.FORBIDDEN) + .withLocale(Locale.ROOT), + javaTimeParser } + ); + } /* * A strict formatter that formats or parses a hour, minute and second, such as '09:43:25'. @@ -787,18 +831,39 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p * Returns a formatter that combines a full date and time, separated by a 'T' * (uuuu-MM-dd'T'HH:mm:ss.SSSZZ). 
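The maxAllowedField argument used for strict_year_month and strict_year above is what lets the ISO-8601 fast path reject input that is more specific than the named format rather than silently accepting it. A rough behavioural sketch, assuming the existing DateFormatter.forPattern entry point (each rejected parse throws IllegalArgumentException):

    DateFormatter yearMonth = DateFormatter.forPattern("strict_year_month");
    yearMonth.parse("2011-12");       // accepted: year and month only
    yearMonth.parse("2011-12-05");    // rejected: DAY_OF_MONTH exceeds the MONTH_OF_YEAR cap
    yearMonth.parse("2011-12+01:00"); // rejected: this format forbids timezones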
*/ - private static final DateFormatter STRICT_DATE_TIME = newDateFormatter( - "strict_date_time", - STRICT_DATE_PRINTER, - new DateTimeFormatterBuilder().append(STRICT_DATE_FORMATTER) - .appendZoneOrOffsetId() - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT), - new DateTimeFormatterBuilder().append(STRICT_DATE_FORMATTER) - .append(TIME_ZONE_FORMATTER_NO_COLON) - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) - ); + private static final DateFormatter STRICT_DATE_TIME; + static { + DateTimeParser[] javaTimeParsers = new DateTimeParser[] { + new JavaTimeDateTimeParser( + new DateTimeFormatterBuilder().append(STRICT_DATE_FORMATTER) + .appendZoneOrOffsetId() + .toFormatter(Locale.ROOT) + .withResolverStyle(ResolverStyle.STRICT) + ), + new JavaTimeDateTimeParser( + new DateTimeFormatterBuilder().append(STRICT_DATE_FORMATTER) + .append(TIME_ZONE_FORMATTER_NO_COLON) + .toFormatter(Locale.ROOT) + .withResolverStyle(ResolverStyle.STRICT) + ) }; + + STRICT_DATE_TIME = new JavaDateFormatter( + "strict_date_time", + new JavaTimeDateTimePrinter(STRICT_DATE_PRINTER), + JAVA_TIME_PARSERS_ONLY + ? javaTimeParsers + : prepend( + new Iso8601DateTimeParser( + Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), + false, + null, + DecimalSeparator.DOT, + TimezonePresence.MANDATORY + ).withLocale(Locale.ROOT), + javaTimeParsers + ) + ); + } private static final DateTimeFormatter STRICT_ORDINAL_DATE_TIME_NO_MILLIS_BASE = new DateTimeFormatterBuilder().appendValue( ChronoField.YEAR, @@ -841,21 +906,44 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p * Returns a formatter that combines a full date and time without millis, * separated by a 'T' (uuuu-MM-dd'T'HH:mm:ssZZ). */ - private static final DateFormatter STRICT_DATE_TIME_NO_MILLIS = newDateFormatter( - "strict_date_time_no_millis", - new DateTimeFormatterBuilder().append(STRICT_DATE_TIME_NO_MILLIS_FORMATTER) - .appendOffset("+HH:MM", "Z") - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT), - new DateTimeFormatterBuilder().append(STRICT_DATE_TIME_NO_MILLIS_FORMATTER) - .appendZoneOrOffsetId() - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT), - new DateTimeFormatterBuilder().append(STRICT_DATE_TIME_NO_MILLIS_FORMATTER) - .append(TIME_ZONE_FORMATTER_NO_COLON) - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) - ); + private static final DateFormatter STRICT_DATE_TIME_NO_MILLIS; + static { + DateTimeParser[] javaTimeParsers = new DateTimeParser[] { + new JavaTimeDateTimeParser( + new DateTimeFormatterBuilder().append(STRICT_DATE_TIME_NO_MILLIS_FORMATTER) + .appendZoneOrOffsetId() + .toFormatter(Locale.ROOT) + .withResolverStyle(ResolverStyle.STRICT) + ), + new JavaTimeDateTimeParser( + new DateTimeFormatterBuilder().append(STRICT_DATE_TIME_NO_MILLIS_FORMATTER) + .append(TIME_ZONE_FORMATTER_NO_COLON) + .toFormatter(Locale.ROOT) + .withResolverStyle(ResolverStyle.STRICT) + ) }; + + STRICT_DATE_TIME_NO_MILLIS = new JavaDateFormatter( + "strict_date_time_no_millis", + new JavaTimeDateTimePrinter( + new DateTimeFormatterBuilder().append(STRICT_DATE_TIME_NO_MILLIS_FORMATTER) + .appendOffset("+HH:MM", "Z") + .toFormatter(Locale.ROOT) + .withResolverStyle(ResolverStyle.STRICT) + ), + JAVA_TIME_PARSERS_ONLY + ? 
javaTimeParsers + : prepend( + new Iso8601DateTimeParser( + Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), + false, + SECOND_OF_MINUTE, + DecimalSeparator.BOTH, + TimezonePresence.MANDATORY + ).withLocale(Locale.ROOT), + javaTimeParsers + ) + ); + } // NOTE: this is not a strict formatter to retain the joda time based behaviour, even though it's named like this private static final DateTimeFormatter STRICT_HOUR_MINUTE_SECOND_MILLIS_FORMATTER = new DateTimeFormatterBuilder().append( @@ -891,37 +979,75 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p * two digit minute of hour, two digit second of minute, and three digit * fraction of second (uuuu-MM-dd'T'HH:mm:ss.SSS). */ - private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND_FRACTION = newDateFormatter( - "strict_date_hour_minute_second_fraction", - new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) - .appendLiteral("T") - .append(STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER) - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT), - new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) - .appendLiteral("T") - .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) - // this one here is lenient as well to retain joda time based bwc compatibility - .appendFraction(NANO_OF_SECOND, 1, 9, true) - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) - ); + private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND_FRACTION; + static { + DateTimeParser javaTimeParser = new JavaTimeDateTimeParser( + new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) + .appendLiteral("T") + .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) + // this one here is lenient as well to retain joda time based bwc compatibility + .appendFraction(NANO_OF_SECOND, 1, 9, true) + .toFormatter(Locale.ROOT) + .withResolverStyle(ResolverStyle.STRICT) + ); - private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND_MILLIS = newDateFormatter( - "strict_date_hour_minute_second_millis", - new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) - .appendLiteral("T") - .append(STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER) - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT), - new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) - .appendLiteral("T") - .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) - // this one here is lenient as well to retain joda time based bwc compatibility - .appendFraction(NANO_OF_SECOND, 1, 9, true) - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) - ); + STRICT_DATE_HOUR_MINUTE_SECOND_FRACTION = new JavaDateFormatter( + "strict_date_hour_minute_second_fraction", + new JavaTimeDateTimePrinter( + new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) + .appendLiteral("T") + .append(STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER) + .toFormatter(Locale.ROOT) + .withResolverStyle(ResolverStyle.STRICT) + ), + JAVA_TIME_PARSERS_ONLY + ? 
new DateTimeParser[] { javaTimeParser } + : new DateTimeParser[] { + new Iso8601DateTimeParser( + Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE, NANO_OF_SECOND), + false, + null, + DecimalSeparator.DOT, + TimezonePresence.FORBIDDEN + ).withLocale(Locale.ROOT), + javaTimeParser } + ); + } + + private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND_MILLIS; + static { + DateTimeParser javaTimeParser = new JavaTimeDateTimeParser( + new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) + .appendLiteral("T") + .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) + // this one here is lenient as well to retain joda time based bwc compatibility + .appendFraction(NANO_OF_SECOND, 1, 9, true) + .toFormatter(Locale.ROOT) + .withResolverStyle(ResolverStyle.STRICT) + ); + + STRICT_DATE_HOUR_MINUTE_SECOND_MILLIS = new JavaDateFormatter( + "strict_date_hour_minute_second_millis", + new JavaTimeDateTimePrinter( + new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) + .appendLiteral("T") + .append(STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER) + .toFormatter(Locale.ROOT) + .withResolverStyle(ResolverStyle.STRICT) + ), + JAVA_TIME_PARSERS_ONLY + ? new DateTimeParser[] { javaTimeParser } + : new DateTimeParser[] { + new Iso8601DateTimeParser( + Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE, NANO_OF_SECOND), + false, + null, + DecimalSeparator.DOT, + TimezonePresence.FORBIDDEN + ).withLocale(Locale.ROOT), + javaTimeParser } + ); + } /* * Returns a formatter for a two digit hour of day. (HH) @@ -1235,10 +1361,27 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p * two digit minute of hour, and two digit second of * minute. (uuuu-MM-dd'T'HH:mm:ss) */ - private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND = newDateFormatter( - "strict_date_hour_minute_second", - DateTimeFormatter.ofPattern("uuuu-MM-dd'T'HH:mm:ss", Locale.ROOT) - ); + private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND; + static { + DateTimeFormatter javaTimeFormatter = DateTimeFormatter.ofPattern("uuuu-MM-dd'T'HH:mm:ss", Locale.ROOT); + DateTimeParser javaTimeParser = new JavaTimeDateTimeParser(javaTimeFormatter); + + STRICT_DATE_HOUR_MINUTE_SECOND = new JavaDateFormatter( + "strict_date_hour_minute_second", + new JavaTimeDateTimePrinter(javaTimeFormatter), + JAVA_TIME_PARSERS_ONLY + ? new DateTimeParser[] { javaTimeParser } + : new DateTimeParser[] { + new Iso8601DateTimeParser( + Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), + false, + SECOND_OF_MINUTE, + DecimalSeparator.BOTH, + TimezonePresence.FORBIDDEN + ).withLocale(Locale.ROOT), + javaTimeParser } + ); + } /* * A basic formatter for a full date as four digit year, two digit diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java b/server/src/main/java/org/elasticsearch/common/time/DecimalSeparator.java similarity index 77% rename from server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java rename to server/src/main/java/org/elasticsearch/common/time/DecimalSeparator.java index 7cb7d69ea4b6f..3598599e1f759 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java +++ b/server/src/main/java/org/elasticsearch/common/time/DecimalSeparator.java @@ -6,6 +6,10 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.search.geo; +package org.elasticsearch.common.time; -public class GeoShapeQueryTests extends GeoShapeQueryTestCase {} +enum DecimalSeparator { + DOT, + COMMA, + BOTH +} diff --git a/server/src/main/java/org/elasticsearch/common/time/Iso8601DateTimeParser.java b/server/src/main/java/org/elasticsearch/common/time/Iso8601DateTimeParser.java index cce4b13f4a166..027c1ec94a411 100644 --- a/server/src/main/java/org/elasticsearch/common/time/Iso8601DateTimeParser.java +++ b/server/src/main/java/org/elasticsearch/common/time/Iso8601DateTimeParser.java @@ -24,8 +24,14 @@ class Iso8601DateTimeParser implements DateTimeParser { // and we already account for . or , in decimals private final Locale locale; - Iso8601DateTimeParser(Set mandatoryFields, boolean optionalTime) { - parser = new Iso8601Parser(mandatoryFields, optionalTime, Map.of()); + Iso8601DateTimeParser( + Set mandatoryFields, + boolean optionalTime, + ChronoField maxAllowedField, + DecimalSeparator decimalSeparator, + TimezonePresence timezonePresence + ) { + parser = new Iso8601Parser(mandatoryFields, optionalTime, maxAllowedField, decimalSeparator, timezonePresence, Map.of()); timezone = null; locale = null; } @@ -57,7 +63,18 @@ public DateTimeParser withLocale(Locale locale) { } Iso8601DateTimeParser withDefaults(Map defaults) { - return new Iso8601DateTimeParser(new Iso8601Parser(parser.mandatoryFields(), parser.optionalTime(), defaults), timezone, locale); + return new Iso8601DateTimeParser( + new Iso8601Parser( + parser.mandatoryFields(), + parser.optionalTime(), + parser.maxAllowedField(), + parser.decimalSeparator(), + parser.timezonePresence(), + defaults + ), + timezone, + locale + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/time/Iso8601Parser.java b/server/src/main/java/org/elasticsearch/common/time/Iso8601Parser.java index fe92ff62b6ddc..6e420df9c72dd 100644 --- a/server/src/main/java/org/elasticsearch/common/time/Iso8601Parser.java +++ b/server/src/main/java/org/elasticsearch/common/time/Iso8601Parser.java @@ -13,16 +13,18 @@ import java.time.DateTimeException; import java.time.ZoneId; import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; import java.time.temporal.ChronoField; import java.util.EnumMap; import java.util.EnumSet; import java.util.Map; +import java.util.Objects; import java.util.Set; /** * Parses datetimes in ISO8601 format (and subsequences thereof). *

    - * This is faster than the generic parsing in {@link java.time.format.DateTimeFormatter}, as this is hard-coded and specific to ISO-8601. + * This is faster than the generic parsing in {@link DateTimeFormatter}, as this is hard-coded and specific to ISO-8601. * Various public libraries provide their own variant of this mechanism. We use our own for a few reasons: *

      *
    • @@ -37,13 +39,14 @@ */ class Iso8601Parser { - private static final Set VALID_MANDATORY_FIELDS = EnumSet.of( + private static final Set VALID_SPECIFIED_FIELDS = EnumSet.of( ChronoField.YEAR, ChronoField.MONTH_OF_YEAR, ChronoField.DAY_OF_MONTH, ChronoField.HOUR_OF_DAY, ChronoField.MINUTE_OF_HOUR, - ChronoField.SECOND_OF_MINUTE + ChronoField.SECOND_OF_MINUTE, + ChronoField.NANO_OF_SECOND ); private static final Set VALID_DEFAULT_FIELDS = EnumSet.of( @@ -57,31 +60,51 @@ class Iso8601Parser { private final Set mandatoryFields; private final boolean optionalTime; + @Nullable + private final ChronoField maxAllowedField; + private final DecimalSeparator decimalSeparator; + private final TimezonePresence timezonePresence; private final Map defaults; /** * Constructs a new {@code Iso8601Parser} object * - * @param mandatoryFields - * The set of fields that must be present for a valid parse. These should be specified in field order - * (eg if {@link ChronoField#DAY_OF_MONTH} is specified, {@link ChronoField#MONTH_OF_YEAR} should also be specified). - * {@link ChronoField#YEAR} is always mandatory. - * @param optionalTime - * {@code false} if the presence of time fields follows {@code mandatoryFields}, - * {@code true} if a time component is always optional, despite the presence of time fields in {@code mandatoryFields}. - * This makes it possible to specify 'time is optional, but if it is present, it must have these fields' - * by settings {@code optionalTime = true} and putting time fields such as {@link ChronoField#HOUR_OF_DAY} - * and {@link ChronoField#MINUTE_OF_HOUR} in {@code mandatoryFields}. - * @param defaults - * Map of default field values, if they are not present in the parsed string. + * @param mandatoryFields The set of fields that must be present for a valid parse. These should be specified in field order + * (eg if {@link ChronoField#DAY_OF_MONTH} is specified, + * {@link ChronoField#MONTH_OF_YEAR} should also be specified). + * {@link ChronoField#YEAR} is always mandatory. + * @param optionalTime {@code false} if the presence of time fields follows {@code mandatoryFields}, + * {@code true} if a time component is always optional, + * despite the presence of time fields in {@code mandatoryFields}. + * This makes it possible to specify 'time is optional, but if it is present, it must have these fields' + * by settings {@code optionalTime = true} and putting time fields such as {@link ChronoField#HOUR_OF_DAY} + * and {@link ChronoField#MINUTE_OF_HOUR} in {@code mandatoryFields}. + * @param maxAllowedField The most-specific field allowed in the parsed string, + * or {@code null} if everything up to nanoseconds is allowed. + * @param decimalSeparator The decimal separator that is allowed. + * @param timezonePresence Specifies if the timezone is optional, mandatory, or forbidden. + * @param defaults Map of default field values, if they are not present in the parsed string. 
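Putting the new constructor parameters together: the DateFormatters change earlier in this diff configures strict_date_time so that month through second are mandatory, only a dot fraction separator is allowed, and a timezone must be present. A hedged sketch of that same configuration expressed directly against this constructor (the field constants are java.time.temporal.ChronoField values):

    Iso8601Parser strictDateTime = new Iso8601Parser(
        Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE),
        false,                      // the listed time fields are genuinely mandatory
        null,                       // no cap: anything up to nanoseconds may appear
        DecimalSeparator.DOT,
        TimezonePresence.MANDATORY,
        Map.of()                    // no default field values
    );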
*/ - Iso8601Parser(Set mandatoryFields, boolean optionalTime, Map defaults) { - checkChronoFields(mandatoryFields, VALID_MANDATORY_FIELDS); + Iso8601Parser( + Set mandatoryFields, + boolean optionalTime, + @Nullable ChronoField maxAllowedField, + DecimalSeparator decimalSeparator, + TimezonePresence timezonePresence, + Map defaults + ) { + checkChronoFields(mandatoryFields, VALID_SPECIFIED_FIELDS); + if (maxAllowedField != null && VALID_SPECIFIED_FIELDS.contains(maxAllowedField) == false) { + throw new IllegalArgumentException("Invalid chrono field specified " + maxAllowedField); + } checkChronoFields(defaults.keySet(), VALID_DEFAULT_FIELDS); this.mandatoryFields = EnumSet.of(ChronoField.YEAR); // year is always mandatory this.mandatoryFields.addAll(mandatoryFields); this.optionalTime = optionalTime; + this.maxAllowedField = maxAllowedField; + this.decimalSeparator = Objects.requireNonNull(decimalSeparator); + this.timezonePresence = Objects.requireNonNull(timezonePresence); this.defaults = defaults.isEmpty() ? Map.of() : new EnumMap<>(defaults); } @@ -103,6 +126,18 @@ Set mandatoryFields() { return mandatoryFields; } + ChronoField maxAllowedField() { + return maxAllowedField; + } + + DecimalSeparator decimalSeparator() { + return decimalSeparator; + } + + TimezonePresence timezonePresence() { + return timezonePresence; + } + private boolean isOptional(ChronoField field) { return mandatoryFields.contains(field) == false; } @@ -186,7 +221,7 @@ private ParseResult parse(CharSequence str, @Nullable ZoneId defaultTimezone) { : ParseResult.error(4); } - if (str.charAt(4) != '-') return ParseResult.error(4); + if (str.charAt(4) != '-' || maxAllowedField == ChronoField.YEAR) return ParseResult.error(4); // MONTHS Integer months = parseInt(str, 5, 7); @@ -208,7 +243,7 @@ private ParseResult parse(CharSequence str, @Nullable ZoneId defaultTimezone) { : ParseResult.error(7); } - if (str.charAt(7) != '-') return ParseResult.error(7); + if (str.charAt(7) != '-' || maxAllowedField == ChronoField.MONTH_OF_YEAR) return ParseResult.error(7); // DAYS Integer days = parseInt(str, 8, 10); @@ -230,7 +265,7 @@ private ParseResult parse(CharSequence str, @Nullable ZoneId defaultTimezone) { : ParseResult.error(10); } - if (str.charAt(10) != 'T') return ParseResult.error(10); + if (str.charAt(10) != 'T' || maxAllowedField == ChronoField.DAY_OF_MONTH) return ParseResult.error(10); if (len == 11) { return isOptional(ChronoField.HOUR_OF_DAY) ? new ParseResult( @@ -252,7 +287,7 @@ private ParseResult parse(CharSequence str, @Nullable ZoneId defaultTimezone) { Integer hours = parseInt(str, 11, 13); if (hours == null || hours > 23) return ParseResult.error(11); if (len == 13) { - return isOptional(ChronoField.MINUTE_OF_HOUR) + return isOptional(ChronoField.MINUTE_OF_HOUR) && timezonePresence != TimezonePresence.MANDATORY ? new ParseResult( withZoneOffset( years, @@ -285,13 +320,13 @@ private ParseResult parse(CharSequence str, @Nullable ZoneId defaultTimezone) { : ParseResult.error(13); } - if (str.charAt(13) != ':') return ParseResult.error(13); + if (str.charAt(13) != ':' || maxAllowedField == ChronoField.HOUR_OF_DAY) return ParseResult.error(13); // MINUTES + timezone Integer minutes = parseInt(str, 14, 16); if (minutes == null || minutes > 59) return ParseResult.error(14); if (len == 16) { - return isOptional(ChronoField.SECOND_OF_MINUTE) + return isOptional(ChronoField.SECOND_OF_MINUTE) && timezonePresence != TimezonePresence.MANDATORY ? 
new ParseResult( withZoneOffset( years, @@ -324,15 +359,17 @@ private ParseResult parse(CharSequence str, @Nullable ZoneId defaultTimezone) { : ParseResult.error(16); } - if (str.charAt(16) != ':') return ParseResult.error(16); + if (str.charAt(16) != ':' || maxAllowedField == ChronoField.MINUTE_OF_HOUR) return ParseResult.error(16); // SECONDS + timezone Integer seconds = parseInt(str, 17, 19); if (seconds == null || seconds > 59) return ParseResult.error(17); if (len == 19) { - return new ParseResult( - withZoneOffset(years, months, days, hours, minutes, seconds, defaultZero(ChronoField.NANO_OF_SECOND), defaultTimezone) - ); + return isOptional(ChronoField.NANO_OF_SECOND) && timezonePresence != TimezonePresence.MANDATORY + ? new ParseResult( + withZoneOffset(years, months, days, hours, minutes, seconds, defaultZero(ChronoField.NANO_OF_SECOND), defaultTimezone) + ) + : ParseResult.error(19); } if (isZoneId(str, 19)) { ZoneId timezone = parseZoneId(str, 19); @@ -343,11 +380,9 @@ private ParseResult parse(CharSequence str, @Nullable ZoneId defaultTimezone) { : ParseResult.error(19); } - char decSeparator = str.charAt(19); - if (decSeparator != '.' && decSeparator != ',') return ParseResult.error(19); + if (checkDecimalSeparator(str.charAt(19)) == false || maxAllowedField == ChronoField.SECOND_OF_MINUTE) return ParseResult.error(19); // NANOS + timezone - // nanos are always optional // the last number could be millis or nanos, or any combination in the middle // so we keep parsing numbers until we get to not a number int nanos = 0; @@ -364,7 +399,9 @@ private ParseResult parse(CharSequence str, @Nullable ZoneId defaultTimezone) { nanos *= NANO_MULTIPLICANDS[29 - pos]; if (len == pos) { - return new ParseResult(withZoneOffset(years, months, days, hours, minutes, seconds, nanos, defaultTimezone)); + return timezonePresence != TimezonePresence.MANDATORY + ? new ParseResult(withZoneOffset(years, months, days, hours, minutes, seconds, nanos, defaultTimezone)) + : ParseResult.error(pos); } if (isZoneId(str, pos)) { ZoneId timezone = parseZoneId(str, pos); @@ -377,6 +414,16 @@ private ParseResult parse(CharSequence str, @Nullable ZoneId defaultTimezone) { return ParseResult.error(pos); } + private boolean checkDecimalSeparator(char separator) { + boolean isDot = separator == '.'; + boolean isComma = separator == ','; + return switch (decimalSeparator) { + case DOT -> isDot; + case COMMA -> isComma; + case BOTH -> isDot || isComma; + }; + } + private static boolean isZoneId(CharSequence str, int pos) { // all region zoneIds must start with [A-Za-z] (see ZoneId#of) // this also covers Z and UT/UTC/GMT zone variants @@ -385,10 +432,14 @@ private static boolean isZoneId(CharSequence str, int pos) { } /** - * This parses the zone offset, which is of the format accepted by {@link java.time.ZoneId#of(String)}. + * This parses the zone offset, which is of the format accepted by {@link ZoneId#of(String)}. * It has fast paths for numerical offsets, but falls back on {@code ZoneId.of} for non-trivial zone ids. 
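One observable effect of TimezonePresence as configured for strict_date_time above: a zone is now required on the ISO fast path, and the java-time fallback parsers require one as well, so the overall parse fails without it. A hedged behavioural sketch via the public DateFormatter API (the failing parse throws IllegalArgumentException):

    DateFormatter f = DateFormatter.forPattern("strict_date_time");
    f.parse("2024-03-01T15:30:00.123Z");  // ok: dot fraction separator, zone present
    f.parse("2024-03-01T15:30:00.123");   // fails: this format requires a timezone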
*/ private ZoneId parseZoneId(CharSequence str, int pos) { + if (timezonePresence == TimezonePresence.FORBIDDEN) { + return null; + } + int len = str.length(); char first = str.charAt(pos); diff --git a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java index e8d729f9e9977..79b0c44d39108 100644 --- a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java +++ b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java @@ -18,7 +18,6 @@ import java.time.temporal.TemporalAccessor; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Map; @@ -149,19 +148,24 @@ static DateFormatter combined(String input, List formatters) { assert formatters.isEmpty() == false; DateTimePrinter printer = null; - List parsers = new ArrayList<>(formatters.size()); - List roundUpParsers = new ArrayList<>(formatters.size()); + List parsers = new ArrayList<>(formatters.size()); + List roundUpParsers = new ArrayList<>(formatters.size()); for (DateFormatter formatter : formatters) { JavaDateFormatter javaDateFormatter = (JavaDateFormatter) formatter; if (printer == null) { printer = javaDateFormatter.printer; } - Collections.addAll(parsers, javaDateFormatter.parsers); - Collections.addAll(roundUpParsers, javaDateFormatter.roundupParsers); + parsers.add(javaDateFormatter.parsers); + roundUpParsers.add(javaDateFormatter.roundupParsers); } - return new JavaDateFormatter(input, printer, roundUpParsers.toArray(DateTimeParser[]::new), parsers.toArray(DateTimeParser[]::new)); + return new JavaDateFormatter( + input, + printer, + roundUpParsers.stream().flatMap(Arrays::stream).toArray(DateTimeParser[]::new), + parsers.stream().flatMap(Arrays::stream).toArray(DateTimeParser[]::new) + ); } private JavaDateFormatter(String format, DateTimePrinter printer, DateTimeParser[] roundupParsers, DateTimeParser[] parsers) { diff --git a/server/src/main/java/org/elasticsearch/common/time/TimezonePresence.java b/server/src/main/java/org/elasticsearch/common/time/TimezonePresence.java new file mode 100644 index 0000000000000..fd8cdcc28976d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/time/TimezonePresence.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.common.time; + +enum TimezonePresence { + OPTIONAL, + MANDATORY, + FORBIDDEN +} diff --git a/server/src/main/java/org/elasticsearch/common/unit/MemorySizeValue.java b/server/src/main/java/org/elasticsearch/common/unit/MemorySizeValue.java index bfe4e18367a74..274a4e67367c7 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/MemorySizeValue.java +++ b/server/src/main/java/org/elasticsearch/common/unit/MemorySizeValue.java @@ -9,6 +9,9 @@ package org.elasticsearch.common.unit; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.monitor.jvm.JvmInfo; import java.util.Objects; @@ -25,18 +28,49 @@ public enum MemorySizeValue { public static ByteSizeValue parseBytesSizeValueOrHeapRatio(String sValue, String settingName) { settingName = Objects.requireNonNull(settingName); if (sValue != null && sValue.endsWith("%")) { - final String percentAsString = sValue.substring(0, sValue.length() - 1); - try { - final double percent = Double.parseDouble(percentAsString); - if (percent < 0 || percent > 100) { - throw new ElasticsearchParseException("percentage should be in [0-100], got [{}]", percentAsString); - } - return ByteSizeValue.ofBytes((long) ((percent / 100) * JvmInfo.jvmInfo().getMem().getHeapMax().getBytes())); - } catch (NumberFormatException e) { - throw new ElasticsearchParseException("failed to parse [{}] as a double", e, percentAsString); - } + return parseHeapRatio(sValue, settingName, 0); } else { return parseBytesSizeValue(sValue, settingName); } } + + public static ByteSizeValue parseHeapRatioOrDeprecatedByteSizeValue(String sValue, String settingName, double minHeapPercent) { + settingName = Objects.requireNonNull(settingName); + if (sValue != null && sValue.endsWith("%")) { + return parseHeapRatio(sValue, settingName, minHeapPercent); + } else { + DeprecationLogger.getLogger(BalancedShardsAllocator.class) + .critical( + DeprecationCategory.SETTINGS, + "absolute_size_not_supported", + "[{}] should be specified using a percentage of the heap. 
Absolute size settings will be forbidden in a future release", + settingName + ); + return parseBytesSizeValue(sValue, settingName); + } + } + + private static ByteSizeValue parseHeapRatio(String sValue, String settingName, double minHeapPercent) { + final String percentAsString = sValue.substring(0, sValue.length() - 1); + try { + final double percent = Double.parseDouble(percentAsString); + if (percent < 0 || percent > 100) { + throw new ElasticsearchParseException("percentage should be in [0-100], got [{}]", percentAsString); + } else if (percent < minHeapPercent) { + DeprecationLogger.getLogger(MemorySizeValue.class) + .warn( + DeprecationCategory.SETTINGS, + "memory_size_below_minimum", + "[{}] setting of [{}] is below the recommended minimum of {}% of the heap", + settingName, + sValue, + minHeapPercent + ); + } + return ByteSizeValue.ofBytes((long) ((percent / 100) * JvmInfo.jvmInfo().getMem().getHeapMax().getBytes())); + } catch (NumberFormatException e) { + throw new ElasticsearchParseException("failed to parse [{}] as a double", e, percentAsString); + } + } + } diff --git a/server/src/main/java/org/elasticsearch/common/util/ArrayUtils.java b/server/src/main/java/org/elasticsearch/common/util/ArrayUtils.java index 2f1264fa88247..0b48a298fe59a 100644 --- a/server/src/main/java/org/elasticsearch/common/util/ArrayUtils.java +++ b/server/src/main/java/org/elasticsearch/common/util/ArrayUtils.java @@ -68,6 +68,21 @@ public static T[] concat(T[] one, T[] other) { return target; } + /** + * Copy the given element and array into a new array of size {@code array.length + 1}. + * @param added first element in the newly created array + * @param array array to copy to the end of new returned array copy + * @return copy that contains added element and array + * @param type of the array elements + */ + public static T[] prepend(T added, T[] array) { + @SuppressWarnings("unchecked") + T[] updated = (T[]) Array.newInstance(array.getClass().getComponentType(), array.length + 1); + updated[0] = added; + System.arraycopy(array, 0, updated, 1, array.length); + return updated; + } + /** * Copy the given array and the added element into a new array of size {@code array.length + 1}. 
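The new prepend helper above exists so the DateFormatters changes earlier in this diff can place the ISO-8601 fast-path parser ahead of the existing java-time parsers without rebuilding arrays by hand. A quick usage sketch with plain strings:

    String[] fallbacks = { "java-time-1", "java-time-2" };
    String[] all = ArrayUtils.prepend("iso-8601", fallbacks);
    // all is { "iso-8601", "java-time-1", "java-time-2" }; fallbacks itself is not modified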
* @param array array to copy to the beginning of new returned array copy @@ -76,9 +91,7 @@ public static T[] concat(T[] one, T[] other) { * @param type of the array elements */ public static T[] append(T[] array, T added) { - @SuppressWarnings("unchecked") - final T[] updated = (T[]) Array.newInstance(added.getClass(), array.length + 1); - System.arraycopy(array, 0, updated, 0, array.length); + T[] updated = Arrays.copyOf(array, array.length + 1); updated[array.length] = added; return updated; } diff --git a/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java b/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java index 1e714f122d885..06ee78d99c532 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java @@ -41,15 +41,15 @@ public void writeTo(StreamOutput out) throws IOException { @Override public byte get(long index) { - final int pageIndex = pageIndex(index); - final int indexInPage = indexInPage(index); + final int pageIndex = pageIdx(index); + final int indexInPage = idxInPage(index); return pages[pageIndex][indexInPage]; } @Override public void set(long index, byte value) { - final int pageIndex = pageIndex(index); - final int indexInPage = indexInPage(index); + final int pageIndex = pageIdx(index); + final int indexInPage = idxInPage(index); final byte[] page = getPageForWriting(pageIndex); page[indexInPage] = value; } @@ -61,9 +61,9 @@ public boolean get(long index, int len, BytesRef ref) { ref.length = 0; return false; } - int pageIndex = pageIndex(index); - final int indexInPage = indexInPage(index); - if (indexInPage + len <= pageSize()) { + int pageIndex = pageIdx(index); + final int indexInPage = idxInPage(index); + if (indexInPage + len <= BYTE_PAGE_SIZE) { ref.bytes = pages[pageIndex]; ref.offset = indexInPage; ref.length = len; @@ -71,11 +71,11 @@ public boolean get(long index, int len, BytesRef ref) { } else { ref.bytes = new byte[len]; ref.offset = 0; - ref.length = pageSize() - indexInPage; + ref.length = BYTE_PAGE_SIZE - indexInPage; System.arraycopy(pages[pageIndex], indexInPage, ref.bytes, 0, ref.length); do { ++pageIndex; - final int copyLength = Math.min(pageSize(), len - ref.length); + final int copyLength = Math.min(BYTE_PAGE_SIZE, len - ref.length); System.arraycopy(pages[pageIndex], 0, ref.bytes, ref.length, copyLength); ref.length += copyLength; } while (ref.length < len); @@ -86,18 +86,18 @@ public boolean get(long index, int len, BytesRef ref) { @Override public void set(long index, byte[] buf, int offset, int len) { assert index + len <= size(); - int pageIndex = pageIndex(index); - final int indexInPage = indexInPage(index); - if (indexInPage + len <= pageSize()) { + int pageIndex = pageIdx(index); + final int indexInPage = idxInPage(index); + if (indexInPage + len <= BYTE_PAGE_SIZE) { System.arraycopy(buf, offset, getPageForWriting(pageIndex), indexInPage, len); } else { - int copyLen = pageSize() - indexInPage; + int copyLen = BYTE_PAGE_SIZE - indexInPage; System.arraycopy(buf, offset, getPageForWriting(pageIndex), indexInPage, copyLen); do { ++pageIndex; offset += copyLen; len -= copyLen; - copyLen = Math.min(len, pageSize()); + copyLen = Math.min(len, BYTE_PAGE_SIZE); System.arraycopy(buf, offset, getPageForWriting(pageIndex), 0, copyLen); } while (len > copyLen); } @@ -108,16 +108,16 @@ public void fill(long fromIndex, long toIndex, byte value) { if (fromIndex > toIndex) { throw new IllegalArgumentException(); } - final 
int fromPage = pageIndex(fromIndex); - final int toPage = pageIndex(toIndex - 1); + final int fromPage = pageIdx(fromIndex); + final int toPage = pageIdx(toIndex - 1); if (fromPage == toPage) { - Arrays.fill(getPageForWriting(fromPage), indexInPage(fromIndex), indexInPage(toIndex - 1) + 1, value); + Arrays.fill(getPageForWriting(fromPage), idxInPage(fromIndex), idxInPage(toIndex - 1) + 1, value); } else { - Arrays.fill(getPageForWriting(fromPage), indexInPage(fromIndex), pages[fromPage].length, value); + Arrays.fill(getPageForWriting(fromPage), idxInPage(fromIndex), pages[fromPage].length, value); for (int i = fromPage + 1; i < toPage; ++i) { Arrays.fill(getPageForWriting(i), value); } - Arrays.fill(getPageForWriting(toPage), 0, indexInPage(toIndex - 1) + 1, value); + Arrays.fill(getPageForWriting(toPage), 0, idxInPage(toIndex - 1) + 1, value); } } @@ -169,4 +169,14 @@ public static long estimateRamBytes(final long size) { return ESTIMATOR.ramBytesEstimated(size); } + private static final int PAGE_SHIFT = Integer.numberOfTrailingZeros(PAGE_SIZE_IN_BYTES); + + private static int pageIdx(long index) { + return (int) (index >>> PAGE_SHIFT); + } + + private static int idxInPage(long index) { + return (int) (index & PAGE_SIZE_IN_BYTES - 1); + } + } diff --git a/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java b/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java index 3135ebb293070..cfd44d82c757e 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java @@ -36,23 +36,23 @@ final class BigDoubleArray extends AbstractBigByteArray implements DoubleArray { @Override public double get(long index) { - final int pageIndex = pageIndex(index); - final int indexInPage = indexInPage(index); + final int pageIndex = pageIdx(index); + final int indexInPage = idxInPage(index); return (double) VH_PLATFORM_NATIVE_DOUBLE.get(pages[pageIndex], indexInPage << 3); } @Override public void set(long index, double value) { - final int pageIndex = pageIndex(index); - final int indexInPage = indexInPage(index); + final int pageIndex = pageIdx(index); + final int indexInPage = idxInPage(index); final byte[] page = getPageForWriting(pageIndex); VH_PLATFORM_NATIVE_DOUBLE.set(page, indexInPage << 3, value); } @Override public double increment(long index, double inc) { - final int pageIndex = pageIndex(index); - final int indexInPage = indexInPage(index); + final int pageIndex = pageIdx(index); + final int indexInPage = idxInPage(index); final byte[] page = getPageForWriting(pageIndex); final double newVal = (double) VH_PLATFORM_NATIVE_DOUBLE.get(page, indexInPage << 3) + inc; VH_PLATFORM_NATIVE_DOUBLE.set(page, indexInPage << 3, newVal); @@ -69,16 +69,16 @@ public void fill(long fromIndex, long toIndex, double value) { if (fromIndex > toIndex) { throw new IllegalArgumentException(); } - final int fromPage = pageIndex(fromIndex); - final int toPage = pageIndex(toIndex - 1); + final int fromPage = pageIdx(fromIndex); + final int toPage = pageIdx(toIndex - 1); if (fromPage == toPage) { - fill(getPageForWriting(fromPage), indexInPage(fromIndex), indexInPage(toIndex - 1) + 1, value); + fill(getPageForWriting(fromPage), idxInPage(fromIndex), idxInPage(toIndex - 1) + 1, value); } else { - fill(getPageForWriting(fromPage), indexInPage(fromIndex), pageSize(), value); + fill(getPageForWriting(fromPage), idxInPage(fromIndex), DOUBLE_PAGE_SIZE, value); for (int i = fromPage + 1; i < toPage; ++i) 
{ - fill(getPageForWriting(i), 0, pageSize(), value); + fill(getPageForWriting(i), 0, DOUBLE_PAGE_SIZE, value); } - fill(getPageForWriting(toPage), 0, indexInPage(toIndex - 1) + 1, value); + fill(getPageForWriting(toPage), 0, idxInPage(toIndex - 1) + 1, value); } } @@ -108,4 +108,14 @@ public void set(long index, byte[] buf, int offset, int len) { public void writeTo(StreamOutput out) throws IOException { writePages(out, size, pages, Double.BYTES); } + + private static final int PAGE_SHIFT = Integer.numberOfTrailingZeros(DOUBLE_PAGE_SIZE); + + private static int pageIdx(long index) { + return (int) (index >>> PAGE_SHIFT); + } + + private static int idxInPage(long index) { + return (int) (index & DOUBLE_PAGE_SIZE - 1); + } } diff --git a/server/src/main/java/org/elasticsearch/common/util/BigFloatArray.java b/server/src/main/java/org/elasticsearch/common/util/BigFloatArray.java index 380b2c8e12b34..704a47d60473f 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigFloatArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigFloatArray.java @@ -31,16 +31,16 @@ final class BigFloatArray extends AbstractBigByteArray implements FloatArray { @Override public void set(long index, float value) { - final int pageIndex = pageIndex(index); - final int indexInPage = indexInPage(index); + final int pageIndex = pageIdx(index); + final int indexInPage = idxInPage(index); final byte[] page = getPageForWriting(pageIndex); VH_PLATFORM_NATIVE_FLOAT.set(page, indexInPage << 2, value); } @Override public float get(long index) { - final int pageIndex = pageIndex(index); - final int indexInPage = indexInPage(index); + final int pageIndex = pageIdx(index); + final int indexInPage = idxInPage(index); return (float) VH_PLATFORM_NATIVE_FLOAT.get(pages[pageIndex], indexInPage << 2); } @@ -54,16 +54,16 @@ public void fill(long fromIndex, long toIndex, float value) { if (fromIndex > toIndex) { throw new IllegalArgumentException(); } - final int fromPage = pageIndex(fromIndex); - final int toPage = pageIndex(toIndex - 1); + final int fromPage = pageIdx(fromIndex); + final int toPage = pageIdx(toIndex - 1); if (fromPage == toPage) { - fill(getPageForWriting(fromPage), indexInPage(fromIndex), indexInPage(toIndex - 1) + 1, value); + fill(getPageForWriting(fromPage), idxInPage(fromIndex), idxInPage(toIndex - 1) + 1, value); } else { - fill(getPageForWriting(fromPage), indexInPage(fromIndex), pageSize(), value); + fill(getPageForWriting(fromPage), idxInPage(fromIndex), FLOAT_PAGE_SIZE, value); for (int i = fromPage + 1; i < toPage; ++i) { - fill(getPageForWriting(i), 0, pageSize(), value); + fill(getPageForWriting(i), 0, FLOAT_PAGE_SIZE, value); } - fill(getPageForWriting(toPage), 0, indexInPage(toIndex - 1) + 1, value); + fill(getPageForWriting(toPage), 0, idxInPage(toIndex - 1) + 1, value); } } @@ -83,4 +83,14 @@ public static long estimateRamBytes(final long size) { public void set(long index, byte[] buf, int offset, int len) { set(index, buf, offset, len, 2); } + + private static final int PAGE_SHIFT = Integer.numberOfTrailingZeros(FLOAT_PAGE_SIZE); + + private static int pageIdx(long index) { + return (int) (index >>> PAGE_SHIFT); + } + + private static int idxInPage(long index) { + return (int) (index & FLOAT_PAGE_SIZE - 1); + } } diff --git a/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java b/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java index 9ce9842c337c0..5e9bccebdd0b5 100644 --- 
a/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java @@ -40,15 +40,15 @@ public void writeTo(StreamOutput out) throws IOException { @Override public int get(long index) { - final int pageIndex = pageIndex(index); - final int indexInPage = indexInPage(index); + final int pageIndex = pageIdx(index); + final int indexInPage = idxInPage(index); return (int) VH_PLATFORM_NATIVE_INT.get(pages[pageIndex], indexInPage << 2); } @Override public int getAndSet(long index, int value) { - final int pageIndex = pageIndex(index); - final int indexInPage = indexInPage(index); + final int pageIndex = pageIdx(index); + final int indexInPage = idxInPage(index); final byte[] page = getPageForWriting(pageIndex); final int ret = (int) VH_PLATFORM_NATIVE_INT.get(page, indexInPage << 2); VH_PLATFORM_NATIVE_INT.set(page, indexInPage << 2, value); @@ -57,15 +57,15 @@ public int getAndSet(long index, int value) { @Override public void set(long index, int value) { - final int pageIndex = pageIndex(index); - final int indexInPage = indexInPage(index); + final int pageIndex = pageIdx(index); + final int indexInPage = idxInPage(index); VH_PLATFORM_NATIVE_INT.set(getPageForWriting(pageIndex), indexInPage << 2, value); } @Override public int increment(long index, int inc) { - final int pageIndex = pageIndex(index); - final int indexInPage = indexInPage(index); + final int pageIndex = pageIdx(index); + final int indexInPage = idxInPage(index); final byte[] page = getPageForWriting(pageIndex); final int newVal = (int) VH_PLATFORM_NATIVE_INT.get(page, indexInPage << 2) + inc; VH_PLATFORM_NATIVE_INT.set(page, indexInPage << 2, newVal); @@ -77,16 +77,16 @@ public void fill(long fromIndex, long toIndex, int value) { if (fromIndex > toIndex) { throw new IllegalArgumentException(); } - final int fromPage = pageIndex(fromIndex); - final int toPage = pageIndex(toIndex - 1); + final int fromPage = pageIdx(fromIndex); + final int toPage = pageIdx(toIndex - 1); if (fromPage == toPage) { - fill(getPageForWriting(fromPage), indexInPage(fromIndex), indexInPage(toIndex - 1) + 1, value); + fill(getPageForWriting(fromPage), idxInPage(fromIndex), idxInPage(toIndex - 1) + 1, value); } else { - fill(getPageForWriting(fromPage), indexInPage(fromIndex), pageSize(), value); + fill(getPageForWriting(fromPage), idxInPage(fromIndex), INT_PAGE_SIZE, value); for (int i = fromPage + 1; i < toPage; ++i) { - fill(getPageForWriting(i), 0, pageSize(), value); + fill(getPageForWriting(i), 0, INT_PAGE_SIZE, value); } - fill(getPageForWriting(toPage), 0, indexInPage(toIndex - 1) + 1, value); + fill(getPageForWriting(toPage), 0, idxInPage(toIndex - 1) + 1, value); } } @@ -116,4 +116,14 @@ public static long estimateRamBytes(final long size) { public void set(long index, byte[] buf, int offset, int len) { set(index, buf, offset, len, 2); } + + private static final int PAGE_SHIFT = Integer.numberOfTrailingZeros(INT_PAGE_SIZE); + + private static int pageIdx(long index) { + return (int) (index >>> PAGE_SHIFT); + } + + private static int idxInPage(long index) { + return (int) (index & INT_PAGE_SIZE - 1); + } } diff --git a/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java b/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java index 7d23e06f87658..aee57feca66f4 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java @@ -35,15 +35,15 @@ final 
class BigLongArray extends AbstractBigByteArray implements LongArray { @Override public long get(long index) { - final int pageIndex = pageIndex(index); - final int indexInPage = indexInPage(index); + final int pageIndex = pageIdx(index); + final int indexInPage = idxInPage(index); return (long) VH_PLATFORM_NATIVE_LONG.get(pages[pageIndex], indexInPage << 3); } @Override public long getAndSet(long index, long value) { - final int pageIndex = pageIndex(index); - final int indexInPage = indexInPage(index); + final int pageIndex = pageIdx(index); + final int indexInPage = idxInPage(index); final byte[] page = getPageForWriting(pageIndex); final long ret = (long) VH_PLATFORM_NATIVE_LONG.get(page, indexInPage << 3); VH_PLATFORM_NATIVE_LONG.set(page, indexInPage << 3, value); @@ -52,16 +52,16 @@ public long getAndSet(long index, long value) { @Override public void set(long index, long value) { - final int pageIndex = pageIndex(index); - final int indexInPage = indexInPage(index); + final int pageIndex = pageIdx(index); + final int indexInPage = idxInPage(index); final byte[] page = getPageForWriting(pageIndex); VH_PLATFORM_NATIVE_LONG.set(page, indexInPage << 3, value); } @Override public long increment(long index, long inc) { - final int pageIndex = pageIndex(index); - final int indexInPage = indexInPage(index); + final int pageIndex = pageIdx(index); + final int indexInPage = idxInPage(index); final byte[] page = getPageForWriting(pageIndex); final long newVal = (long) VH_PLATFORM_NATIVE_LONG.get(page, indexInPage << 3) + inc; VH_PLATFORM_NATIVE_LONG.set(page, indexInPage << 3, newVal); @@ -81,16 +81,16 @@ public void fill(long fromIndex, long toIndex, long value) { if (fromIndex == toIndex) { return; // empty range } - final int fromPage = pageIndex(fromIndex); - final int toPage = pageIndex(toIndex - 1); + final int fromPage = pageIdx(fromIndex); + final int toPage = pageIdx(toIndex - 1); if (fromPage == toPage) { - fill(getPageForWriting(fromPage), indexInPage(fromIndex), indexInPage(toIndex - 1) + 1, value); + fill(getPageForWriting(fromPage), idxInPage(fromIndex), idxInPage(toIndex - 1) + 1, value); } else { - fill(getPageForWriting(fromPage), indexInPage(fromIndex), pageSize(), value); + fill(getPageForWriting(fromPage), idxInPage(fromIndex), LONG_PAGE_SIZE, value); for (int i = fromPage + 1; i < toPage; ++i) { - fill(getPageForWriting(i), 0, pageSize(), value); + fill(getPageForWriting(i), 0, LONG_PAGE_SIZE, value); } - fill(getPageForWriting(toPage), 0, indexInPage(toIndex - 1) + 1, value); + fill(getPageForWriting(toPage), 0, idxInPage(toIndex - 1) + 1, value); } } @@ -130,4 +130,14 @@ static void writePages(StreamOutput out, long size, byte[][] pages, int bytesPer remainedBytes -= len; } } + + private static final int PAGE_SHIFT = Integer.numberOfTrailingZeros(LONG_PAGE_SIZE); + + private static int pageIdx(long index) { + return (int) (index >>> PAGE_SHIFT); + } + + private static int idxInPage(long index) { + return (int) (index & LONG_PAGE_SIZE - 1); + } } diff --git a/server/src/main/java/org/elasticsearch/common/util/BitArray.java b/server/src/main/java/org/elasticsearch/common/util/BitArray.java index 53244a0f2888a..041111840056d 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BitArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BitArray.java @@ -64,6 +64,17 @@ public void writeTo(StreamOutput out) throws IOException { bits.writeTo(out); } + /** + * Set or clear the {@code index}th bit based on the specified value. 
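
The Big{Double,Float,Int,Long}Array hunks above replace the shared pageIndex()/indexInPage() helpers with per-class static pageIdx()/idxInPage() methods built from a constant shift and mask. A minimal standalone sketch of that decomposition, assuming a power-of-two page size; the names below are illustrative, not the actual BigArrays constants:

    final class PagedIndexSketch {
        static final int PAGE_SIZE = 1 << 12;                               // assumed power of two
        static final int PAGE_SHIFT = Integer.numberOfTrailingZeros(PAGE_SIZE);

        static int pageIdx(long index) {
            return (int) (index >>> PAGE_SHIFT);                            // which page holds the element
        }

        static int idxInPage(long index) {
            return (int) (index & (PAGE_SIZE - 1));                         // offset of the element within that page
        }

        public static void main(String[] args) {
            long index = 10_000L;
            System.out.println(pageIdx(index) + " / " + idxInPage(index));  // 2 / 1808
        }
    }

Because the shift and mask here are static final constants per element type, each access reduces to a single shift and a single AND; that appears to be the motivation for replacing the shared, instance-based helpers.
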
+ */ + public void set(long index, boolean value) { + if (value) { + set(index); + } else { + clear(index); + } + } + /** * Set the {@code index}th bit. */ @@ -158,6 +169,68 @@ public boolean get(long index) { return (bits.get(wordNum) & bitmask) != 0; } + /** + * Set or clear slots between {@code fromIndex} inclusive to {@code toIndex} based on {@code value}. + */ + public void fill(long fromIndex, long toIndex, boolean value) { + if (fromIndex > toIndex) { + throw new IllegalArgumentException("From should be less than or equal to toIndex"); + } + long currentSize = size(); + if (value == false) { + // There's no need to grow the array just to clear bits. + toIndex = Math.min(toIndex, currentSize); + } + if (fromIndex == toIndex) { + return; // Empty range + } + + if (toIndex > currentSize) { + bits = bigArrays.grow(bits, wordNum(toIndex) + 1); + } + + int wordLength = Long.BYTES * Byte.SIZE; + long fullWord = 0xFFFFFFFFFFFFFFFFL; + + long firstWordIndex = fromIndex % wordLength; + long lastWordIndex = toIndex % wordLength; + + long firstWordNum = wordNum(fromIndex); + long lastWordNum = wordNum(toIndex - 1); + + // Mask first word + if (firstWordIndex > 0) { + long mask = fullWord << firstWordIndex; + + if (firstWordNum == lastWordNum) { + mask &= fullWord >>> (wordLength - lastWordIndex); + } + + if (value) { + bits.set(firstWordNum, bits.get(firstWordNum) | mask); + } else { + bits.set(firstWordNum, bits.get(firstWordNum) & ~mask); + } + + firstWordNum++; + } + + // Mask last word + if (firstWordNum <= lastWordNum) { + long mask = fullWord >>> (wordLength - lastWordIndex); + + if (value) { + bits.set(lastWordNum, bits.get(lastWordNum) | mask); + } else { + bits.set(lastWordNum, bits.get(lastWordNum) & ~mask); + } + } + + if (firstWordNum < lastWordNum) { + bits.fill(firstWordNum, lastWordNum, value ? fullWord : 0L); + } + } + public long size() { return bits.size() * (long) Long.BYTES * Byte.SIZE; } diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionException.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionException.java index 6a67f41ab8004..3a21ea486ce39 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionException.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionException.java @@ -27,6 +27,11 @@ public EsRejectedExecutionException() { this(null, false); } + @Override + public Throwable fillInStackTrace() { + return this; // this exception doesn't imply a bug, no need for a stack trace + } + /** * Checks if the thread pool that rejected the execution was terminated * shortly after the rejection. 
Its possible that this returns false and the diff --git a/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java b/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java index 209faa7207be1..d234c1797e090 100644 --- a/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java +++ b/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java @@ -12,9 +12,11 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -26,6 +28,7 @@ import org.elasticsearch.index.IndexVersions; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.ConnectionProfile; +import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportRequestOptions.Type; import org.elasticsearch.transport.TransportService; @@ -72,11 +75,26 @@ public HandshakingTransportAddressConnector(Settings settings, TransportService @Override public void connectToRemoteMasterNode(TransportAddress transportAddress, ActionListener listener) { - try { + new ConnectionAttempt(transportAddress).run(listener); + } + + private class ConnectionAttempt { + private final TransportAddress transportAddress; + + ConnectionAttempt(TransportAddress transportAddress) { + this.transportAddress = transportAddress; + } + + void run(ActionListener listener) { + SubscribableListener.newForked(this::openProbeConnection) + .andThen(this::handshakeProbeConnection) + .andThen(this::openFullConnection) + .addListener(listener); + } + private void openProbeConnection(ActionListener listener) { // We could skip this if the transportService were already connected to the given address, but the savings would be minimal so // we open a new connection anyway. 
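
The BitArray.fill(fromIndex, toIndex, value) method added a little earlier works a 64-bit word at a time: the partial first and last words are updated through masks, and every word strictly in between is overwritten wholesale. A standalone sketch of the same idea against a plain long[] (not the LongArray-backed production code, which also grows its storage on demand):

    final class BitFillSketch {
        // Sets (value == true) or clears (value == false) bits in [fromIndex, toIndex).
        // Assumes 0 <= fromIndex <= toIndex and that `words` is already large enough.
        static void fill(long[] words, long fromIndex, long toIndex, boolean value) {
            if (fromIndex == toIndex) {
                return;                                   // empty range
            }
            int firstWord = (int) (fromIndex >> 6);
            int lastWord = (int) ((toIndex - 1) >> 6);
            long firstMask = -1L << fromIndex;            // bits at and above fromIndex % 64
            long lastMask = -1L >>> -toIndex;             // bits below toIndex % 64 (all 64 when aligned)
            if (firstWord == lastWord) {
                apply(words, firstWord, firstMask & lastMask, value);
                return;
            }
            apply(words, firstWord, firstMask, value);
            for (int w = firstWord + 1; w < lastWord; w++) {
                words[w] = value ? -1L : 0L;              // full words in the middle
            }
            apply(words, lastWord, lastMask, value);
        }

        private static void apply(long[] words, int word, long mask, boolean value) {
            words[word] = value ? words[word] | mask : words[word] & ~mask;
        }
    }
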
- logger.trace("[{}] opening probe connection", transportAddress); transportService.openConnection( new DiscoveryNode( @@ -95,98 +113,91 @@ public void connectToRemoteMasterNode(TransportAddress transportAddress, ActionL ) ), handshakeConnectionProfile, - listener.delegateFailure((l, connection) -> { - logger.trace("[{}] opened probe connection", transportAddress); - final var probeHandshakeTimeout = handshakeConnectionProfile.getHandshakeTimeout(); - // use NotifyOnceListener to make sure the following line does not result in onFailure being called when - // the connection is closed in the onResponse handler - transportService.handshake(connection, probeHandshakeTimeout, ActionListener.notifyOnce(new ActionListener<>() { - - @Override - public void onResponse(DiscoveryNode remoteNode) { - try { - // success means (amongst other things) that the cluster names match - logger.trace("[{}] handshake successful: {}", transportAddress, remoteNode); - IOUtils.closeWhileHandlingException(connection); - - if (remoteNode.equals(transportService.getLocalNode())) { - listener.onFailure( - new ConnectTransportException( - remoteNode, - String.format( - Locale.ROOT, - "successfully discovered local node %s at [%s]", - remoteNode.descriptionWithoutAttributes(), - transportAddress - ) - ) - ); - } else if (remoteNode.isMasterNode() == false) { - listener.onFailure( - new ConnectTransportException( - remoteNode, - String.format( - Locale.ROOT, - """ - successfully discovered master-ineligible node %s at [%s]; to suppress this message, \ - remove address [%s] from your discovery configuration or ensure that traffic to this \ - address is routed only to master-eligible nodes""", - remoteNode.descriptionWithoutAttributes(), - transportAddress, - transportAddress - ) - ) - ); - } else { - transportService.connectToNode(remoteNode, new ActionListener<>() { - @Override - public void onResponse(Releasable connectionReleasable) { - logger.trace("[{}] completed full connection with [{}]", transportAddress, remoteNode); - listener.onResponse(new ProbeConnectionResult(remoteNode, connectionReleasable)); - } - - @Override - public void onFailure(Exception e) { - // we opened a connection and successfully performed a handshake, so we're definitely - // talking to a master-eligible node with a matching cluster name and a good version, but - // the attempt to open a full connection to its publish address failed; a common reason is - // that the remote node is listening on 0.0.0.0 but has made an inappropriate choice for its - // publish address. - logger.warn( - () -> format( - "completed handshake with [%s] at [%s] but followup connection to [%s] failed", - remoteNode.descriptionWithoutAttributes(), - transportAddress, - remoteNode.getAddress() - ), - e - ); - listener.onFailure(e); - } - }); - } - } catch (Exception e) { - listener.onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - // we opened a connection and successfully performed a low-level handshake, so we were definitely - // talking to an Elasticsearch node, but the high-level handshake failed indicating some kind of - // mismatched configurations (e.g. 
cluster name) that the user should address - logger.warn(() -> "handshake to [" + transportAddress + "] failed", e); - IOUtils.closeWhileHandlingException(connection); - listener.onFailure(e); - } - - })); - - }) + ActionListener.assertOnce(listener) ); + } + + private void handshakeProbeConnection(ActionListener listener, Transport.Connection connection) { + logger.trace("[{}] opened probe connection", transportAddress); + final var probeHandshakeTimeout = handshakeConnectionProfile.getHandshakeTimeout(); + transportService.handshake(connection, probeHandshakeTimeout, ActionListener.assertOnce(new ActionListener<>() { + @Override + public void onResponse(DiscoveryNode remoteNode) { + // success means (amongst other things) that the cluster names match + logger.trace("[{}] handshake successful: {}", transportAddress, remoteNode); + IOUtils.closeWhileHandlingException(connection); + listener.onResponse(remoteNode); + } + + @Override + public void onFailure(Exception e) { + // We opened a connection and successfully performed a low-level handshake, so we were definitely talking to an + // Elasticsearch node, but the high-level handshake failed indicating some kind of mismatched configurations (e.g. + // cluster name) that the user should address. + logger.warn(() -> "handshake to [" + transportAddress + "] failed", e); + IOUtils.closeWhileHandlingException(connection); + listener.onFailure(e); + } + })); + } - } catch (Exception e) { - listener.onFailure(e); + private void openFullConnection(ActionListener listener, DiscoveryNode remoteNode) { + if (remoteNode.equals(transportService.getLocalNode())) { + throw new ConnectTransportException( + remoteNode, + String.format( + Locale.ROOT, + "successfully discovered local node %s at [%s]", + remoteNode.descriptionWithoutAttributes(), + transportAddress + ) + ); + } + + if (remoteNode.isMasterNode() == false) { + throw new ConnectTransportException( + remoteNode, + String.format( + Locale.ROOT, + """ + successfully discovered master-ineligible node %s at [%s]; to suppress this message, remove address [%s] from \ + your discovery configuration or ensure that traffic to this address is routed only to master-eligible nodes""", + remoteNode.descriptionWithoutAttributes(), + transportAddress, + transportAddress + ) + ); + } + + transportService.connectToNode(remoteNode, ActionListener.assertOnce(new ActionListener<>() { + @Override + public void onResponse(Releasable connectionReleasable) { + logger.trace("[{}] completed full connection with [{}]", transportAddress, remoteNode); + listener.onResponse(new ProbeConnectionResult(remoteNode, connectionReleasable)); + } + + @Override + public void onFailure(Exception e) { + // We opened a connection and successfully performed a handshake, so we're definitely talking to a master-eligible node + // with a matching cluster name and a good version, but the attempt to open a full connection to its publish address + // failed; a common reason is that the remote node is listening on 0.0.0.0 but has made an inappropriate choice for its + // publish address. + logger.warn( + () -> format( + """ + Successfully discovered master-eligible node [%s] at address [%s] but could not connect to it at its \ + publish address of [%s]. Each node in a cluster must be accessible at its publish address by all other \ + nodes in the cluster. 
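
The connector refactor above breaks connectToRemoteMasterNode into three asynchronous steps chained with SubscribableListener: open the probe connection, handshake on it, then open the full connection, with a failure at any stage propagating straight to the caller's listener. A minimal sketch of that chaining shape, with made-up step types (Long standing in for the probe connection and String for the discovered node); it is not the production flow:

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.support.SubscribableListener;

    class ProbeFlowSketch {
        void run(ActionListener<String> finalListener) {
            SubscribableListener
                .<Long>newForked(this::openConnection)    // step 1: produce a "connection"
                .<String>andThen(this::handshake)         // step 2: gets the listener to complete plus step 1's result
                .addListener(finalListener);              // success or failure of any step ends up here
        }

        private void openConnection(ActionListener<Long> listener) {
            listener.onResponse(42L);                     // stand-in for an async connection open
        }

        private void handshake(ActionListener<String> listener, Long connection) {
            listener.onResponse("node-" + connection);    // stand-in for an async handshake
        }
    }
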
See %s for more information.""", + remoteNode.descriptionWithoutAttributes(), + transportAddress, + remoteNode.getAddress(), + ReferenceDocs.NETWORK_BINDING_AND_PUBLISHING + ), + e + ); + listener.onFailure(e); + } + })); } } } diff --git a/server/src/main/java/org/elasticsearch/discovery/MasterNotDiscoveredException.java b/server/src/main/java/org/elasticsearch/discovery/MasterNotDiscoveredException.java index 0bd62627cb061..0f982e0e0ec65 100644 --- a/server/src/main/java/org/elasticsearch/discovery/MasterNotDiscoveredException.java +++ b/server/src/main/java/org/elasticsearch/discovery/MasterNotDiscoveredException.java @@ -32,4 +32,9 @@ public RestStatus status() { public MasterNotDiscoveredException(StreamInput in) throws IOException { super(in); } + + @Override + public Throwable fillInStackTrace() { + return this; // this exception doesn't imply a bug, no need for a stack trace + } } diff --git a/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java b/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java index 83660cede004e..11f3bbdc13bbf 100644 --- a/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java +++ b/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.cluster.coordination.ClusterFormationFailureHelper; import org.elasticsearch.cluster.coordination.PeersResponse; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -413,86 +414,90 @@ void establishConnection() { - activatedAtMillis > verbosityIncreaseTimeout.millis(); logger.trace("{} attempting connection", this); - transportAddressConnector.connectToRemoteMasterNode(transportAddress, new ActionListener() { - @Override - public void onResponse(ProbeConnectionResult connectResult) { - assert holdsLock() == false : "PeerFinder mutex is held in error"; - final DiscoveryNode remoteNode = connectResult.getDiscoveryNode(); - assert remoteNode.isMasterNode() : remoteNode + " is not master-eligible"; - assert remoteNode.equals(getLocalNode()) == false : remoteNode + " is the local node"; - boolean retainConnection = false; - try { - synchronized (mutex) { - if (isActive() == false) { - logger.trace("Peer#establishConnection inactive: {}", Peer.this); - return; + transportAddressConnector.connectToRemoteMasterNode( + transportAddress, + // may be completed on the calling thread, and therefore under the mutex, so must always fork + new ThreadedActionListener<>(clusterCoordinationExecutor, new ActionListener<>() { + @Override + public void onResponse(ProbeConnectionResult connectResult) { + assert holdsLock() == false : "PeerFinder mutex is held in error"; + final DiscoveryNode remoteNode = connectResult.getDiscoveryNode(); + assert remoteNode.isMasterNode() : remoteNode + " is not master-eligible"; + assert remoteNode.equals(getLocalNode()) == false : remoteNode + " is the local node"; + boolean retainConnection = false; + try { + synchronized (mutex) { + if (isActive() == false) { + logger.trace("Peer#establishConnection inactive: {}", Peer.this); + return; + } + + assert probeConnectionResult.get() == null + : "connection result unexpectedly already set to " + probeConnectionResult.get(); + probeConnectionResult.set(connectResult); + + requestPeers(); } - assert probeConnectionResult.get() == null - : "connection result unexpectedly already set to " + 
probeConnectionResult.get(); - probeConnectionResult.set(connectResult); - - requestPeers(); - } - - onFoundPeersUpdated(); + onFoundPeersUpdated(); - retainConnection = true; - } finally { - if (retainConnection == false) { - Releasables.close(connectResult); + retainConnection = true; + } finally { + if (retainConnection == false) { + Releasables.close(connectResult); + } } } - } - @Override - public void onFailure(Exception e) { - if (verboseFailureLogging) { - - final String believedMasterBy; - synchronized (mutex) { - believedMasterBy = peersByAddress.values() - .stream() - .filter(p -> p.lastKnownMasterNode.map(DiscoveryNode::getAddress).equals(Optional.of(transportAddress))) - .findFirst() - .map(p -> " [current master according to " + p.getDiscoveryNode().descriptionWithoutAttributes() + "]") - .orElse(""); - } + @Override + public void onFailure(Exception e) { + if (verboseFailureLogging) { + + final String believedMasterBy; + synchronized (mutex) { + believedMasterBy = peersByAddress.values() + .stream() + .filter(p -> p.lastKnownMasterNode.map(DiscoveryNode::getAddress).equals(Optional.of(transportAddress))) + .findFirst() + .map(p -> " [current master according to " + p.getDiscoveryNode().descriptionWithoutAttributes() + "]") + .orElse(""); + } - if (logger.isDebugEnabled()) { - // log message at level WARN, but since DEBUG logging is enabled we include the full stack trace - logger.warn(() -> format("%s%s discovery result", Peer.this, believedMasterBy), e); - } else { - final StringBuilder messageBuilder = new StringBuilder(); - Throwable cause = e; - while (cause != null && messageBuilder.length() <= 1024) { - messageBuilder.append(": ").append(cause.getMessage()); - cause = cause.getCause(); + if (logger.isDebugEnabled()) { + // log message at level WARN, but since DEBUG logging is enabled we include the full stack trace + logger.warn(() -> format("%s%s discovery result", Peer.this, believedMasterBy), e); + } else { + final StringBuilder messageBuilder = new StringBuilder(); + Throwable cause = e; + while (cause != null && messageBuilder.length() <= 1024) { + messageBuilder.append(": ").append(cause.getMessage()); + cause = cause.getCause(); + } + final String message = messageBuilder.length() < 1024 + ? messageBuilder.toString() + : (messageBuilder.substring(0, 1023) + "..."); + logger.warn( + "{}{} discovery result{}; for summary, see logs from {}; for troubleshooting guidance, see {}", + Peer.this, + believedMasterBy, + message, + ClusterFormationFailureHelper.class.getCanonicalName(), + ReferenceDocs.DISCOVERY_TROUBLESHOOTING + ); } - final String message = messageBuilder.length() < 1024 - ? 
messageBuilder.toString() - : (messageBuilder.substring(0, 1023) + "..."); - logger.warn( - "{}{} discovery result{}; for summary, see logs from {}; for troubleshooting guidance, see {}", - Peer.this, - believedMasterBy, - message, - ClusterFormationFailureHelper.class.getCanonicalName(), - ReferenceDocs.DISCOVERY_TROUBLESHOOTING - ); + } else { + logger.debug(() -> format("%s discovery result", Peer.this), e); + } + synchronized (mutex) { + assert probeConnectionResult.get() == null + : "discoveryNode unexpectedly already set to " + probeConnectionResult.get(); + if (isActive()) { + peersByAddress.remove(transportAddress); + } // else this Peer has been superseded by a different instance which should be left in place } - } else { - logger.debug(() -> format("%s discovery result", Peer.this), e); - } - synchronized (mutex) { - assert probeConnectionResult.get() == null - : "discoveryNode unexpectedly already set to " + probeConnectionResult.get(); - if (isActive()) { - peersByAddress.remove(transportAddress); - } // else this Peer has been superseded by a different instance which should be left in place } - } - }); + }) + ); } private void requestPeers() { diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index 0605e36b2ea4b..3d81cccc92dc7 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -525,7 +525,8 @@ public synchronized IndexShard createShard( this.indexSettings, directory, lock, - new StoreCloseListener(shardId, () -> eventListener.onStoreClosed(shardId)) + new StoreCloseListener(shardId, () -> eventListener.onStoreClosed(shardId)), + this.indexSettings.getIndexSortConfig().hasIndexSort() ); eventListener.onStoreCreated(shardId); indexShard = new IndexShard( diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 5446027a2ca40..944d50f7ea06c 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -294,6 +294,12 @@ public final class IndexSettings { }, new RefreshIntervalValidator(), Property.Dynamic, Property.IndexScope, Property.ServerlessPublic); static class RefreshIntervalValidator implements Setting.Validator { + + static final String STATELESS_ALLOW_INDEX_REFRESH_INTERVAL_OVERRIDE = "es.stateless.allow.index.refresh_interval.override"; + private static final boolean IS_OVERRIDE_ALLOWED = Boolean.parseBoolean( + System.getProperty(STATELESS_ALLOW_INDEX_REFRESH_INTERVAL_OVERRIDE, "false") + ); + @Override public void validate(TimeValue value) {} @@ -308,16 +314,19 @@ public void validate(final TimeValue value, final Map, Object> settin && value.compareTo(TimeValue.ZERO) > 0 && value.compareTo(STATELESS_MIN_NON_FAST_REFRESH_INTERVAL) < 0 && indexVersion.after(IndexVersions.V_8_10_0)) { - throw new IllegalArgumentException( - "index setting [" - + IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey() - + "=" - + value - + "] should be either " - + TimeValue.MINUS_ONE - + " or equal to or greater than " - + STATELESS_MIN_NON_FAST_REFRESH_INTERVAL - ); + + if (IS_OVERRIDE_ALLOWED == false) { + throw new IllegalArgumentException( + "index setting [" + + IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey() + + "=" + + value + + "] should be either " + + TimeValue.MINUS_ONE + + " or equal to or greater than " + + 
STATELESS_MIN_NON_FAST_REFRESH_INTERVAL + ); + } } } @@ -607,16 +616,25 @@ public void validate(Instant value) {} @Override public void validate(Instant value, Map, Object> settings) { - @SuppressWarnings("unchecked") Instant startTime = (Instant) settings.get(TIME_SERIES_START_TIME); if (startTime.toEpochMilli() > value.toEpochMilli()) { throw new IllegalArgumentException("index.time_series.end_time must be larger than index.time_series.start_time"); } + + // The index.time_series.end_time setting can only be specified if the index.mode setting has been set to time_series + // This check here is specifically needed because in case of updating index settings the validation the gets executed + // in IndexSettings constructor when reading the index.mode setting doesn't get executed. + IndexMode indexMode = (IndexMode) settings.get(MODE); + if (indexMode != IndexMode.TIME_SERIES) { + throw new IllegalArgumentException( + "[" + TIME_SERIES_END_TIME.getKey() + "] requires [index.mode=" + IndexMode.TIME_SERIES + "]" + ); + } } @Override public Iterator> settings() { - List> settings = List.of(TIME_SERIES_START_TIME); + List> settings = List.of(TIME_SERIES_START_TIME, MODE); return settings.iterator(); } }, diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index 1a933a396108e..8bebd390d3440 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -108,7 +108,10 @@ private static IndexVersion def(int id, Version luceneVersion) { public static final IndexVersion SOURCE_MAPPER_LOSSY_PARAMS_CHECK = def(8_506_00_0, Version.LUCENE_9_10_0); public static final IndexVersion SEMANTIC_TEXT_FIELD_TYPE = def(8_507_00_0, Version.LUCENE_9_10_0); public static final IndexVersion UPGRADE_TO_LUCENE_9_11 = def(8_508_00_0, Version.LUCENE_9_11_0); - + public static final IndexVersion UNIQUE_TOKEN_FILTER_POS_FIX = def(8_509_00_0, Version.LUCENE_9_11_0); + public static final IndexVersion ADD_SECURITY_MIGRATION = def(8_510_00_0, Version.LUCENE_9_11_0); + public static final IndexVersion UPGRADE_TO_LUCENE_9_11_1 = def(8_511_00_0, Version.LUCENE_9_11_1); + public static final IndexVersion INDEX_SORTING_ON_NESTED = def(8_512_00_0, Version.LUCENE_9_11_1); /* * STOP! READ THIS FIRST! 
No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java b/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java index e4b86876c99d3..33b33c865d01e 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java @@ -351,7 +351,7 @@ public static Reader getReaderFromFile(Environment env, String filePath, String public static Reader getReaderFromIndex(String synonymsSet, SynonymsManagementAPIService synonymsManagementAPIService) { final PlainActionFuture> synonymsLoadingFuture = new PlainActionFuture<>(); - synonymsManagementAPIService.getSynonymSetRules(synonymsSet, 0, 10_000, synonymsLoadingFuture); + synonymsManagementAPIService.getSynonymSetRules(synonymsSet, synonymsLoadingFuture); PagedResult results = synonymsLoadingFuture.actionGet(); SynonymRule[] synonymRules = results.pageResults(); diff --git a/server/src/main/java/org/elasticsearch/index/codec/DeduplicatingFieldInfosFormat.java b/server/src/main/java/org/elasticsearch/index/codec/DeduplicatingFieldInfosFormat.java new file mode 100644 index 0000000000000..75ec265a68391 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/DeduplicatingFieldInfosFormat.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.codec; + +import org.apache.lucene.codecs.FieldInfosFormat; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.SegmentInfo; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.elasticsearch.common.util.Maps; +import org.elasticsearch.common.util.StringLiteralDeduplicator; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.index.mapper.FieldMapper; + +import java.io.IOException; +import java.util.Map; + +/** + * Wrapper around a {@link FieldInfosFormat} that will deduplicate and intern all field names, attribute-keys and -values, and in most + * cases attribute maps on read. We use this to reduce the per-field overhead for Elasticsearch instances holding a large number of + * segments. 
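
The TIME_SERIES_END_TIME validator change above relies on a detail of Setting.Validator: the validate(value, settings) overload only receives the other settings that settings() declares, which is why MODE is now returned there alongside TIME_SERIES_START_TIME. A hedged sketch of the same pattern with placeholder setting names (not the real index settings):

    import org.elasticsearch.common.settings.Setting;

    import java.util.Iterator;
    import java.util.List;
    import java.util.Map;

    class RequiresModeValidatorSketch implements Setting.Validator<String> {
        static final Setting<String> MODE = Setting.simpleString("sketch.mode", Setting.Property.IndexScope);

        @Override
        public void validate(String value) {}

        @Override
        public void validate(String value, Map<Setting<?>, Object> settings) {
            Object mode = settings.get(MODE);             // only available because settings() lists MODE
            if (value.isEmpty() == false && "time_series".equals(mode) == false) {
                throw new IllegalArgumentException("[sketch.end_time] requires [sketch.mode=time_series]");
            }
        }

        @Override
        public Iterator<Setting<?>> settings() {
            return List.<Setting<?>>of(MODE).iterator();  // declare the dependency so it is passed to validate()
        }
    }
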
+ */ +public final class DeduplicatingFieldInfosFormat extends FieldInfosFormat { + + private static final Map, Map> attributeDeduplicator = ConcurrentCollections.newConcurrentMap(); + + private static final StringLiteralDeduplicator attributesDeduplicator = new StringLiteralDeduplicator(); + + private final FieldInfosFormat delegate; + + public DeduplicatingFieldInfosFormat(FieldInfosFormat delegate) { + this.delegate = delegate; + } + + @Override + public FieldInfos read(Directory directory, SegmentInfo segmentInfo, String segmentSuffix, IOContext iocontext) throws IOException { + final FieldInfos fieldInfos = delegate.read(directory, segmentInfo, segmentSuffix, iocontext); + final FieldInfo[] deduplicated = new FieldInfo[fieldInfos.size()]; + int i = 0; + for (FieldInfo fi : fieldInfos) { + deduplicated[i++] = new FieldInfo( + FieldMapper.internFieldName(fi.getName()), + fi.number, + fi.hasVectors(), + fi.omitsNorms(), + fi.hasPayloads(), + fi.getIndexOptions(), + fi.getDocValuesType(), + fi.getDocValuesGen(), + internStringStringMap(fi.attributes()), + fi.getPointDimensionCount(), + fi.getPointIndexDimensionCount(), + fi.getPointNumBytes(), + fi.getVectorDimension(), + fi.getVectorEncoding(), + fi.getVectorSimilarityFunction(), + fi.isSoftDeletesField(), + fi.isParentField() + ); + } + return new FieldInfos(deduplicated); + } + + private static Map internStringStringMap(Map m) { + if (m.size() > 10) { + return m; + } + var res = attributeDeduplicator.get(m); + if (res == null) { + if (attributeDeduplicator.size() > 100) { + // Unexpected edge case to have more than 100 different attribute maps + // Just to be safe, don't retain more than 100 maps to prevent a potential memory leak + attributeDeduplicator.clear(); + } + final Map interned = Maps.newHashMapWithExpectedSize(m.size()); + m.forEach((key, value) -> interned.put(attributesDeduplicator.deduplicate(key), attributesDeduplicator.deduplicate(value))); + res = Map.copyOf(interned); + attributeDeduplicator.put(res, res); + } + return res; + } + + @Override + public void write(Directory directory, SegmentInfo segmentInfo, String segmentSuffix, FieldInfos infos, IOContext context) + throws IOException { + delegate.write(directory, segmentInfo, segmentSuffix, infos, context); + } + +} diff --git a/server/src/main/java/org/elasticsearch/index/codec/Elasticsearch814Codec.java b/server/src/main/java/org/elasticsearch/index/codec/Elasticsearch814Codec.java index e85e05c87b083..dd7a668605e57 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/Elasticsearch814Codec.java +++ b/server/src/main/java/org/elasticsearch/index/codec/Elasticsearch814Codec.java @@ -9,6 +9,7 @@ package org.elasticsearch.index.codec; import org.apache.lucene.codecs.DocValuesFormat; +import org.apache.lucene.codecs.FieldInfosFormat; import org.apache.lucene.codecs.FilterCodec; import org.apache.lucene.codecs.KnnVectorsFormat; import org.apache.lucene.codecs.PostingsFormat; @@ -30,6 +31,8 @@ public class Elasticsearch814Codec extends FilterCodec { private final StoredFieldsFormat storedFieldsFormat; + private final FieldInfosFormat fieldInfosFormat; + private final PostingsFormat defaultPostingsFormat; private final PostingsFormat postingsFormat = new PerFieldPostingsFormat() { @Override @@ -69,6 +72,7 @@ public Elasticsearch814Codec(Zstd814StoredFieldsFormat.Mode mode) { this.defaultPostingsFormat = new Lucene99PostingsFormat(); this.defaultDVFormat = new Lucene90DocValuesFormat(); this.defaultKnnVectorsFormat = new Lucene99HnswVectorsFormat(); + 
this.fieldInfosFormat = new DeduplicatingFieldInfosFormat(delegate.fieldInfosFormat()); } @Override @@ -127,4 +131,9 @@ public DocValuesFormat getDocValuesFormatForField(String field) { public KnnVectorsFormat getKnnVectorsFormatForField(String field) { return defaultKnnVectorsFormat; } + + @Override + public FieldInfosFormat fieldInfosFormat() { + return fieldInfosFormat; + } } diff --git a/server/src/main/java/org/elasticsearch/index/codec/bloomfilter/ES87BloomFilterPostingsFormat.java b/server/src/main/java/org/elasticsearch/index/codec/bloomfilter/ES87BloomFilterPostingsFormat.java index 191fe8f75b2f0..01d874adec14d 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/bloomfilter/ES87BloomFilterPostingsFormat.java +++ b/server/src/main/java/org/elasticsearch/index/codec/bloomfilter/ES87BloomFilterPostingsFormat.java @@ -128,7 +128,6 @@ final class FieldsWriter extends FieldsConsumer { private final List fieldsGroups = new ArrayList<>(); private final List toCloses = new ArrayList<>(); private boolean closed; - private final int[] hashes = new int[NUM_HASH_FUNCTIONS]; FieldsWriter(SegmentWriteState state) throws IOException { this.state = state; @@ -180,23 +179,24 @@ public Iterator iterator() { } private void writeBloomFilters(Fields fields) throws IOException { - for (String field : fields) { - final Terms terms = fields.terms(field); - if (terms == null) { - continue; - } - final int bloomFilterSize = bloomFilterSize(state.segmentInfo.maxDoc()); - final int numBytes = numBytesForBloomFilter(bloomFilterSize); - try (ByteArray buffer = bigArrays.newByteArray(numBytes)) { + final int bloomFilterSize = bloomFilterSize(state.segmentInfo.maxDoc()); + final int numBytes = numBytesForBloomFilter(bloomFilterSize); + final int[] hashes = new int[NUM_HASH_FUNCTIONS]; + try (ByteArray buffer = bigArrays.newByteArray(numBytes, false)) { + long written = indexOut.getFilePointer(); + for (String field : fields) { + final Terms terms = fields.terms(field); + if (terms == null) { + continue; + } + buffer.fill(0, numBytes, (byte) 0); final TermsEnum termsEnum = terms.iterator(); while (true) { final BytesRef term = termsEnum.next(); if (term == null) { break; } - - hashTerm(term, hashes); - for (int hash : hashes) { + for (int hash : hashTerm(term, hashes)) { hash = hash % bloomFilterSize; final int pos = hash >> 3; final int mask = 1 << (hash & 7); @@ -204,9 +204,13 @@ private void writeBloomFilters(Fields fields) throws IOException { buffer.set(pos, val); } } - bloomFilters.add(new BloomFilter(field, indexOut.getFilePointer(), bloomFilterSize)); - final BytesReference bytes = BytesReference.fromByteArray(buffer, numBytes); - bytes.writeTo(new IndexOutputOutputStream(indexOut)); + bloomFilters.add(new BloomFilter(field, written, bloomFilterSize)); + if (buffer.hasArray()) { + indexOut.writeBytes(buffer.array(), 0, numBytes); + } else { + BytesReference.fromByteArray(buffer, numBytes).writeTo(new IndexOutputOutputStream(indexOut)); + } + written += numBytes; } } } @@ -636,35 +640,10 @@ private MurmurHash3() {} * @param length The length of array * @return The sum of the two 64-bit hashes that make up the hash128 */ - public static long hash64(final byte[] data, final int offset, final int length) { - // We hope that the C2 escape analysis prevents ths allocation from creating GC pressure. 
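
The writeBloomFilters rewrite above reuses a single buffer and hashes array across fields and addresses individual filter bits by reducing the hash modulo the filter size, then splitting the resulting bit position into a byte offset and an in-byte mask. A standalone sketch of that bit addressing, assuming non-negative hashes as produced by hashTerm; the names are illustrative:

    final class BloomBitsSketch {
        private final byte[] filter;
        private final int sizeInBits;

        BloomBitsSketch(int sizeInBits) {
            this.sizeInBits = sizeInBits;
            this.filter = new byte[(sizeInBits + 7) / 8];
        }

        void set(int hash) {
            int bit = hash % sizeInBits;                  // which bit of the filter
            filter[bit >> 3] |= (byte) (1 << (bit & 7));  // byte offset and mask within that byte
        }

        boolean mightContain(int hash) {
            int bit = hash % sizeInBits;
            return (filter[bit >> 3] & (1 << (bit & 7))) != 0;
        }
    }
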
- long[] hash128 = { 0, 0 }; - hash128x64Internal(data, offset, length, DEFAULT_SEED, hash128); - return hash128[0]; - } - - /** - * Generates 128-bit hash from the byte array with the given offset, length and seed. - * - *
      This is an implementation of the 128-bit hash function {@code MurmurHash3_x64_128} - * from Austin Appleby's original MurmurHash3 {@code c++} code in SMHasher.
      - * - * @param data The input byte array - * @param offset The first element of array - * @param length The length of array - * @param seed The initial seed value - * @return The 128-bit hash (2 longs) - */ @SuppressWarnings("fallthrough") - private static long[] hash128x64Internal( - final byte[] data, - final int offset, - final int length, - final long seed, - final long[] result - ) { - long h1 = seed; - long h2 = seed; + public static long hash64(final byte[] data, final int offset, final int length) { + long h1 = MurmurHash3.DEFAULT_SEED; + long h2 = MurmurHash3.DEFAULT_SEED; final int nblocks = length >> 4; // body @@ -749,11 +728,8 @@ private static long[] hash128x64Internal( h2 = fmix64(h2); h1 += h2; - h2 += h1; - result[0] = h1; - result[1] = h2; - return result; + return h1; } /** diff --git a/server/src/main/java/org/elasticsearch/index/codec/vectors/ES813FlatVectorFormat.java b/server/src/main/java/org/elasticsearch/index/codec/vectors/ES813FlatVectorFormat.java index 690b580d0c322..861f5ecd56f5a 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/vectors/ES813FlatVectorFormat.java +++ b/server/src/main/java/org/elasticsearch/index/codec/vectors/ES813FlatVectorFormat.java @@ -54,11 +54,11 @@ public KnnVectorsReader fieldsReader(SegmentReadState state) throws IOException return new ES813FlatVectorReader(format.fieldsReader(state)); } - public static class ES813FlatVectorWriter extends KnnVectorsWriter { + static class ES813FlatVectorWriter extends KnnVectorsWriter { private final FlatVectorsWriter writer; - public ES813FlatVectorWriter(FlatVectorsWriter writer) { + ES813FlatVectorWriter(FlatVectorsWriter writer) { super(); this.writer = writer; } @@ -94,11 +94,11 @@ public void mergeOneField(FieldInfo fieldInfo, MergeState mergeState) throws IOE } } - public static class ES813FlatVectorReader extends KnnVectorsReader { + static class ES813FlatVectorReader extends KnnVectorsReader { private final FlatVectorsReader reader; - public ES813FlatVectorReader(FlatVectorsReader reader) { + ES813FlatVectorReader(FlatVectorsReader reader) { super(); this.reader = reader; } diff --git a/server/src/main/java/org/elasticsearch/index/codec/vectors/ES815BitFlatVectorFormat.java b/server/src/main/java/org/elasticsearch/index/codec/vectors/ES815BitFlatVectorFormat.java new file mode 100644 index 0000000000000..86bc58c5862ee --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/vectors/ES815BitFlatVectorFormat.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.codec.vectors; + +import org.apache.lucene.codecs.KnnVectorsFormat; +import org.apache.lucene.codecs.KnnVectorsReader; +import org.apache.lucene.codecs.KnnVectorsWriter; +import org.apache.lucene.codecs.hnsw.FlatVectorsFormat; +import org.apache.lucene.index.SegmentReadState; +import org.apache.lucene.index.SegmentWriteState; + +import java.io.IOException; + +public class ES815BitFlatVectorFormat extends KnnVectorsFormat { + + static final String NAME = "ES815BitFlatVectorFormat"; + + private final FlatVectorsFormat format = new ES815BitFlatVectorsFormat(); + + /** + * Sole constructor + */ + public ES815BitFlatVectorFormat() { + super(NAME); + } + + @Override + public KnnVectorsWriter fieldsWriter(SegmentWriteState state) throws IOException { + return new ES813FlatVectorFormat.ES813FlatVectorWriter(format.fieldsWriter(state)); + } + + @Override + public KnnVectorsReader fieldsReader(SegmentReadState state) throws IOException { + return new ES813FlatVectorFormat.ES813FlatVectorReader(format.fieldsReader(state)); + } + + @Override + public String toString() { + return NAME; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/codec/vectors/ES815BitFlatVectorsFormat.java b/server/src/main/java/org/elasticsearch/index/codec/vectors/ES815BitFlatVectorsFormat.java new file mode 100644 index 0000000000000..de91833c99842 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/vectors/ES815BitFlatVectorsFormat.java @@ -0,0 +1,143 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
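
Like other Lucene codec components, the new formats above are looked up by the name passed to super(NAME) through Java SPI; that is how a segment written with ES815BitFlatVectorFormat can be read back later. A small sketch of the lookup, assuming the format is registered in META-INF/services/org.apache.lucene.codecs.KnnVectorsFormat on the classpath:

    import org.apache.lucene.codecs.KnnVectorsFormat;

    class FormatLookupSketch {
        public static void main(String[] args) {
            KnnVectorsFormat format = KnnVectorsFormat.forName("ES815BitFlatVectorFormat");
            System.out.println(format);                   // the format's toString() above returns its NAME
        }
    }
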
+ */ + +package org.elasticsearch.index.codec.vectors; + +import org.apache.lucene.codecs.hnsw.FlatVectorsFormat; +import org.apache.lucene.codecs.hnsw.FlatVectorsReader; +import org.apache.lucene.codecs.hnsw.FlatVectorsScorer; +import org.apache.lucene.codecs.hnsw.FlatVectorsWriter; +import org.apache.lucene.codecs.lucene99.Lucene99FlatVectorsFormat; +import org.apache.lucene.index.SegmentReadState; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.index.VectorSimilarityFunction; +import org.apache.lucene.util.hnsw.RandomAccessVectorValues; +import org.apache.lucene.util.hnsw.RandomVectorScorer; +import org.apache.lucene.util.hnsw.RandomVectorScorerSupplier; +import org.apache.lucene.util.quantization.RandomAccessQuantizedByteVectorValues; +import org.elasticsearch.script.field.vectors.ESVectorUtil; + +import java.io.IOException; + +class ES815BitFlatVectorsFormat extends FlatVectorsFormat { + + private final FlatVectorsFormat delegate = new Lucene99FlatVectorsFormat(FlatBitVectorScorer.INSTANCE); + + @Override + public FlatVectorsWriter fieldsWriter(SegmentWriteState segmentWriteState) throws IOException { + return delegate.fieldsWriter(segmentWriteState); + } + + @Override + public FlatVectorsReader fieldsReader(SegmentReadState segmentReadState) throws IOException { + return delegate.fieldsReader(segmentReadState); + } + + static class FlatBitVectorScorer implements FlatVectorsScorer { + + static final FlatBitVectorScorer INSTANCE = new FlatBitVectorScorer(); + + static void checkDimensions(int queryLen, int fieldLen) { + if (queryLen != fieldLen) { + throw new IllegalArgumentException("vector query dimension: " + queryLen + " differs from field dimension: " + fieldLen); + } + } + + @Override + public String toString() { + return super.toString(); + } + + @Override + public RandomVectorScorerSupplier getRandomVectorScorerSupplier( + VectorSimilarityFunction vectorSimilarityFunction, + RandomAccessVectorValues randomAccessVectorValues + ) throws IOException { + assert randomAccessVectorValues instanceof RandomAccessVectorValues.Bytes; + assert vectorSimilarityFunction == VectorSimilarityFunction.EUCLIDEAN; + if (randomAccessVectorValues instanceof RandomAccessVectorValues.Bytes randomAccessVectorValuesBytes) { + assert randomAccessVectorValues instanceof RandomAccessQuantizedByteVectorValues == false; + return switch (vectorSimilarityFunction) { + case DOT_PRODUCT, MAXIMUM_INNER_PRODUCT, COSINE, EUCLIDEAN -> new HammingScorerSupplier(randomAccessVectorValuesBytes); + }; + } + throw new IllegalArgumentException("Unsupported vector type or similarity function"); + } + + @Override + public RandomVectorScorer getRandomVectorScorer( + VectorSimilarityFunction vectorSimilarityFunction, + RandomAccessVectorValues randomAccessVectorValues, + byte[] bytes + ) { + assert randomAccessVectorValues instanceof RandomAccessVectorValues.Bytes; + assert vectorSimilarityFunction == VectorSimilarityFunction.EUCLIDEAN; + if (randomAccessVectorValues instanceof RandomAccessVectorValues.Bytes randomAccessVectorValuesBytes) { + checkDimensions(bytes.length, randomAccessVectorValuesBytes.dimension()); + return switch (vectorSimilarityFunction) { + case DOT_PRODUCT, MAXIMUM_INNER_PRODUCT, COSINE, EUCLIDEAN -> new HammingVectorScorer( + randomAccessVectorValuesBytes, + bytes + ); + }; + } + throw new IllegalArgumentException("Unsupported vector type or similarity function"); + } + + @Override + public RandomVectorScorer getRandomVectorScorer( + VectorSimilarityFunction 
vectorSimilarityFunction, + RandomAccessVectorValues randomAccessVectorValues, + float[] floats + ) { + throw new IllegalArgumentException("Unsupported vector type"); + } + } + + static float hammingScore(byte[] a, byte[] b) { + return ((a.length * Byte.SIZE) - ESVectorUtil.xorBitCount(a, b)) / (float) (a.length * Byte.SIZE); + } + + static class HammingVectorScorer extends RandomVectorScorer.AbstractRandomVectorScorer { + private final byte[] query; + private final RandomAccessVectorValues.Bytes byteValues; + + HammingVectorScorer(RandomAccessVectorValues.Bytes byteValues, byte[] query) { + super(byteValues); + this.query = query; + this.byteValues = byteValues; + } + + @Override + public float score(int i) throws IOException { + return hammingScore(byteValues.vectorValue(i), query); + } + } + + static class HammingScorerSupplier implements RandomVectorScorerSupplier { + private final RandomAccessVectorValues.Bytes byteValues, byteValues1, byteValues2; + + HammingScorerSupplier(RandomAccessVectorValues.Bytes byteValues) throws IOException { + this.byteValues = byteValues; + this.byteValues1 = byteValues.copy(); + this.byteValues2 = byteValues.copy(); + } + + @Override + public RandomVectorScorer scorer(int i) throws IOException { + byte[] query = byteValues1.vectorValue(i); + return new HammingVectorScorer(byteValues2, query); + } + + @Override + public RandomVectorScorerSupplier copy() throws IOException { + return new HammingScorerSupplier(byteValues); + } + } + +} diff --git a/server/src/main/java/org/elasticsearch/index/codec/vectors/ES815HnswBitVectorsFormat.java b/server/src/main/java/org/elasticsearch/index/codec/vectors/ES815HnswBitVectorsFormat.java new file mode 100644 index 0000000000000..f7884c0b73688 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/vectors/ES815HnswBitVectorsFormat.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
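
The hammingScore above turns a bit-vector Hamming distance into a similarity in [0, 1]: identical vectors score 1.0 and bitwise-opposite vectors score 0.0. A standalone sketch with a plain popcount loop in place of the ESVectorUtil.xorBitCount helper the production code delegates to:

    final class HammingSketch {
        static int xorBitCount(byte[] a, byte[] b) {
            int distance = 0;
            for (int i = 0; i < a.length; i++) {
                distance += Integer.bitCount((a[i] ^ b[i]) & 0xFF);  // differing bits in this byte
            }
            return distance;
        }

        static float hammingScore(byte[] a, byte[] b) {
            int totalBits = a.length * Byte.SIZE;
            return (totalBits - xorBitCount(a, b)) / (float) totalBits;
        }

        public static void main(String[] args) {
            byte[] v1 = { (byte) 0b1010_1010 };
            byte[] v2 = { (byte) 0b1010_1000 };
            System.out.println(hammingScore(v1, v2));     // 0.875: one differing bit out of eight
        }
    }
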
+ */ + +package org.elasticsearch.index.codec.vectors; + +import org.apache.lucene.codecs.KnnVectorsFormat; +import org.apache.lucene.codecs.KnnVectorsReader; +import org.apache.lucene.codecs.KnnVectorsWriter; +import org.apache.lucene.codecs.hnsw.FlatVectorsFormat; +import org.apache.lucene.codecs.lucene99.Lucene99HnswVectorsReader; +import org.apache.lucene.codecs.lucene99.Lucene99HnswVectorsWriter; +import org.apache.lucene.index.SegmentReadState; +import org.apache.lucene.index.SegmentWriteState; + +import java.io.IOException; + +public class ES815HnswBitVectorsFormat extends KnnVectorsFormat { + + static final String NAME = "ES815HnswBitVectorsFormat"; + + static final int MAXIMUM_MAX_CONN = 512; + static final int MAXIMUM_BEAM_WIDTH = 3200; + + private final int maxConn; + private final int beamWidth; + + private final FlatVectorsFormat flatVectorsFormat = new ES815BitFlatVectorsFormat(); + + public ES815HnswBitVectorsFormat() { + this(16, 100); + } + + public ES815HnswBitVectorsFormat(int maxConn, int beamWidth) { + super(NAME); + if (maxConn <= 0 || maxConn > MAXIMUM_MAX_CONN) { + throw new IllegalArgumentException( + "maxConn must be positive and less than or equal to " + MAXIMUM_MAX_CONN + "; maxConn=" + maxConn + ); + } + if (beamWidth <= 0 || beamWidth > MAXIMUM_BEAM_WIDTH) { + throw new IllegalArgumentException( + "beamWidth must be positive and less than or equal to " + MAXIMUM_BEAM_WIDTH + "; beamWidth=" + beamWidth + ); + } + this.maxConn = maxConn; + this.beamWidth = beamWidth; + } + + @Override + public KnnVectorsWriter fieldsWriter(SegmentWriteState state) throws IOException { + return new Lucene99HnswVectorsWriter(state, maxConn, beamWidth, flatVectorsFormat.fieldsWriter(state), 1, null); + } + + @Override + public KnnVectorsReader fieldsReader(SegmentReadState state) throws IOException { + return new Lucene99HnswVectorsReader(state, flatVectorsFormat.fieldsReader(state)); + } + + @Override + public String toString() { + return "ES815HnswBitVectorsFormat(name=ES815HnswBitVectorsFormat, maxConn=" + + maxConn + + ", beamWidth=" + + beamWidth + + ", flatVectorFormat=" + + flatVectorsFormat + + ")"; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java index a69cc42163dd2..22bab1742589e 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java @@ -153,7 +153,7 @@ private SafeCommitInfo getNewSafeCommitInfo(IndexCommit newSafeCommit) { return currentSafeCommitInfo; } - if (currentSafeCommitInfo.localCheckpoint == newSafeCommitLocalCheckpoint) { + if (currentSafeCommitInfo.localCheckpoint() == newSafeCommitLocalCheckpoint) { // the new commit could in principle have the same LCP but a different doc count due to extra operations between its LCP and // MSN, but that is a transient state since we'll eventually advance the LCP. The doc count is only used for heuristics around // expiring excessively-lagging retention leases, so a little inaccuracy is tolerable here. 
@@ -164,7 +164,7 @@ private SafeCommitInfo getNewSafeCommitInfo(IndexCommit newSafeCommit) { return new SafeCommitInfo(newSafeCommitLocalCheckpoint, getDocCountOfCommit(newSafeCommit)); } catch (IOException ex) { logger.info("failed to get the total docs from the safe commit; use the total docs from the previous safe commit", ex); - return new SafeCommitInfo(newSafeCommitLocalCheckpoint, currentSafeCommitInfo.docCount); + return new SafeCommitInfo(newSafeCommitLocalCheckpoint, currentSafeCommitInfo.docCount()); } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 1d62debd77e7f..6f4511483126f 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -128,6 +128,7 @@ public abstract class Engine implements Closeable { public static final String CAN_MATCH_SEARCH_SOURCE = "can_match"; protected static final String DOC_STATS_SOURCE = "doc_stats"; public static final long UNKNOWN_PRIMARY_TERM = -1L; + public static final String ROOT_DOC_FIELD_NAME = "__root_doc_for_nested"; protected final ShardId shardId; protected final Logger logger; @@ -315,7 +316,7 @@ private long getSparseVectorValueCount(final LeafReader atomicReader, MappingLoo for (Mapper mapper : mappingLookup.fieldMappers()) { if (mapper instanceof FieldMapper fieldMapper) { if (fieldMapper.fieldType() instanceof SparseVectorFieldMapper.SparseVectorFieldType) { - mappers.put(fieldMapper.name(), fieldMapper); + mappers.put(fieldMapper.fullPath(), fieldMapper); } } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index be64365fedd34..03d244cd8e4ef 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -72,6 +72,7 @@ import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.cache.query.TrivialQueryCachingPolicy; import org.elasticsearch.index.mapper.DocumentParser; @@ -343,8 +344,8 @@ private LocalCheckpointTracker createLocalCheckpointTracker( final SequenceNumbers.CommitInfo seqNoStats = SequenceNumbers.loadSeqNoInfoFromLuceneCommit( store.readLastCommittedSegmentsInfo().userData.entrySet() ); - maxSeqNo = seqNoStats.maxSeqNo; - localCheckpoint = seqNoStats.localCheckpoint; + maxSeqNo = seqNoStats.maxSeqNo(); + localCheckpoint = seqNoStats.localCheckpoint(); logger.trace("recovered maximum sequence number [{}] and local checkpoint [{}]", maxSeqNo, localCheckpoint); return localCheckpointTrackerSupplier.apply(maxSeqNo, localCheckpoint); } @@ -1687,7 +1688,7 @@ private Exception tryAcquireInFlightDocs(Operation operation, int addingDocs) { final long totalDocs = indexWriter.getPendingNumDocs() + inFlightDocCount.addAndGet(addingDocs); if (totalDocs > maxDocs) { releaseInFlightDocs(addingDocs); - return new IllegalArgumentException("Number of documents in the index can't exceed [" + maxDocs + "]"); + return new IllegalArgumentException("Number of documents in the shard cannot exceed [" + maxDocs + "]"); } else { return null; } @@ -2142,9 +2143,8 @@ private boolean shouldPeriodicallyFlush(long flushThresholdSizeInBytes, long flu final long 
localCheckpointOfLastCommit = Long.parseLong( lastCommittedSegmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY) ); - final long translogGenerationOfLastCommit = translog.getMinGenerationForSeqNo( - localCheckpointOfLastCommit + 1 - ).translogFileGeneration; + final long translogGenerationOfLastCommit = translog.getMinGenerationForSeqNo(localCheckpointOfLastCommit + 1) + .translogFileGeneration(); if (translog.sizeInBytesByMinGen(translogGenerationOfLastCommit) < flushThresholdSizeInBytes && relativeTimeInNanosSupplier.getAsLong() - lastFlushTimestamp < flushThresholdAgeInNanos) { return false; @@ -2164,9 +2164,8 @@ private boolean shouldPeriodicallyFlush(long flushThresholdSizeInBytes, long flu * * This method is to maintain translog only, thus IndexWriter#hasUncommittedChanges condition is not considered. */ - final long translogGenerationOfNewCommit = translog.getMinGenerationForSeqNo( - localCheckpointTracker.getProcessedCheckpoint() + 1 - ).translogFileGeneration; + final long translogGenerationOfNewCommit = translog.getMinGenerationForSeqNo(localCheckpointTracker.getProcessedCheckpoint() + 1) + .translogFileGeneration(); return translogGenerationOfLastCommit < translogGenerationOfNewCommit || localCheckpointTracker.getProcessedCheckpoint() == localCheckpointTracker.getMaxSeqNo(); } @@ -2728,6 +2727,10 @@ private IndexWriterConfig getIndexWriterConfig() { } if (config().getIndexSort() != null) { iwc.setIndexSort(config().getIndexSort()); + if (config().getIndexSettings().getIndexVersionCreated().onOrAfter(IndexVersions.INDEX_SORTING_ON_NESTED)) { + // Needed to support index sorting in the presence of nested objects. + iwc.setParentField(ROOT_DOC_FIELD_NAME); + } } // Provide a custom leaf sorter, so that index readers opened from this writer // will have its leaves sorted according the given leaf sorter. diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index eda408a9c8fde..c9474b58ef447 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -244,8 +244,8 @@ protected void closeNoLock(String reason, CountDownLatch closedLatch) { private static SeqNoStats buildSeqNoStats(EngineConfig config, SegmentInfos infos) { final SequenceNumbers.CommitInfo seqNoStats = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(infos.userData.entrySet()); - long maxSeqNo = seqNoStats.maxSeqNo; - long localCheckpoint = seqNoStats.localCheckpoint; + long maxSeqNo = seqNoStats.maxSeqNo(); + long localCheckpoint = seqNoStats.localCheckpoint(); return new SeqNoStats(maxSeqNo, localCheckpoint, config.getGlobalCheckpointSupplier().getAsLong()); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/SafeCommitInfo.java b/server/src/main/java/org/elasticsearch/index/engine/SafeCommitInfo.java index 6858315f5b37f..5b206ecfd90dc 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/SafeCommitInfo.java +++ b/server/src/main/java/org/elasticsearch/index/engine/SafeCommitInfo.java @@ -12,15 +12,6 @@ /** * Information about the safe commit, for making decisions about recoveries. 
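
The InternalEngine change above (gated on IndexVersions.INDEX_SORTING_ON_NESTED) registers a parent field on the IndexWriterConfig whenever an index sort is configured, which, per the accompanying comment, is what allows index sorting to coexist with nested (parent/child block) documents. A hedged sketch of that wiring with an illustrative sort field; only setParentField and the version gate come from the diff:

    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.SortField;

    class SortedWriterConfigSketch {
        static IndexWriterConfig newConfig(boolean createdOnOrAfterSortingOnNested) {
            IndexWriterConfig iwc = new IndexWriterConfig();
            iwc.setIndexSort(new Sort(new SortField("timestamp", SortField.Type.LONG)));
            if (createdOnOrAfterSortingOnNested) {
                iwc.setParentField("__root_doc_for_nested");  // value of Engine.ROOT_DOC_FIELD_NAME above
            }
            return iwc;
        }
    }
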
*/ -public class SafeCommitInfo { - - public final long localCheckpoint; - public final int docCount; - - public SafeCommitInfo(long localCheckpoint, int docCount) { - this.localCheckpoint = localCheckpoint; - this.docCount = docCount; - } - +public record SafeCommitInfo(long localCheckpoint, int docCount) { public static final SafeCommitInfo EMPTY = new SafeCommitInfo(SequenceNumbers.NO_OPS_PERFORMED, 0); } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/FieldDataContext.java b/server/src/main/java/org/elasticsearch/index/fielddata/FieldDataContext.java index 3cfab4e599015..dd3d8c9ffda4b 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/FieldDataContext.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/FieldDataContext.java @@ -8,6 +8,7 @@ package org.elasticsearch.index.fielddata; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.search.lookup.SearchLookup; @@ -25,6 +26,7 @@ */ public record FieldDataContext( String fullyQualifiedIndexName, + IndexSettings indexSettings, Supplier lookupSupplier, Function> sourcePathsLookup, MappedFieldType.FielddataOperation fielddataOperation @@ -38,11 +40,8 @@ public record FieldDataContext( * @param reason the reason that runtime fields are not supported */ public static FieldDataContext noRuntimeFields(String reason) { - return new FieldDataContext( - "", - () -> { throw new UnsupportedOperationException("Runtime fields not supported for [" + reason + "]"); }, - Set::of, - MappedFieldType.FielddataOperation.SEARCH - ); + return new FieldDataContext("", null, () -> { + throw new UnsupportedOperationException("Runtime fields not supported for [" + reason + "]"); + }, Set::of, MappedFieldType.FielddataOperation.SEARCH); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java index 831244a3969ef..39f4a3a82c5c4 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.geo.GeometryFormatterFactory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.xcontent.DeprecationHandler; @@ -35,6 +36,12 @@ */ public abstract class AbstractGeometryFieldMapper extends FieldMapper { + // The GeoShapeFieldMapper class does not exist in server any more. + // For backwards compatibility we add the name of the class manually. 
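
The hunks above convert SafeCommitInfo into a Java record and switch SequenceNumbers.CommitInfo and translog-generation reads to accessor-style calls, so call sites move from field reads like `info.docCount` to method calls like `info.docCount()`. A minimal, self-contained sketch of that migration pattern, using hypothetical names rather than the real Elasticsearch types:

```java
// Hypothetical before/after sketch of the field-to-record migration pattern.
// None of these type names come from the Elasticsearch codebase.

// Before: a plain holder with public final fields, read as `info.docCount`.
final class CommitSummary {
    public final long localCheckpoint;
    public final int docCount;

    CommitSummary(long localCheckpoint, int docCount) {
        this.localCheckpoint = localCheckpoint;
        this.docCount = docCount;
    }
}

// After: a record; the compiler generates the constructor, equals/hashCode and
// accessors, so call sites become `info.docCount()` instead of `info.docCount`.
record CommitSummaryRecord(long localCheckpoint, int docCount) {
    static final CommitSummaryRecord EMPTY = new CommitSummaryRecord(-1L, 0);
}

class RecordMigrationDemo {
    public static void main(String[] args) {
        CommitSummaryRecord info = new CommitSummaryRecord(42L, 7);
        // Field reads must be rewritten to accessor calls after the conversion.
        System.out.println(info.localCheckpoint() + " docs=" + info.docCount());
    }
}
```
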
+ protected static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger( + "org.elasticsearch.index.mapper.GeoShapeFieldMapper" + ); + public static Parameter> ignoreMalformedParam( Function> initializer, boolean ignoreMalformedByDefault diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java index 56f1faeb38a5b..619c6c6613d59 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java @@ -14,7 +14,7 @@ import java.util.function.Function; /** - * Base class for {@link GeoShapeFieldMapper} + * Base class for shape field mappers */ public abstract class AbstractShapeGeometryFieldMapper extends AbstractGeometryFieldMapper { @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java index 5552f62bf8ce4..1891e19b6d036 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java @@ -70,8 +70,8 @@ public BinaryFieldMapper.Builder docValues(boolean hasDocValues) { @Override public BinaryFieldMapper build(MapperBuilderContext context) { return new BinaryFieldMapper( - name(), - new BinaryFieldType(context.buildFullName(name()), stored.getValue(), hasDocValues.getValue(), meta.getValue()), + leafName(), + new BinaryFieldType(context.buildFullName(leafName()), stored.getValue(), hasDocValues.getValue(), meta.getValue()), multiFieldsBuilder.build(this, context), copyTo, this @@ -192,7 +192,7 @@ public void indexValue(DocumentParserContext context, byte[] value) { @Override public FieldMapper.Builder getMergeBuilder() { - return new BinaryFieldMapper.Builder(simpleName(), isSyntheticSourceEnabledViaIndexMode).init(this); + return new BinaryFieldMapper.Builder(leafName(), isSyntheticSourceEnabledViaIndexMode).init(this); } @Override @@ -209,16 +209,20 @@ protected SyntheticSourceMode syntheticSourceMode() { public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { if (copyTo.copyToFields().isEmpty() != true) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" + "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); } if (hasDocValues == false) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it doesn't have doc values" + "field [" + + fullPath() + + "] of type [" + + typeName() + + "] doesn't support synthetic source because it doesn't have doc values" ); } - return new BinaryDocValuesSyntheticFieldLoader(name()) { + return new BinaryDocValuesSyntheticFieldLoader(fullPath()) { @Override protected void writeValue(XContentBuilder b, BytesRef value) throws IOException { var in = new ByteArrayStreamInput(); @@ -229,10 +233,10 @@ protected void writeValue(XContentBuilder b, BytesRef value) throws IOException case 0: return; case 1: - b.field(simpleName()); + b.field(leafName()); break; default: - b.startArray(simpleName()); + b.startArray(leafName()); } for (int i = 0; i < count; i++) { diff --git 
a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java index c6b428458d2b9..f0cc51f3effa5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java @@ -116,7 +116,7 @@ protected Parameter[] getParameters() { @Override public BooleanFieldMapper build(MapperBuilderContext context) { MappedFieldType ft = new BooleanFieldType( - context.buildFullName(name()), + context.buildFullName(leafName()), indexed.getValue() && indexCreatedVersion.isLegacyIndexVersion() == false, stored.getValue(), docValues.getValue(), @@ -124,7 +124,14 @@ public BooleanFieldMapper build(MapperBuilderContext context) { scriptValues(), meta.getValue() ); - return new BooleanFieldMapper(name(), ft, multiFieldsBuilder.build(this, context), copyTo, context.isSourceSynthetic(), this); + return new BooleanFieldMapper( + leafName(), + ft, + multiFieldsBuilder.build(this, context), + copyTo, + context.isSourceSynthetic(), + this + ); } private FieldValues scriptValues() { @@ -134,7 +141,7 @@ private FieldValues scriptValues() { BooleanFieldScript.Factory scriptFactory = scriptCompiler.compile(script.get(), BooleanFieldScript.CONTEXT); return scriptFactory == null ? null - : (lookup, ctx, doc, consumer) -> scriptFactory.newFactory(name(), script.get().getParams(), lookup, OnScriptError.FAIL) + : (lookup, ctx, doc, consumer) -> scriptFactory.newFactory(leafName(), script.get().getParams(), lookup, OnScriptError.FAIL) .newInstance(ctx) .runForDoc(doc, consumer); } @@ -434,7 +441,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio context.addIgnoredField(mappedFieldType.name()); if (storeMalformedFields) { // Save a copy of the field so synthetic source can load it - context.doc().add(IgnoreMalformedStoredValues.storedField(name(), context.parser())); + context.doc().add(IgnoreMalformedStoredValues.storedField(fullPath(), context.parser())); } } else { throw e; @@ -473,7 +480,7 @@ protected void indexScriptValues( @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), scriptCompiler, ignoreMalformedByDefault, indexCreatedVersion).init(this); + return new Builder(leafName(), scriptCompiler, ignoreMalformedByDefault, indexCreatedVersion).init(this); } @Override @@ -498,15 +505,19 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { } if (hasDocValues == false) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it doesn't have doc values" + "field [" + + fullPath() + + "] of type [" + + typeName() + + "] doesn't support synthetic source because it doesn't have doc values" ); } if (copyTo.copyToFields().isEmpty() != true) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" + "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); } - return new SortedNumericDocValuesSyntheticFieldLoader(name(), simpleName(), ignoreMalformed.value()) { + return new SortedNumericDocValuesSyntheticFieldLoader(fullPath(), leafName(), ignoreMalformed.value()) { @Override protected void writeValue(XContentBuilder b, long value) throws IOException { b.value(value == 1); diff --git 
a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java index 5d5ef076852a8..23272fbd354f3 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java @@ -82,7 +82,7 @@ public class CompletionFieldMapper extends FieldMapper { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), builder.defaultAnalyzer, builder.indexVersionCreated).init(this); + return new Builder(leafName(), builder.defaultAnalyzer, builder.indexVersionCreated).init(this); } public static class Defaults { @@ -205,9 +205,9 @@ public CompletionFieldMapper build(MapperBuilderContext context) { new CompletionAnalyzer(this.searchAnalyzer.getValue(), preserveSeparators.getValue(), preservePosInc.getValue()) ); - CompletionFieldType ft = new CompletionFieldType(context.buildFullName(name()), completionAnalyzer, meta.getValue()); + CompletionFieldType ft = new CompletionFieldType(context.buildFullName(leafName()), completionAnalyzer, meta.getValue()); ft.setContextMappings(contexts.getValue()); - return new CompletionFieldMapper(name(), ft, multiFieldsBuilder.build(this, context), copyTo, this); + return new CompletionFieldMapper(leafName(), ft, multiFieldsBuilder.build(this, context), copyTo, this); } private void checkCompletionContextsLimit() { @@ -224,7 +224,7 @@ private void checkCompletionContextsLimit() { + COMPLETION_CONTEXTS_LIMIT + "] completion contexts" + " in the mapping for field [" - + name() + + leafName() + "]. " + "The maximum allowed number of completion contexts in a mapping will be limited to " + "[" diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DataStreamTimestampFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DataStreamTimestampFieldMapper.java index ece0b082a3ccf..6f4a8ffa92cbe 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DataStreamTimestampFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DataStreamTimestampFieldMapper.java @@ -8,7 +8,8 @@ package org.elasticsearch.index.mapper; -import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.LongField; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.Query; import org.elasticsearch.common.bytes.BytesReference; @@ -22,7 +23,6 @@ import java.io.IOException; import java.io.UncheckedIOException; import java.time.Instant; -import java.util.List; import java.util.Map; import static org.elasticsearch.core.TimeValue.NSEC_PER_MSEC; @@ -36,6 +36,7 @@ public class DataStreamTimestampFieldMapper extends MetadataFieldMapper { public static final String NAME = "_data_stream_timestamp"; public static final String DEFAULT_PATH = "@timestamp"; + public static final String TIMESTAMP_VALUE_KEY = "@timestamp._value"; public static final DataStreamTimestampFieldMapper ENABLED_INSTANCE = new DataStreamTimestampFieldMapper(true); private static final DataStreamTimestampFieldMapper DISABLED_INSTANCE = new DataStreamTimestampFieldMapper(false); @@ -189,43 +190,44 @@ public void doValidate(MappingLookup lookup) { } } + public static void storeTimestampValueForReuse(LuceneDocument document, long timestamp) { + var existingField = document.getByKey(DataStreamTimestampFieldMapper.TIMESTAMP_VALUE_KEY); + if (existingField != null) { + throw new 
IllegalArgumentException("data stream timestamp field [" + DEFAULT_PATH + "] encountered multiple values"); + } + + document.onlyAddKey( + DataStreamTimestampFieldMapper.TIMESTAMP_VALUE_KEY, + new LongField(DataStreamTimestampFieldMapper.TIMESTAMP_VALUE_KEY, timestamp, Field.Store.NO) + ); + } + + public static long extractTimestampValue(LuceneDocument document) { + IndexableField timestampValueField = document.getByKey(TIMESTAMP_VALUE_KEY); + if (timestampValueField == null) { + throw new IllegalArgumentException("data stream timestamp field [" + DEFAULT_PATH + "] is missing"); + } + + return timestampValueField.numericValue().longValue(); + } + @Override public void postParse(DocumentParserContext context) throws IOException { if (enabled == false) { // not configured, so skip the validation return; } - boolean foundFsTimestampField = false; - IndexableField first = null; - final List fields = context.rootDoc().getFields(); - for (int i = 0; i < fields.size(); i++) { - IndexableField indexableField = fields.get(i); - if (DEFAULT_PATH.equals(indexableField.name()) == false) { - continue; - } - if (first == null) { - first = indexableField; - } - if (indexableField.fieldType().docValuesType() == DocValuesType.SORTED_NUMERIC) { - if (foundFsTimestampField) { - throw new IllegalArgumentException("data stream timestamp field [" + DEFAULT_PATH + "] encountered multiple values"); - } - foundFsTimestampField = true; - } - } - if (first == null) { - throw new IllegalArgumentException("data stream timestamp field [" + DEFAULT_PATH + "] is missing"); - } + long timestamp = extractTimestampValue(context.doc()); + var indexMode = context.indexSettings().getMode(); if (indexMode.shouldValidateTimestamp()) { TimestampBounds bounds = context.indexSettings().getTimestampBounds(); - validateTimestamp(bounds, first, context); + validateTimestamp(bounds, timestamp, context); } } - private static void validateTimestamp(TimestampBounds bounds, IndexableField field, DocumentParserContext context) { - long originValue = field.numericValue().longValue(); + private static void validateTimestamp(TimestampBounds bounds, long originValue, DocumentParserContext context) { long value = originValue; Resolution resolution; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index c817bed6e503e..c70414807cdce 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -293,7 +293,7 @@ DateFormatter buildFormatter() { logger.warn(() -> "Error parsing format [" + format.getValue() + "] of legacy index, falling back to default", e); return DateFormatter.forPattern(format.getDefaultValue()).withLocale(locale.getValue()); } else { - throw new IllegalArgumentException("Error parsing [format] on field [" + name() + "]: " + e.getMessage(), e); + throw new IllegalArgumentException("Error parsing [format] on field [" + leafName() + "]: " + e.getMessage(), e); } } } @@ -306,7 +306,7 @@ private FieldValues scriptValues() { return factory == null ? 
null : (lookup, ctx, doc, consumer) -> factory.newFactory( - name(), + leafName(), script.get().getParams(), lookup, buildFormatter(), @@ -327,7 +327,7 @@ private Long parseNullValue(DateFieldType fieldType) { return fieldType.parse(nullValue.getValue()); } catch (Exception e) { if (indexCreatedVersion.onOrAfter(IndexVersions.V_8_0_0)) { - throw new MapperParsingException("Error parsing [null_value] on field [" + name() + "]: " + e.getMessage(), e); + throw new MapperParsingException("Error parsing [null_value] on field [" + leafName() + "]: " + e.getMessage(), e); } else { DEPRECATION_LOGGER.warn( DeprecationCategory.MAPPINGS, @@ -335,7 +335,7 @@ private Long parseNullValue(DateFieldType fieldType) { "Error parsing [" + nullValue.getValue() + "] as date in [null_value] on field [" - + name() + + leafName() + "]); [null_value] will be ignored" ); return null; @@ -346,7 +346,7 @@ private Long parseNullValue(DateFieldType fieldType) { @Override public DateFieldMapper build(MapperBuilderContext context) { DateFieldType ft = new DateFieldType( - context.buildFullName(name()), + context.buildFullName(leafName()), index.getValue() && indexCreatedVersion.isLegacyIndexVersion() == false, index.getValue(), store.getValue(), @@ -359,13 +359,14 @@ public DateFieldMapper build(MapperBuilderContext context) { ); Long nullTimestamp = parseNullValue(ft); - if (name().equals(DataStreamTimestampFieldMapper.DEFAULT_PATH) + if (ft.name().equals(DataStreamTimestampFieldMapper.DEFAULT_PATH) && context.isDataStream() && ignoreMalformed.isConfigured() == false) { ignoreMalformed.setValue(false); } + return new DateFieldMapper( - name(), + leafName(), ft, multiFieldsBuilder.build(this, context), copyTo, @@ -868,8 +869,10 @@ public DocValueFormat docValueFormat(@Nullable String format, ZoneId timeZone) { private final ScriptCompiler scriptCompiler; private final FieldValues scriptValues; + private final boolean isDataStreamTimestampField; + private DateFieldMapper( - String simpleName, + String leafName, MappedFieldType mappedFieldType, MultiFields multiFields, CopyTo copyTo, @@ -878,7 +881,7 @@ private DateFieldMapper( boolean isSourceSynthetic, Builder builder ) { - super(simpleName, mappedFieldType, multiFields, copyTo, builder.script.get() != null, builder.onScriptError.get()); + super(leafName, mappedFieldType, multiFields, copyTo, builder.script.get() != null, builder.onScriptError.get()); this.store = builder.store.getValue(); this.indexed = builder.index.getValue(); this.hasDocValues = builder.docValues.getValue(); @@ -894,11 +897,12 @@ private DateFieldMapper( this.script = builder.script.get(); this.scriptCompiler = builder.scriptCompiler; this.scriptValues = builder.scriptValues(); + this.isDataStreamTimestampField = mappedFieldType.name().equals(DataStreamTimestampFieldMapper.DEFAULT_PATH); } @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), resolution, null, scriptCompiler, ignoreMalformedByDefault, indexCreatedVersion).init(this); + return new Builder(leafName(), resolution, null, scriptCompiler, ignoreMalformedByDefault, indexCreatedVersion).init(this); } @Override @@ -929,7 +933,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio context.addIgnoredField(mappedFieldType.name()); if (isSourceSynthetic) { // Save a copy of the field so synthetic source can load it - context.doc().add(IgnoreMalformedStoredValues.storedField(name(), context.parser())); + context.doc().add(IgnoreMalformedStoredValues.storedField(fullPath(), 
context.parser())); } return; } else { @@ -942,6 +946,16 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio } private void indexValue(DocumentParserContext context, long timestamp) { + // DataStreamTimestampFieldMapper and TsidExtractingFieldMapper need to use timestamp value, + // so when this is true we store it in a well-known place + // instead of forcing them to iterate over all fields. + // + // DataStreamTimestampFieldMapper is present and enabled both + // in data streams and standalone indices in time_series mode + if (isDataStreamTimestampField && context.mappingLookup().isDataStreamTimestampFieldEnabled()) { + DataStreamTimestampFieldMapper.storeTimestampValueForReuse(context.doc(), timestamp); + } + if (indexed && hasDocValues) { context.doc().add(new LongField(fieldType().name(), timestamp)); } else if (hasDocValues) { @@ -989,15 +1003,19 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { } if (hasDocValues == false) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it doesn't have doc values" + "field [" + + fullPath() + + "] of type [" + + typeName() + + "] doesn't support synthetic source because it doesn't have doc values" ); } if (copyTo.copyToFields().isEmpty() != true) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" + "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); } - return new SortedNumericDocValuesSyntheticFieldLoader(name(), simpleName(), ignoreMalformed) { + return new SortedNumericDocValuesSyntheticFieldLoader(fullPath(), leafName(), ignoreMalformed) { @Override protected void writeValue(XContentBuilder b, long value) throws IOException { b.value(fieldType().format(value, fieldType().dateTimeFormatter())); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index 0136175cc6391..55a48853ee679 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -9,7 +9,9 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexSortConfig; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -21,6 +23,9 @@ public class DocumentMapper { private final MappingLookup mappingLookup; private final DocumentParser documentParser; private final MapperMetrics mapperMetrics; + private final IndexVersion indexVersion; + + static final NodeFeature INDEX_SORTING_ON_NESTED = new NodeFeature("mapper.index_sorting_on_nested"); /** * Create a new {@link DocumentMapper} that holds empty mappings. 
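
The DataStreamTimestampFieldMapper changes above replace the scan over all indexed `@timestamp` fields with a value stored once under a well-known key (`@timestamp._value`) and read back in postParse. A rough, self-contained sketch of that store-once/read-back pattern, with a plain Map standing in for LuceneDocument and otherwise hypothetical names:

```java
import java.util.HashMap;
import java.util.Map;

// Sketch of the "store under a well-known key, read back later" pattern used for
// the timestamp value. LuceneDocument and the real field classes are replaced by
// a plain Map here; the key and method names are illustrative only.
class TimestampReuseSketch {
    static final String TIMESTAMP_VALUE_KEY = "@timestamp._value";

    // Called once while parsing the timestamp field.
    static void storeTimestampValueForReuse(Map<String, Object> doc, long timestamp) {
        if (doc.containsKey(TIMESTAMP_VALUE_KEY)) {
            throw new IllegalArgumentException("timestamp field encountered multiple values");
        }
        doc.put(TIMESTAMP_VALUE_KEY, timestamp);
    }

    // Called later (e.g. in postParse) instead of iterating over every field.
    static long extractTimestampValue(Map<String, Object> doc) {
        Object value = doc.get(TIMESTAMP_VALUE_KEY);
        if (value == null) {
            throw new IllegalArgumentException("timestamp field is missing");
        }
        return (Long) value;
    }

    public static void main(String[] args) {
        Map<String, Object> doc = new HashMap<>();
        storeTimestampValueForReuse(doc, 1_718_000_000_000L);
        System.out.println("timestamp = " + extractTimestampValue(doc));
    }
}
```
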
@@ -50,10 +55,11 @@ public static DocumentMapper createEmpty(MapperService mapperService) { MapperMetrics mapperMetrics ) { this.documentParser = documentParser; - this.type = mapping.getRoot().name(); + this.type = mapping.getRoot().fullPath(); this.mappingLookup = MappingLookup.fromMapping(mapping); this.mappingSource = source; this.mapperMetrics = mapperMetrics; + this.indexVersion = version; assert mapping.toCompressedXContent().equals(source) || isSyntheticSourceMalformed(source, version) : "provided source [" + source + "] differs from mapping [" + mapping.toCompressedXContent() + "]"; @@ -134,7 +140,18 @@ public void validate(IndexSettings settings, boolean checkLimits) { } if (settings.getIndexSortConfig().hasIndexSort() && mappers().nestedLookup() != NestedLookup.EMPTY) { - throw new IllegalArgumentException("cannot have nested fields when index sort is activated"); + if (indexVersion.before(IndexVersions.INDEX_SORTING_ON_NESTED)) { + throw new IllegalArgumentException("cannot have nested fields when index sort is activated"); + } + for (String field : settings.getValue(IndexSortConfig.INDEX_SORT_FIELD_SETTING)) { + for (NestedObjectMapper nestedObjectMapper : mappers().nestedLookup().getNestedMappers().values()) { + if (field.startsWith(nestedObjectMapper.fullPath())) { + throw new IllegalArgumentException( + "cannot apply index sort to field [" + field + "] under nested object [" + nestedObjectMapper.fullPath() + "]" + ); + } + } + } } List routingPaths = settings.getIndexMetadata().getRoutingPaths(); for (String path : routingPaths) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 0966698277723..8bf7f3f4e72a3 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -161,7 +161,13 @@ private static void executeIndexTimeScripts(DocumentParserContext context) { SearchLookup searchLookup = new SearchLookup( context.mappingLookup().indexTimeLookup()::get, (ft, lookup, fto) -> ft.fielddataBuilder( - new FieldDataContext(context.indexSettings().getIndex().getName(), lookup, context.mappingLookup()::sourcePaths, fto) + new FieldDataContext( + context.indexSettings().getIndex().getName(), + context.indexSettings(), + lookup, + context.mappingLookup()::sourcePaths, + fto + ) ).build(new IndexFieldDataCache.None(), new NoneCircuitBreakerService()), (ctx, doc) -> Source.fromBytes(context.sourceToParse().source()) ); @@ -169,7 +175,7 @@ private static void executeIndexTimeScripts(DocumentParserContext context) { // the document reader, so to ensure that we don't run them multiple times we // guard them with an 'executed' boolean Map> fieldScripts = new HashMap<>(); - indexTimeScriptMappers.forEach(mapper -> fieldScripts.put(mapper.name(), new Consumer<>() { + indexTimeScriptMappers.forEach(mapper -> fieldScripts.put(mapper.fullPath(), new Consumer<>() { boolean executed = false; @Override @@ -250,7 +256,7 @@ static Mapping createDynamicUpdate(DocumentParserContext context) { return null; } RootObjectMapper.Builder rootBuilder = context.updateRoot(); - context.getDynamicMappers().forEach(mapper -> rootBuilder.addDynamic(mapper.name(), null, mapper, context)); + context.getDynamicMappers().forEach(mapper -> rootBuilder.addDynamic(mapper.fullPath(), null, mapper, context)); for (RuntimeField runtimeField : context.getDynamicRuntimeFields()) { 
rootBuilder.addRuntimeField(runtimeField); @@ -294,8 +300,8 @@ static void parseObjectOrNested(DocumentParserContext context) throws IOExceptio Tuple tuple = XContentDataHelper.cloneSubContext(context); context.addIgnoredField( new IgnoredSourceFieldMapper.NameValue( - context.parent().name(), - context.parent().fullPath().indexOf(context.parent().simpleName()), + context.parent().fullPath(), + context.parent().fullPath().indexOf(context.parent().leafName()), XContentDataHelper.encodeXContentBuilder(tuple.v2()), context.doc() ) @@ -327,7 +333,7 @@ private static void throwOnConcreteValue(ObjectMapper mapper, String currentFiel throw new DocumentParsingException( context.parser().getTokenLocation(), "object mapping for [" - + mapper.name() + + mapper.fullPath() + "] tried to parse field [" + currentFieldName + "] as object, but found a concrete value" @@ -384,7 +390,7 @@ private static void throwEOF(ObjectMapper mapper, DocumentParserContext context) throw new DocumentParsingException( context.parser().getTokenLocation(), "object mapping for [" - + mapper.name() + + mapper.fullPath() + "] tried to parse field [" + context.parser().currentName() + "] as object, but got EOF, has a concrete value been provided to it?" @@ -429,7 +435,7 @@ static void parseObjectOrField(DocumentParserContext context, Mapper mapper) thr } else if (mapper instanceof FieldMapper fieldMapper) { if (shouldFlattenObject(context, fieldMapper)) { // we pass the mapper's simpleName as parentName to the new DocumentParserContext - String currentFieldName = fieldMapper.simpleName(); + String currentFieldName = fieldMapper.leafName(); context.path().remove(); parseObjectOrNested(context.createFlattenContext(currentFieldName)); context.path().add(currentFieldName); @@ -440,7 +446,7 @@ static void parseObjectOrField(DocumentParserContext context, Mapper mapper) thr context.addIgnoredField( IgnoredSourceFieldMapper.NameValue.fromContext( context, - fieldMapper.name(), + fieldMapper.fullPath(), XContentDataHelper.encodeXContentBuilder(contextWithSourceToStore.v2()) ) ); @@ -476,14 +482,14 @@ private static boolean shouldFlattenObject(DocumentParserContext context, FieldM private static void throwOnUnrecognizedMapperType(Mapper mapper) { throw new IllegalStateException( - "The provided mapper [" + mapper.name() + "] has an unrecognized type [" + mapper.getClass().getSimpleName() + "]." + "The provided mapper [" + mapper.fullPath() + "] has an unrecognized type [" + mapper.getClass().getSimpleName() + "]." ); } private static void throwOnCopyToOnFieldAlias(DocumentParserContext context, Mapper mapper) { throw new DocumentParsingException( context.parser().getTokenLocation(), - "Cannot " + (context.isWithinCopyTo() ? "copy" : "write") + " to a field alias [" + mapper.name() + "]." + "Cannot " + (context.isWithinCopyTo() ? "copy" : "write") + " to a field alias [" + mapper.fullPath() + "]." ); } @@ -491,7 +497,7 @@ private static void throwOnCopyToOnObject(Mapper mapper, List copyToFiel throw new DocumentParsingException( context.parser().getTokenLocation(), "Cannot copy field [" - + mapper.name() + + mapper.fullPath() + "] to fields " + copyToFields + ". Copy-to currently only works for value-type fields, not objects." 
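
InternalEngine now registers a parent field on the IndexWriterConfig when the index version is on or after INDEX_SORTING_ON_NESTED, and DocumentMapper rejects sort fields that live under a nested mapper. A minimal Lucene-only sketch of the parent-field wiring, assuming a Lucene 9.x release that exposes IndexWriterConfig#setParentField (field names here are illustrative):

```java
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

// Lucene-only sketch: configure an index sort and, because nested documents are
// indexed as blocks, register a parent field so sorting can keep blocks intact.
// "__root_doc_for_nested" mirrors the Engine constant added in the diff above.
class IndexSortWithNestedSketch {
    public static void main(String[] args) throws Exception {
        IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer());
        iwc.setIndexSort(new Sort(new SortField("rank", SortField.Type.LONG)));
        // Required when index sorting is combined with document blocks (nested docs).
        iwc.setParentField("__root_doc_for_nested");

        try (Directory dir = new ByteBuffersDirectory(); IndexWriter writer = new IndexWriter(dir, iwc)) {
            System.out.println("index sort: " + writer.getConfig().getIndexSort());
        }
    }
}
```
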
@@ -562,9 +568,9 @@ private static void parseObjectDynamic(DocumentParserContext context, String cur throw new DocumentParsingException( context.parser().getTokenLocation(), "Tried to add nested object [" - + dynamicObjectMapper.simpleName() + + dynamicObjectMapper.leafName() + "] to object [" - + context.parent().name() + + context.parent().fullPath() + "] which does not support subobjects" ); } @@ -594,7 +600,7 @@ private static void parseObjectDynamic(DocumentParserContext context, String cur private static void throwOnCreateDynamicNestedViaCopyTo(Mapper dynamicObjectMapper, DocumentParserContext context) { throw new DocumentParsingException( context.parser().getTokenLocation(), - "It is forbidden to create dynamic nested objects ([" + dynamicObjectMapper.name() + "]) through `copy_to`" + "It is forbidden to create dynamic nested objects ([" + dynamicObjectMapper.fullPath() + "]) through `copy_to`" ); } @@ -759,7 +765,7 @@ private static void throwEOFOnParseArray(String arrayFieldName, DocumentParserCo throw new DocumentParsingException( context.parser().getTokenLocation(), "object mapping for [" - + context.parent().name() + + context.parent().fullPath() + "] with array for [" + arrayFieldName + "] tried to parse as array, but got EOF, is there a mismatch in types for the same field?" @@ -782,7 +788,7 @@ private static void throwOnNoFieldName(DocumentParserContext context) throws IOE throw new DocumentParsingException( context.parser().getTokenLocation(), "object mapping [" - + context.parent().name() + + context.parent().fullPath() + "] trying to serialize a value with" + " no field associated with it, current value [" + context.parser().textOrNull() @@ -937,7 +943,7 @@ protected void parseCreateField(DocumentParserContext context) { } @Override - public String name() { + public String fullPath() { throw new UnsupportedOperationException(); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java index f47d86b746a38..248369b249007 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java @@ -325,7 +325,7 @@ final boolean getClonedSource() { return clonedSource; } - final boolean canAddIgnoredField() { + public final boolean canAddIgnoredField() { return mappingLookup.isSourceSynthetic() && clonedSource == false; } @@ -373,15 +373,15 @@ public boolean isCopyToField(String name) { public final boolean addDynamicMapper(Mapper mapper) { // eagerly check object depth limit here to avoid stack overflow errors if (mapper instanceof ObjectMapper) { - MappingLookup.checkObjectDepthLimit(indexSettings().getMappingDepthLimit(), mapper.name()); + MappingLookup.checkObjectDepthLimit(indexSettings().getMappingDepthLimit(), mapper.fullPath()); } // eagerly check field name limit here to avoid OOM errors // only check fields that are not already mapped or tracked in order to avoid hitting field limit too early via double-counting // note that existing fields can also receive dynamic mapping updates (e.g. 
constant_keyword to fix the value) - if (mappingLookup.getMapper(mapper.name()) == null - && mappingLookup.objectMappers().containsKey(mapper.name()) == false - && dynamicMappers.containsKey(mapper.name()) == false) { + if (mappingLookup.getMapper(mapper.fullPath()) == null + && mappingLookup.objectMappers().containsKey(mapper.fullPath()) == false + && dynamicMappers.containsKey(mapper.fullPath()) == false) { int mapperSize = mapper.getTotalFieldsCount(); int additionalFieldsToAdd = getNewFieldsSize() + mapperSize; if (indexSettings().isIgnoreDynamicFieldsBeyondLimit()) { @@ -391,15 +391,15 @@ public final boolean addDynamicMapper(Mapper mapper) { addIgnoredField( IgnoredSourceFieldMapper.NameValue.fromContext( this, - mapper.name(), + mapper.fullPath(), XContentDataHelper.encodeToken(parser()) ) ); } catch (IOException e) { - throw new IllegalArgumentException("failed to parse field [" + mapper.name() + " ]", e); + throw new IllegalArgumentException("failed to parse field [" + mapper.fullPath() + " ]", e); } } - addIgnoredField(mapper.name()); + addIgnoredField(mapper.fullPath()); return false; } } else { @@ -408,7 +408,7 @@ public final boolean addDynamicMapper(Mapper mapper) { dynamicMappersSize.add(mapperSize); } if (mapper instanceof ObjectMapper objectMapper) { - dynamicObjectMappers.put(objectMapper.name(), objectMapper); + dynamicObjectMappers.put(objectMapper.fullPath(), objectMapper); // dynamic object mappers may have been obtained from applying a dynamic template, in which case their definition may contain // sub-fields as well as sub-objects that need to be added to the mappings for (Mapper submapper : objectMapper.mappers.values()) { @@ -425,7 +425,7 @@ public final boolean addDynamicMapper(Mapper mapper) { // dynamically mapped objects when the incoming document defines no sub-fields in them: // 1) by default, they would be empty containers in the mappings, is it then important to map them? // 2) they can be the result of applying a dynamic template which may define sub-fields or set dynamic, enabled or subobjects. 
- dynamicMappers.computeIfAbsent(mapper.name(), k -> new ArrayList<>()).add(mapper); + dynamicMappers.computeIfAbsent(mapper.fullPath(), k -> new ArrayList<>()).add(mapper); return true; } @@ -673,10 +673,11 @@ public final MapperBuilderContext createDynamicMapperBuilderContext() { return new MapperBuilderContext( p, mappingLookup.isSourceSynthetic(), - false, + mappingLookup.isDataStreamTimestampFieldEnabled(), containsDimensions, dynamic, - MergeReason.MAPPING_UPDATE + MergeReason.MAPPING_UPDATE, + false ); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldAliasMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldAliasMapper.java index 8aa29e6317d51..c62473fb195b6 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldAliasMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldAliasMapper.java @@ -32,16 +32,16 @@ public static class Names { } private final String name; - private final String path; + private final String targetPath; - public FieldAliasMapper(String simpleName, String name, String path) { + public FieldAliasMapper(String simpleName, String name, String targetPath) { super(simpleName); this.name = Mapper.internFieldName(name); - this.path = path; + this.targetPath = targetPath; } @Override - public String name() { + public String fullPath() { return name; } @@ -50,15 +50,15 @@ public String typeName() { return CONTENT_TYPE; } - public String path() { - return path; + public String targetPath() { + return targetPath; } @Override public Mapper merge(Mapper mergeWith, MapperMergeContext mapperMergeContext) { if ((mergeWith instanceof FieldAliasMapper) == false) { throw new IllegalArgumentException( - "Cannot merge a field alias mapping [" + name() + "] with a mapping that is not for a field alias." + "Cannot merge a field alias mapping [" + fullPath() + "] with a mapping that is not for a field alias." ); } return mergeWith; @@ -71,37 +71,37 @@ public Iterator iterator() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder.startObject(simpleName()).field("type", CONTENT_TYPE).field(Names.PATH, path).endObject(); + return builder.startObject(leafName()).field("type", CONTENT_TYPE).field(Names.PATH, targetPath).endObject(); } @Override public void validate(MappingLookup mappers) { - if (Objects.equals(this.path(), this.name())) { + if (Objects.equals(this.targetPath(), this.fullPath())) { throw new MapperParsingException( - "Invalid [path] value [" + path + "] for field alias [" + name() + "]: an alias cannot refer to itself." + "Invalid [path] value [" + targetPath + "] for field alias [" + fullPath() + "]: an alias cannot refer to itself." ); } - if (mappers.fieldTypesLookup().get(path) == null) { + if (mappers.fieldTypesLookup().get(targetPath) == null) { throw new MapperParsingException( "Invalid [path] value [" - + path + + targetPath + "] for field alias [" - + name() + + fullPath() + "]: an alias must refer to an existing field in the mappings." ); } - if (mappers.getMapper(path) instanceof FieldAliasMapper) { + if (mappers.getMapper(targetPath) instanceof FieldAliasMapper) { throw new MapperParsingException( - "Invalid [path] value [" + path + "] for field alias [" + name() + "]: an alias cannot refer to another alias." + "Invalid [path] value [" + targetPath + "] for field alias [" + fullPath() + "]: an alias cannot refer to another alias." 
); } String aliasScope = mappers.nestedLookup().getNestedParent(name); - String pathScope = mappers.nestedLookup().getNestedParent(path); + String pathScope = mappers.nestedLookup().getNestedParent(targetPath); if (Objects.equals(aliasScope, pathScope) == false) { StringBuilder message = new StringBuilder( "Invalid [path] value [" - + path + + targetPath + "] for field alias [" + name + "]: an alias must have the same nested scope as its target. " @@ -151,8 +151,8 @@ public Builder path(String path) { @Override public FieldAliasMapper build(MapperBuilderContext context) { - String fullName = context.buildFullName(name()); - return new FieldAliasMapper(name(), fullName, path); + String fullName = context.buildFullName(leafName()); + return new FieldAliasMapper(leafName(), fullName, path); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index 4338a62d79ab9..9e6b3132e8492 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -118,7 +118,7 @@ protected FieldMapper( } @Override - public String name() { + public String fullPath() { return fieldType().name(); } @@ -194,7 +194,7 @@ public void parse(DocumentParserContext context) throws IOException { } private void doParseMultiFields(DocumentParserContext context) throws IOException { - context.path().add(simpleName()); + context.path().add(leafName()); for (FieldMapper mapper : multiFields.mappers) { mapper.parse(context); } @@ -277,9 +277,9 @@ public final void executeScript( indexScriptValues(searchLookup, readerContext, doc, documentParserContext); } catch (Exception e) { if (onScriptError == OnScriptError.CONTINUE) { - documentParserContext.addIgnoredField(name()); + documentParserContext.addIgnoredField(fullPath()); } else { - throw new DocumentParsingException(XContentLocation.UNKNOWN, "Error executing script on field [" + name() + "]", e); + throw new DocumentParsingException(XContentLocation.UNKNOWN, "Error executing script on field [" + fullPath() + "]", e); } } } @@ -299,7 +299,7 @@ protected void indexScriptValues( int doc, DocumentParserContext documentParserContext ) { - throw new UnsupportedOperationException("FieldMapper " + name() + " does not support [script]"); + throw new UnsupportedOperationException("FieldMapper " + fullPath() + " does not support [script]"); } @Override @@ -321,11 +321,11 @@ public Iterator sourcePathUsedBy() { @Override public final void validate(MappingLookup mappers) { if (this.copyTo() != null && this.copyTo().copyToFields().isEmpty() == false) { - if (mappers.isMultiField(this.name())) { - throw new IllegalArgumentException("[copy_to] may not be used to copy from a multi-field: [" + this.name() + "]"); + if (mappers.isMultiField(this.fullPath())) { + throw new IllegalArgumentException("[copy_to] may not be used to copy from a multi-field: [" + this.fullPath() + "]"); } - final String sourceScope = mappers.nestedLookup().getNestedParent(this.name()); + final String sourceScope = mappers.nestedLookup().getNestedParent(this.fullPath()); for (String copyTo : this.copyTo().copyToFields()) { if (mappers.isMultiField(copyTo)) { throw new IllegalArgumentException("[copy_to] may not be used to copy to a multi-field: [" + copyTo + "]"); @@ -381,7 +381,7 @@ public final FieldMapper merge(Mapper mergeWith, MapperMergeContext mapperMergeC if (mergeWith instanceof FieldMapper == false) { throw new 
IllegalArgumentException( "mapper [" - + name() + + fullPath() + "] cannot be changed from type [" + contentType() + "] to [" @@ -395,7 +395,7 @@ public final FieldMapper merge(Mapper mergeWith, MapperMergeContext mapperMergeC if (builder == null) { return (FieldMapper) mergeWith; } - Conflicts conflicts = new Conflicts(name()); + Conflicts conflicts = new Conflicts(fullPath()); builder.merge((FieldMapper) mergeWith, conflicts, mapperMergeContext); conflicts.check(); return builder.build(mapperMergeContext.getMapperBuilderContext()); @@ -404,19 +404,19 @@ public final FieldMapper merge(Mapper mergeWith, MapperMergeContext mapperMergeC protected void checkIncomingMergeType(FieldMapper mergeWith) { if (Objects.equals(this.getClass(), mergeWith.getClass()) == false) { throw new IllegalArgumentException( - "mapper [" + name() + "] cannot be changed from type [" + contentType() + "] to [" + mergeWith.contentType() + "]" + "mapper [" + fullPath() + "] cannot be changed from type [" + contentType() + "] to [" + mergeWith.contentType() + "]" ); } if (Objects.equals(contentType(), mergeWith.contentType()) == false) { throw new IllegalArgumentException( - "mapper [" + name() + "] cannot be changed from type [" + contentType() + "] to [" + mergeWith.contentType() + "]" + "mapper [" + fullPath() + "] cannot be changed from type [" + contentType() + "] to [" + mergeWith.contentType() + "]" ); } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(simpleName()); + builder.startObject(leafName()); doXContentBody(builder, params); return builder.endObject(); } @@ -489,7 +489,7 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { if (syntheticSourceMode() == SyntheticSourceMode.FALLBACK) { if (copyTo.copyToFields().isEmpty() != true) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" + "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); } // Nothing because it is handled at `ObjectMapper` level. 
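
Across these mapper hunks, name()/simpleName() become fullPath()/leafName(): the full dotted path from the mapping root versus the last path segment. A tiny illustrative sketch of the distinction, with hypothetical names (the real MapperBuilderContext#buildFullName is more involved than the helper below):

```java
// Illustrative only: shows the difference between a mapper's leaf name and its
// full dotted path, as reflected by the renamed leafName()/fullPath() accessors.
class MapperNamingSketch {
    // Roughly what composing a full name from a parent path looks like.
    static String buildFullName(String parentPath, String leafName) {
        return parentPath == null || parentPath.isEmpty() ? leafName : parentPath + "." + leafName;
    }

    record FieldInfo(String parentPath, String leafName) {
        String fullPath() {
            return buildFullName(parentPath, leafName);
        }
    }

    public static void main(String[] args) {
        FieldInfo city = new FieldInfo("address", "city");
        // leafName() -> "city", fullPath() -> "address.city"
        System.out.println(city.leafName() + " vs " + city.fullPath());
    }
}
```
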
@@ -514,7 +514,7 @@ public static class Builder { private boolean hasSyntheticSourceCompatibleKeywordField; public Builder add(FieldMapper.Builder builder) { - mapperBuilders.put(builder.name(), builder::build); + mapperBuilders.put(builder.leafName(), builder::build); if (builder instanceof KeywordFieldMapper.Builder kwd) { if (kwd.hasNormalizer() == false && (kwd.hasDocValues() || kwd.isStored())) { @@ -526,7 +526,7 @@ public Builder add(FieldMapper.Builder builder) { } private void add(FieldMapper mapper) { - mapperBuilders.put(mapper.simpleName(), context -> mapper); + mapperBuilders.put(mapper.leafName(), context -> mapper); if (mapper instanceof KeywordFieldMapper kwd) { if (kwd.hasNormalizer() == false && (kwd.fieldType().hasDocValues() || kwd.fieldType().isStored())) { @@ -536,12 +536,12 @@ private void add(FieldMapper mapper) { } private void update(FieldMapper toMerge, MapperMergeContext context) { - if (mapperBuilders.containsKey(toMerge.simpleName()) == false) { + if (mapperBuilders.containsKey(toMerge.leafName()) == false) { if (context.decrementFieldBudgetIfPossible(toMerge.getTotalFieldsCount())) { add(toMerge); } } else { - FieldMapper existing = mapperBuilders.get(toMerge.simpleName()).apply(context.getMapperBuilderContext()); + FieldMapper existing = mapperBuilders.get(toMerge.leafName()).apply(context.getMapperBuilderContext()); add(existing.merge(toMerge, context)); } } @@ -559,7 +559,7 @@ public MultiFields build(Mapper.Builder mainFieldBuilder, MapperBuilderContext c return empty(); } else { FieldMapper[] mappers = new FieldMapper[mapperBuilders.size()]; - context = context.createChildContext(mainFieldBuilder.name(), null); + context = context.createChildContext(mainFieldBuilder.leafName(), null); int i = 0; for (Map.Entry> entry : this.mapperBuilders.entrySet()) { mappers[i++] = entry.getValue().apply(context); @@ -574,7 +574,7 @@ public MultiFields build(Mapper.Builder mainFieldBuilder, MapperBuilderContext c private MultiFields(FieldMapper[] mappers) { this.mappers = mappers; // sort for consistent iteration order + serialization - Arrays.sort(this.mappers, Comparator.comparing(FieldMapper::name)); + Arrays.sort(this.mappers, Comparator.comparing(FieldMapper::fullPath)); } public void parse(FieldMapper mainField, DocumentParserContext context, Supplier multiFieldContextSupplier) @@ -584,7 +584,7 @@ public void parse(FieldMapper mainField, DocumentParserContext context, Supplier if (mappers.length == 0) { return; } - context.path().add(mainField.simpleName()); + context.path().add(mainField.leafName()); for (FieldMapper mapper : mappers) { mapper.parse(multiFieldContextSupplier.get()); } @@ -1314,7 +1314,7 @@ protected void merge(FieldMapper in, Conflicts conflicts, MapperMergeContext map for (Parameter param : getParameters()) { param.merge(in, conflicts); } - MapperMergeContext childContext = mapperMergeContext.createChildContext(in.simpleName(), null); + MapperMergeContext childContext = mapperMergeContext.createChildContext(in.leafName(), null); for (FieldMapper newSubField : in.multiFields.mappers) { multiFieldsBuilder.update(newSubField, childContext); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java index 7070c387fbb97..65ee587d8cb50 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java @@ -55,10 +55,10 @@ final class FieldTypeLookup { 
final Map dynamicFieldTypes = new HashMap<>(); final Map> fieldToCopiedFields = new HashMap<>(); for (FieldMapper fieldMapper : fieldMappers) { - String fieldName = fieldMapper.name(); + String fieldName = fieldMapper.fullPath(); MappedFieldType fieldType = fieldMapper.fieldType(); fullNameToFieldType.put(fieldType.name(), fieldType); - fieldMapper.sourcePathUsedBy().forEachRemaining(mapper -> fullSubfieldNameToParentPath.put(mapper.name(), fieldName)); + fieldMapper.sourcePathUsedBy().forEachRemaining(mapper -> fullSubfieldNameToParentPath.put(mapper.fullPath(), fieldName)); if (fieldType instanceof DynamicFieldType) { dynamicFieldTypes.put(fieldType.name(), (DynamicFieldType) fieldType); } @@ -80,8 +80,8 @@ final class FieldTypeLookup { this.maxParentPathDots = maxParentPathDots; for (FieldAliasMapper fieldAliasMapper : fieldAliasMappers) { - String aliasName = fieldAliasMapper.name(); - String path = fieldAliasMapper.path(); + String aliasName = fieldAliasMapper.fullPath(); + String path = fieldAliasMapper.targetPath(); MappedFieldType fieldType = fullNameToFieldType.get(path); if (fieldType == null) { continue; @@ -99,28 +99,35 @@ final class FieldTypeLookup { for (PassThroughObjectMapper passThroughMapper : passThroughMappers) { for (Mapper subfield : passThroughMapper.mappers.values()) { if (subfield instanceof FieldMapper fieldMapper) { - String name = fieldMapper.simpleName(); + String name = fieldMapper.leafName(); // Check for conflict between PassThroughObjectMapper subfields. PassThroughObjectMapper conflict = passThroughFieldAliases.put(name, passThroughMapper); if (conflict != null) { if (conflict.priority() > passThroughMapper.priority()) { // Keep the conflicting field if it has higher priority. passThroughFieldAliases.put(name, conflict); - continue; } - } else if (fullNameToFieldType.containsKey(name)) { - // There's an existing field or alias for the same field. - continue; - } - MappedFieldType fieldType = fieldMapper.fieldType(); - fullNameToFieldType.put(name, fieldType); - if (fieldType instanceof DynamicFieldType) { - dynamicFieldTypes.put(name, (DynamicFieldType) fieldType); } } } } + for (Map.Entry entry : passThroughFieldAliases.entrySet()) { + String name = entry.getKey(); + if (fullNameToFieldType.containsKey(name)) { + // There's an existing field or alias for the same field. + continue; + } + Mapper mapper = entry.getValue().getMapper(name); + if (mapper instanceof FieldMapper fieldMapper) { + MappedFieldType fieldType = fieldMapper.fieldType(); + fullNameToFieldType.put(name, fieldType); + if (fieldType instanceof DynamicFieldType) { + dynamicFieldTypes.put(name, (DynamicFieldType) fieldType); + } + } + } + for (MappedFieldType fieldType : RuntimeField.collectFieldTypes(runtimeFields).values()) { // this will override concrete fields with runtime fields that have the same name fullNameToFieldType.put(fieldType.name(), fieldType); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java index b31a61d50ecdb..b34b94191f917 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java @@ -188,7 +188,7 @@ private FieldValues scriptValues() { GeoPointFieldScript.Factory factory = scriptCompiler.compile(this.script.get(), GeoPointFieldScript.CONTEXT); return factory == null ? 
null - : (lookup, ctx, doc, consumer) -> factory.newFactory(name(), script.get().getParams(), lookup, OnScriptError.FAIL) + : (lookup, ctx, doc, consumer) -> factory.newFactory(leafName(), script.get().getParams(), lookup, OnScriptError.FAIL) .newInstance(ctx) .runForDoc(doc, consumer); } @@ -197,7 +197,7 @@ private FieldValues scriptValues() { public FieldMapper build(MapperBuilderContext context) { boolean ignoreMalformedEnabled = ignoreMalformed.get().value(); Parser geoParser = new GeoPointParser( - name(), + leafName(), (parser) -> GeoUtils.parseGeoPoint(parser, ignoreZValue.get().value()), nullValue.get(), ignoreZValue.get().value(), @@ -206,7 +206,7 @@ public FieldMapper build(MapperBuilderContext context) { context.isSourceSynthetic() && ignoreMalformedEnabled ); GeoPointFieldType ft = new GeoPointFieldType( - context.buildFullName(name()), + context.buildFullName(leafName()), indexed.get() && indexCreatedVersion.isLegacyIndexVersion() == false, stored.get(), hasDocValues.get(), @@ -218,9 +218,9 @@ public FieldMapper build(MapperBuilderContext context) { indexMode ); if (this.script.get() == null) { - return new GeoPointFieldMapper(name(), ft, multiFieldsBuilder.build(this, context), copyTo, geoParser, this); + return new GeoPointFieldMapper(leafName(), ft, multiFieldsBuilder.build(this, context), copyTo, geoParser, this); } - return new GeoPointFieldMapper(name(), ft, geoParser, this); + return new GeoPointFieldMapper(leafName(), ft, geoParser, this); } } @@ -284,7 +284,7 @@ public GeoPointFieldMapper(String simpleName, MappedFieldType mappedFieldType, P @Override public FieldMapper.Builder getMergeBuilder() { return new Builder( - simpleName(), + leafName(), builder.scriptCompiler, builder.ignoreMalformed.getDefaultValue().value(), indexCreatedVersion, @@ -612,7 +612,7 @@ protected void onMalformedValue(DocumentParserContext context, XContentBuilder m throws IOException { super.onMalformedValue(context, malformedDataForSyntheticSource, cause); if (malformedDataForSyntheticSource != null) { - context.doc().add(IgnoreMalformedStoredValues.storedField(name(), malformedDataForSyntheticSource)); + context.doc().add(IgnoreMalformedStoredValues.storedField(fullPath(), malformedDataForSyntheticSource)); } } @@ -628,15 +628,19 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { } if (fieldType().hasDocValues() == false) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it doesn't have doc values" + "field [" + + fullPath() + + "] of type [" + + typeName() + + "] doesn't support synthetic source because it doesn't have doc values" ); } if (copyTo.copyToFields().isEmpty() != true) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" + "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); } - return new SortedNumericDocValuesSyntheticFieldLoader(name(), simpleName(), ignoreMalformed()) { + return new SortedNumericDocValuesSyntheticFieldLoader(fullPath(), leafName(), ignoreMalformed()) { final GeoPoint point = new GeoPoint(); @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java deleted file mode 100644 index 541538f65a550..0000000000000 --- 
a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.index.mapper; - -import org.apache.lucene.document.LatLonShape; -import org.apache.lucene.geo.LatLonGeometry; -import org.apache.lucene.search.Query; -import org.elasticsearch.common.Explicit; -import org.elasticsearch.common.geo.GeometryFormatterFactory; -import org.elasticsearch.common.geo.GeometryParser; -import org.elasticsearch.common.geo.Orientation; -import org.elasticsearch.common.geo.ShapeRelation; -import org.elasticsearch.common.logging.DeprecationCategory; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.geometry.Geometry; -import org.elasticsearch.index.IndexVersions; -import org.elasticsearch.index.query.QueryShardException; -import org.elasticsearch.index.query.SearchExecutionContext; - -import java.util.List; -import java.util.Map; -import java.util.function.Function; - -/** - * FieldMapper for indexing {@link LatLonShape}s. - *
      - * Currently Shapes can only be indexed and can only be queried using - * {@link org.elasticsearch.index.query.GeoShapeQueryBuilder}, consequently - * a lot of behavior in this Mapper is disabled. - *
      - * Format supported: - *
      - * "field" : { - * "type" : "polygon", - * "coordinates" : [ - * [ [100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0] ] - * ] - * } - *
      - * or: - *
      - * "field" : "POLYGON ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0)) - */ -public class GeoShapeFieldMapper extends AbstractShapeGeometryFieldMapper { - - private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(GeoShapeFieldMapper.class); - - public static final String CONTENT_TYPE = "geo_shape"; - - private static Builder builder(FieldMapper in) { - return ((GeoShapeFieldMapper) in).builder; - } - - public static class Builder extends FieldMapper.Builder { - - final Parameter indexed = Parameter.indexParam(m -> builder(m).indexed.get(), true); - - final Parameter> ignoreMalformed; - final Parameter> ignoreZValue = ignoreZValueParam(m -> builder(m).ignoreZValue.get()); - final Parameter> coerce; - final Parameter> orientation = orientationParam(m -> builder(m).orientation.get()); - - final Parameter> meta = Parameter.metaParam(); - - public Builder(String name, boolean ignoreMalformedByDefault, boolean coerceByDefault) { - super(name); - this.ignoreMalformed = ignoreMalformedParam(m -> builder(m).ignoreMalformed.get(), ignoreMalformedByDefault); - this.coerce = coerceParam(m -> builder(m).coerce.get(), coerceByDefault); - } - - public Builder ignoreZValue(boolean ignoreZValue) { - this.ignoreZValue.setValue(Explicit.explicitBoolean(ignoreZValue)); - return this; - } - - @Override - protected Parameter[] getParameters() { - return new Parameter[] { indexed, ignoreMalformed, ignoreZValue, coerce, orientation, meta }; - } - - @Override - public GeoShapeFieldMapper build(MapperBuilderContext context) { - if (multiFieldsBuilder.hasMultiFields()) { - DEPRECATION_LOGGER.warn( - DeprecationCategory.MAPPINGS, - "geo_shape_multifields", - "Adding multifields to [geo_shape] mappers has no effect and will be forbidden in future" - ); - } - GeometryParser geometryParser = new GeometryParser( - orientation.get().value().getAsBoolean(), - coerce.get().value(), - ignoreZValue.get().value() - ); - GeoShapeParser geoShapeParser = new GeoShapeParser(geometryParser, orientation.get().value()); - GeoShapeFieldType ft = new GeoShapeFieldType( - context.buildFullName(name()), - indexed.get(), - orientation.get().value(), - geoShapeParser, - meta.get() - ); - return new GeoShapeFieldMapper( - name(), - ft, - multiFieldsBuilder.build(this, context), - copyTo, - new GeoShapeIndexer(orientation.get().value(), context.buildFullName(name())), - geoShapeParser, - this - ); - } - } - - public static class GeoShapeFieldType extends AbstractShapeGeometryFieldType implements GeoShapeQueryable { - - public GeoShapeFieldType(String name, boolean indexed, Orientation orientation, Parser parser, Map meta) { - super(name, indexed, false, false, parser, orientation, meta); - } - - @Override - public String typeName() { - return CONTENT_TYPE; - } - - @Override - public Query geoShapeQuery(SearchExecutionContext context, String fieldName, ShapeRelation relation, LatLonGeometry... geometries) { - // CONTAINS queries are not supported by VECTOR strategy for indices created before version 7.5.0 (Lucene 8.3.0) - if (relation == ShapeRelation.CONTAINS && context.indexVersionCreated().before(IndexVersions.V_7_5_0)) { - throw new QueryShardException( - context, - ShapeRelation.CONTAINS + " query relation not supported for Field [" + fieldName + "]." 
- ); - } - return LatLonShape.newGeometryQuery(fieldName, relation.getLuceneRelation(), geometries); - } - - @Override - protected Function, List> getFormatter(String format) { - return GeometryFormatterFactory.getFormatter(format, Function.identity()); - } - } - - @Deprecated - public static Mapper.TypeParser PARSER = (name, node, parserContext) -> { - boolean ignoreMalformedByDefault = IGNORE_MALFORMED_SETTING.get(parserContext.getSettings()); - boolean coerceByDefault = COERCE_SETTING.get(parserContext.getSettings()); - FieldMapper.Builder builder = new Builder(name, ignoreMalformedByDefault, coerceByDefault); - builder.parse(name, parserContext, node); - return builder; - }; - - private final Builder builder; - private final GeoShapeIndexer indexer; - - public GeoShapeFieldMapper( - String simpleName, - MappedFieldType mappedFieldType, - MultiFields multiFields, - CopyTo copyTo, - GeoShapeIndexer indexer, - Parser parser, - Builder builder - ) { - super( - simpleName, - mappedFieldType, - builder.ignoreMalformed.get(), - builder.coerce.get(), - builder.ignoreZValue.get(), - builder.orientation.get(), - multiFields, - copyTo, - parser - ); - this.builder = builder; - this.indexer = indexer; - } - - @Override - public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), builder.ignoreMalformed.getDefaultValue().value(), builder.coerce.getDefaultValue().value()).init( - this - ); - } - - @Override - protected void index(DocumentParserContext context, Geometry geometry) { - if (geometry == null) { - return; - } - context.doc().addAll(indexer.indexShape(geometry)); - context.addToFieldNames(fieldType().name()); - } - - @Override - public GeoShapeFieldType fieldType() { - return (GeoShapeFieldType) super.fieldType(); - } - - @Override - protected String contentType() { - return CONTENT_TYPE; - } - -} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IndexModeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IndexModeFieldMapper.java new file mode 100644 index 0000000000000..8fa1ff3040563 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/IndexModeFieldMapper.java @@ -0,0 +1,119 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.fielddata.FieldData; +import org.elasticsearch.index.fielddata.FieldDataContext; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.ScriptDocValues; +import org.elasticsearch.index.fielddata.plain.ConstantIndexFieldData; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.script.field.DelegateDocValuesField; +import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; +import org.elasticsearch.search.fetch.StoredFieldsSpec; +import org.elasticsearch.search.lookup.Source; + +import java.util.Collections; +import java.util.List; + +public class IndexModeFieldMapper extends MetadataFieldMapper { + + static final NodeFeature QUERYING_INDEX_MODE = new NodeFeature("mapper.query_index_mode"); + + public static final String NAME = "_index_mode"; + + public static final String CONTENT_TYPE = "_index_mode"; + + private static final IndexModeFieldMapper INSTANCE = new IndexModeFieldMapper(); + + public static final TypeParser PARSER = new FixedTypeParser(c -> INSTANCE); + + static final class IndexModeFieldType extends ConstantFieldType { + + static final IndexModeFieldType INSTANCE = new IndexModeFieldType(); + + private IndexModeFieldType() { + super(NAME, Collections.emptyMap()); + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + @Override + protected boolean matches(String pattern, boolean caseInsensitive, QueryRewriteContext context) { + final String indexMode = context.getIndexSettings().getMode().getName(); + return Regex.simpleMatch(pattern, indexMode, caseInsensitive); + } + + @Override + public Query existsQuery(SearchExecutionContext context) { + return new MatchAllDocsQuery(); + } + + @Override + public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext) { + final String indexMode = fieldDataContext.indexSettings().getMode().getName(); + return new ConstantIndexFieldData.Builder( + indexMode, + name(), + CoreValuesSourceType.KEYWORD, + (dv, n) -> new DelegateDocValuesField( + new ScriptDocValues.Strings(new ScriptDocValues.StringsSupplier(FieldData.toString(dv))), + n + ) + ); + } + + @Override + public BlockLoader blockLoader(BlockLoaderContext blContext) { + final String indexMode = blContext.indexSettings().getMode().getName(); + return BlockLoader.constantBytes(new BytesRef(indexMode)); + } + + @Override + public ValueFetcher valueFetcher(SearchExecutionContext context, String format) { + return new ValueFetcher() { + private final List indexMode = List.of(context.getIndexSettings().getMode().getName()); + + @Override + public List fetchValues(Source source, int doc, List ignoredValues) { + return indexMode; + } + + @Override + public StoredFieldsSpec storedFieldsSpec() { + return StoredFieldsSpec.NO_REQUIREMENTS; + } + }; + } + + } + + public IndexModeFieldMapper() { + super(IndexModeFieldType.INSTANCE); + } + + @Override + protected String contentType() { + return CONTENT_TYPE; + } + + @Override + public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { + return SourceLoader.SyntheticFieldLoader.NOTHING; + } +} diff --git 
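Editor's note: the IndexModeFieldMapper added above exposes the index's mode as a constant _index_mode metadata field. The value comes from index settings rather than per-document data, so a term or wildcard query against it effectively matches either every document in an index or none. A simplified standalone sketch of that idea follows; the class and helper names are illustrative, not the Elasticsearch types.

    // Toy model (not Elasticsearch code) of a constant, per-index metadata field.
    // The "index mode" string stands in for IndexSettings#getMode(); the real mapper
    // matches patterns with Regex.simpleMatch, so '*' wildcards work there too.
    class ConstantIndexModeField {
        static boolean matches(String pattern, String indexMode) {
            if (pattern.endsWith("*")) {
                return indexMode.startsWith(pattern.substring(0, pattern.length() - 1));
            }
            return indexMode.equals(pattern);
        }

        public static void main(String[] args) {
            String indexMode = "time_series"; // e.g. "standard" or "time_series"
            System.out.println(matches("time_series", indexMode)); // true: the whole index matches
            System.out.println(matches("standard", indexMode));    // false: the whole index is filtered out
        }
    }
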
a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index 1d73e256bd2e9..b69c2b4606ed6 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -131,7 +131,7 @@ private InetAddress parseNullValue() { return InetAddresses.forString(nullValueAsString); } catch (Exception e) { if (indexCreatedVersion.onOrAfter(IndexVersions.V_8_0_0)) { - throw new MapperParsingException("Error parsing [null_value] on field [" + name() + "]: " + e.getMessage(), e); + throw new MapperParsingException("Error parsing [null_value] on field [" + leafName() + "]: " + e.getMessage(), e); } else { DEPRECATION_LOGGER.warn( DeprecationCategory.MAPPINGS, @@ -139,7 +139,7 @@ private InetAddress parseNullValue() { "Error parsing [" + nullValue.getValue() + "] as IP in [null_value] on field [" - + name() + + leafName() + "]); [null_value] will be ignored" ); return null; @@ -154,7 +154,7 @@ private FieldValues scriptValues() { IpFieldScript.Factory factory = scriptCompiler.compile(this.script.get(), IpFieldScript.CONTEXT); return factory == null ? null - : (lookup, ctx, doc, consumer) -> factory.newFactory(name(), script.get().getParams(), lookup, OnScriptError.FAIL) + : (lookup, ctx, doc, consumer) -> factory.newFactory(leafName(), script.get().getParams(), lookup, OnScriptError.FAIL) .newInstance(ctx) .runForDoc(doc, consumer); } @@ -170,9 +170,9 @@ public IpFieldMapper build(MapperBuilderContext context) { dimension.setValue(true); } return new IpFieldMapper( - name(), + leafName(), new IpFieldType( - context.buildFullName(name()), + context.buildFullName(leafName()), indexed.getValue() && indexCreatedVersion.isLegacyIndexVersion() == false, stored.getValue(), hasDocValues.getValue(), @@ -539,7 +539,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio context.addIgnoredField(fieldType().name()); if (storeIgnored) { // Save a copy of the field so synthetic source can load it - context.doc().add(IgnoreMalformedStoredValues.storedField(name(), context.parser())); + context.doc().add(IgnoreMalformedStoredValues.storedField(fullPath(), context.parser())); } return; } else { @@ -589,14 +589,14 @@ protected void indexScriptValues( @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), scriptCompiler, ignoreMalformedByDefault, indexCreatedVersion).dimension(dimension).init(this); + return new Builder(leafName(), scriptCompiler, ignoreMalformedByDefault, indexCreatedVersion).dimension(dimension).init(this); } @Override public void doValidate(MappingLookup lookup) { - if (dimension && null != lookup.nestedLookup().getNestedParent(name())) { + if (dimension && null != lookup.nestedLookup().getNestedParent(fullPath())) { throw new IllegalArgumentException( - TimeSeriesParams.TIME_SERIES_DIMENSION_PARAM + " can't be configured in nested field [" + name() + "]" + TimeSeriesParams.TIME_SERIES_DIMENSION_PARAM + " can't be configured in nested field [" + fullPath() + "]" ); } } @@ -613,15 +613,19 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { } if (hasDocValues == false) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it doesn't have doc values" + "field [" + + fullPath() + + "] of type [" + + typeName() + + "] doesn't support synthetic source because it doesn't 
have doc values" ); } if (copyTo.copyToFields().isEmpty() != true) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" + "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); } - return new SortedSetDocValuesSyntheticFieldLoader(name(), simpleName(), null, ignoreMalformed) { + return new SortedSetDocValuesSyntheticFieldLoader(fullPath(), leafName(), null, ignoreMalformed) { @Override protected BytesRef convert(BytesRef value) { byte[] bytes = Arrays.copyOfRange(value.bytes, value.offset, value.offset + value.length); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index 438964cf0a092..16aa827e6a251 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.lucene.search.AutomatonQueries; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.core.Nullable; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -86,6 +87,8 @@ public final class KeywordFieldMapper extends FieldMapper { public static final String CONTENT_TYPE = "keyword"; + static final NodeFeature KEYWORD_DIMENSION_IGNORE_ABOVE = new NodeFeature("mapper.keyword_dimension_ignore_above"); + public static class Defaults { public static final FieldType FIELD_TYPE; @@ -210,7 +213,7 @@ public Builder(String name, IndexAnalyzers indexAnalyzers, ScriptCompiler script + "] are true" ); } - }).precludesParameters(normalizer, ignoreAbove); + }).precludesParameters(normalizer); } public Builder(String name, IndexVersion indexCreatedVersion) { @@ -271,7 +274,7 @@ private FieldValues scriptValues() { StringFieldScript.Factory scriptFactory = scriptCompiler.compile(script.get(), StringFieldScript.CONTEXT); return scriptFactory == null ? 
null - : (lookup, ctx, doc, consumer) -> scriptFactory.newFactory(name(), script.get().getParams(), lookup, OnScriptError.FAIL) + : (lookup, ctx, doc, consumer) -> scriptFactory.newFactory(leafName(), script.get().getParams(), lookup, OnScriptError.FAIL) .newInstance(ctx) .runForDoc(doc, consumer); } @@ -311,7 +314,7 @@ private KeywordFieldType buildFieldType(MapperBuilderContext context, FieldType ); normalizer = Lucene.KEYWORD_ANALYZER; } else { - throw new MapperParsingException("normalizer [" + normalizerName + "] not found for field [" + name() + "]"); + throw new MapperParsingException("normalizer [" + normalizerName + "] not found for field [" + leafName() + "]"); } } searchAnalyzer = quoteAnalyzer = normalizer; @@ -325,7 +328,7 @@ private KeywordFieldType buildFieldType(MapperBuilderContext context, FieldType dimension(true); } return new KeywordFieldType( - context.buildFullName(name()), + context.buildFullName(leafName()), fieldType, normalizer, searchAnalyzer, @@ -347,7 +350,7 @@ public KeywordFieldMapper build(MapperBuilderContext context) { fieldtype = Defaults.FIELD_TYPE; } return new KeywordFieldMapper( - name(), + leafName(), fieldtype, buildFieldType(context, fieldtype), multiFieldsBuilder.build(this, context), @@ -911,7 +914,7 @@ private void indexValue(DocumentParserContext context, String value) { } if (value.length() > fieldType().ignoreAbove()) { - context.addIgnoredField(name()); + context.addIgnoredField(fullPath()); if (storeIgnored) { // Save a copy of the field so synthetic source can load it context.doc().add(new StoredField(originalName(), new BytesRef(value))); @@ -919,7 +922,7 @@ private void indexValue(DocumentParserContext context, String value) { return; } - value = normalizeValue(fieldType().normalizer(), name(), value); + value = normalizeValue(fieldType().normalizer(), fullPath(), value); // convert to utf8 only once before feeding postings/dv/stored fields final BytesRef binaryValue = new BytesRef(value); @@ -995,15 +998,14 @@ public Map indexAnalyzers() { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), indexAnalyzers, scriptCompiler, indexCreatedVersion).dimension(fieldType().isDimension()) - .init(this); + return new Builder(leafName(), indexAnalyzers, scriptCompiler, indexCreatedVersion).dimension(fieldType().isDimension()).init(this); } @Override public void doValidate(MappingLookup lookup) { - if (fieldType().isDimension() && null != lookup.nestedLookup().getNestedParent(name())) { + if (fieldType().isDimension() && null != lookup.nestedLookup().getNestedParent(fullPath())) { throw new IllegalArgumentException( - TimeSeriesParams.TIME_SERIES_DIMENSION_PARAM + " can't be configured in nested field [" + name() + "]" + TimeSeriesParams.TIME_SERIES_DIMENSION_PARAM + " can't be configured in nested field [" + fullPath() + "]" ); } } @@ -1018,17 +1020,21 @@ boolean hasNormalizer() { * for synthetic source. 
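Editor's note: in the keyword mapper changes above, time_series_dimension no longer precludes ignore_above (hence the new KEYWORD_DIMENSION_IGNORE_ABOVE feature), and values longer than ignore_above are kept under a hidden "<field>._original" stored key so synthetic source can still return them. Below is a simplified standalone model of that bookkeeping, not the mapper code itself; the field name is illustrative.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    // Toy model: short values follow the normal indexing / doc-values path, long
    // values are only preserved under "<fullPath>._original" for synthetic source.
    class IgnoreAboveModel {
        static Map<String, List<String>> index(String fullPath, List<String> values, int ignoreAbove) {
            List<String> docValues = new ArrayList<>();
            List<String> original = new ArrayList<>();
            for (String value : values) {
                if (value.length() > ignoreAbove) {
                    original.add(value);   // ignored for indexing, kept for synthetic source
                } else {
                    docValues.add(value);  // indexed and stored in doc values
                }
            }
            return Map.of(fullPath, docValues, fullPath + "._original", original);
        }

        public static void main(String[] args) {
            System.out.println(index("host.name", List.of("web-01", "an-unreasonably-long-host-name"), 10));
        }
    }
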
*/ private String originalName() { - return name() + "._original"; + return fullPath() + "._original"; } @Override protected SyntheticSourceMode syntheticSourceMode() { - return SyntheticSourceMode.NATIVE; + if (fieldType.stored() || hasDocValues) { + return SyntheticSourceMode.NATIVE; + } + + return SyntheticSourceMode.FALLBACK; } @Override public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - return syntheticFieldLoader(simpleName()); + return syntheticFieldLoader(leafName()); } public SourceLoader.SyntheticFieldLoader syntheticFieldLoader(String simpleName) { @@ -1037,17 +1043,18 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader(String simpleName) } if (copyTo.copyToFields().isEmpty() != true) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" + "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); } if (hasNormalizer()) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares a normalizer" + "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares a normalizer" ); } + if (fieldType.stored()) { return new StringStoredFieldFieldLoader( - name(), + fullPath(), simpleName, fieldType().ignoreAbove == Defaults.IGNORE_ABOVE ? null : originalName() ) { @@ -1058,33 +1065,29 @@ protected void write(XContentBuilder b, Object value) throws IOException { } }; } - if (hasDocValues == false) { - throw new IllegalArgumentException( - "field [" - + name() - + "] of type [" - + typeName() - + "] doesn't support synthetic source because it doesn't have doc values and isn't stored" - ); - } - return new SortedSetDocValuesSyntheticFieldLoader( - name(), - simpleName, - fieldType().ignoreAbove == Defaults.IGNORE_ABOVE ? null : originalName(), - false - ) { - @Override - protected BytesRef convert(BytesRef value) { - return value; - } + if (hasDocValues) { + return new SortedSetDocValuesSyntheticFieldLoader( + fullPath(), + simpleName, + fieldType().ignoreAbove == Defaults.IGNORE_ABOVE ? 
null : originalName(), + false + ) { - @Override - protected BytesRef preserve(BytesRef value) { - // Preserve must make a deep copy because convert gets a shallow copy from the iterator - return BytesRef.deepCopyOf(value); - } - }; + @Override + protected BytesRef convert(BytesRef value) { + return value; + } + + @Override + protected BytesRef preserve(BytesRef value) { + // Preserve must make a deep copy because convert gets a shallow copy from the iterator + return BytesRef.deepCopyOf(value); + } + }; + } + + return super.syntheticFieldLoader(); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index a554e6e44a8e8..aec0c580f1c51 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.query.DistanceFeatureQueryBuilder; @@ -693,6 +694,11 @@ public interface BlockLoaderContext { */ String indexName(); + /** + * The index settings of the index + */ + IndexSettings indexSettings(); + /** * How the field should be extracted into the BlockLoader. The default is {@link FieldExtractPreference#NONE}, which means * that the field type can choose where to load the field from. However, in some cases, the caller may have a preference. diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java index 7c047125a80d3..9c60ee96c6fd4 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -24,22 +24,21 @@ public abstract class Mapper implements ToXContentFragment, Iterable { public abstract static class Builder { - private String name; + private String leafName; - protected Builder(String name) { - setName(name); + protected Builder(String leafName) { + setLeafName(leafName); } - // TODO rename this to leafName? - public final String name() { - return this.name; + public final String leafName() { + return this.leafName; } /** Returns a newly built mapper. */ public abstract Mapper build(MapperBuilderContext context); - void setName(String name) { - this.name = internFieldName(name); + void setLeafName(String leafName) { + this.leafName = internFieldName(leafName); } } @@ -54,23 +53,24 @@ default boolean supportsVersion(IndexVersion indexCreatedVersion) { } } - private final String simpleName; + private final String leafName; - public Mapper(String simpleName) { - Objects.requireNonNull(simpleName); - this.simpleName = internFieldName(simpleName); + public Mapper(String leafName) { + Objects.requireNonNull(leafName); + this.leafName = internFieldName(leafName); } - /** Returns the simple name, which identifies this mapper against other mappers at the same level in the mappers hierarchy - * TODO: make this protected once Mapper and FieldMapper are merged together */ - // TODO rename this to leafName? - public final String simpleName() { - return simpleName; + /** + * Returns the name of the field. 
+ * When the field has a parent object, its leaf name won't include the entire path. + * When subobjects are disabled, its leaf name will be the same as {@link #fullPath()} in practice, because its parent is the root. + */ + public final String leafName() { + return leafName; } /** Returns the canonical name which uniquely identifies the mapper against other mappers in a type. */ - // TODO rename this to fullPath??? - public abstract String name(); + public abstract String fullPath(); /** * Returns a name representing the type of this mapper. @@ -98,7 +98,7 @@ public final String simpleName() { * fields properly. */ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - throw new IllegalArgumentException("field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source"); + throw new IllegalArgumentException("field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source"); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java index fa501a31045e7..ceb1749101d8c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java @@ -27,7 +27,7 @@ public static MapperBuilderContext root(boolean isSourceSynthetic, boolean isDat } public static MapperBuilderContext root(boolean isSourceSynthetic, boolean isDataStream, MergeReason mergeReason) { - return new MapperBuilderContext(null, isSourceSynthetic, isDataStream, false, ObjectMapper.Defaults.DYNAMIC, mergeReason); + return new MapperBuilderContext(null, isSourceSynthetic, isDataStream, false, ObjectMapper.Defaults.DYNAMIC, mergeReason, false); } private final String path; @@ -36,6 +36,7 @@ public static MapperBuilderContext root(boolean isSourceSynthetic, boolean isDat private final boolean parentObjectContainsDimensions; private final ObjectMapper.Dynamic dynamic; private final MergeReason mergeReason; + private final boolean inNestedContext; MapperBuilderContext( String path, @@ -43,7 +44,8 @@ public static MapperBuilderContext root(boolean isSourceSynthetic, boolean isDat boolean isDataStream, boolean parentObjectContainsDimensions, ObjectMapper.Dynamic dynamic, - MergeReason mergeReason + MergeReason mergeReason, + boolean inNestedContext ) { Objects.requireNonNull(dynamic, "dynamic must not be null"); this.path = path; @@ -52,6 +54,7 @@ public static MapperBuilderContext root(boolean isSourceSynthetic, boolean isDat this.parentObjectContainsDimensions = parentObjectContainsDimensions; this.dynamic = dynamic; this.mergeReason = mergeReason; + this.inNestedContext = inNestedContext; } /** @@ -84,7 +87,8 @@ public MapperBuilderContext createChildContext( this.isDataStream, parentObjectContainsDimensions, getDynamic(dynamic), - this.mergeReason + this.mergeReason, + isInNestedContext() ); } @@ -134,4 +138,11 @@ public ObjectMapper.Dynamic getDynamic() { public MergeReason getMergeReason() { return mergeReason; } + + /** + * Returns true if this context is included in a nested context, either directly or any of its ancestors. 
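Editor's note: the rename that runs through this change, simpleName() to leafName() and name() to fullPath(), separates "which sibling am I" from "where do I live in the mapping", as the new javadoc above describes. A toy standalone model of the distinction, not the Mapper API itself:

    // leafName identifies a mapper among its siblings; fullPath uniquely
    // identifies it in the whole mapping (parent path + "." + leaf name).
    record FieldPath(String parentPath, String leafName) {
        String fullPath() {
            return parentPath == null || parentPath.isEmpty() ? leafName : parentPath + "." + leafName;
        }

        public static void main(String[] args) {
            FieldPath user = new FieldPath("", "user");               // leafName "user", fullPath "user"
            FieldPath name = new FieldPath(user.fullPath(), "name");  // leafName "name", fullPath "user.name"
            System.out.println(name.leafName() + " vs " + name.fullPath());
        }
    }
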
+ */ + public boolean isInNestedContext() { + return inNestedContext; + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java index ab5e731c1430a..a5f54ab89602b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java @@ -25,7 +25,11 @@ public Set getFeatures() { PassThroughObjectMapper.PASS_THROUGH_PRIORITY, RangeFieldMapper.NULL_VALUES_OFF_BY_ONE_FIX, SourceFieldMapper.SYNTHETIC_SOURCE_FALLBACK, - DenseVectorFieldMapper.INT4_QUANTIZATION + DenseVectorFieldMapper.INT4_QUANTIZATION, + DenseVectorFieldMapper.BIT_VECTORS, + DocumentMapper.INDEX_SORTING_ON_NESTED, + KeywordFieldMapper.KEYWORD_DIMENSION_IGNORE_ABOVE, + IndexModeFieldMapper.QUERYING_INDEX_MODE ); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java index acfe0fcfbf5bd..0d8bea4d70ce3 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java @@ -52,11 +52,11 @@ public Mapping(RootObjectMapper rootObjectMapper, MetadataFieldMapper[] metadata for (int i = 0; i < metadataMappers.length; i++) { MetadataFieldMapper metadataMapper = metadataMappers[i]; metadataMappersMap[i] = Map.entry(metadataMapper.getClass(), metadataMapper); - metadataMappersByName[i] = Map.entry(metadataMapper.name(), metadataMapper); + metadataMappersByName[i] = Map.entry(metadataMapper.fullPath(), metadataMapper); } this.root = rootObjectMapper; // keep root mappers sorted for consistent serialization - Arrays.sort(metadataMappers, Comparator.comparing(Mapper::name)); + Arrays.sort(metadataMappers, Comparator.comparing(Mapper::fullPath)); this.metadataMappersMap = Map.ofEntries(metadataMappersMap); this.metadataMappersByName = Map.ofEntries(metadataMappersByName); this.meta = meta; @@ -70,7 +70,7 @@ public CompressedXContent toCompressedXContent() { try { return new CompressedXContent(this); } catch (Exception e) { - throw new ElasticsearchGenerationException("failed to serialize source for type [" + root.name() + "]", e); + throw new ElasticsearchGenerationException("failed to serialize source for type [" + root.fullPath() + "]", e); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java index 83e6984285749..ffb32f2c9b2a4 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java @@ -163,27 +163,27 @@ private MappingLookup( final Set completionFields = new HashSet<>(); final List indexTimeScriptMappers = new ArrayList<>(); for (FieldMapper mapper : mappers) { - if (objects.containsKey(mapper.name())) { - throw new MapperParsingException("Field [" + mapper.name() + "] is defined both as an object and a field"); + if (objects.containsKey(mapper.fullPath())) { + throw new MapperParsingException("Field [" + mapper.fullPath() + "] is defined both as an object and a field"); } - if (fieldMappers.put(mapper.name(), mapper) != null) { - throw new MapperParsingException("Field [" + mapper.name() + "] is defined more than once"); + if (fieldMappers.put(mapper.fullPath(), mapper) != null) { + throw new MapperParsingException("Field [" + mapper.fullPath() + "] is defined 
more than once"); } indexAnalyzersMap.putAll(mapper.indexAnalyzers()); if (mapper.hasScript()) { indexTimeScriptMappers.add(mapper); } if (mapper instanceof CompletionFieldMapper) { - completionFields.add(mapper.name()); + completionFields.add(mapper.fullPath()); } } for (FieldAliasMapper aliasMapper : aliasMappers) { - if (objects.containsKey(aliasMapper.name())) { - throw new MapperParsingException("Alias [" + aliasMapper.name() + "] is defined both as an object and an alias"); + if (objects.containsKey(aliasMapper.fullPath())) { + throw new MapperParsingException("Alias [" + aliasMapper.fullPath() + "] is defined both as an object and an alias"); } - if (fieldMappers.put(aliasMapper.name(), aliasMapper) != null) { - throw new MapperParsingException("Alias [" + aliasMapper.name() + "] is defined both as an alias and a concrete field"); + if (fieldMappers.put(aliasMapper.fullPath(), aliasMapper) != null) { + throw new MapperParsingException("Alias [" + aliasMapper.fullPath() + "] is defined both as an alias and a concrete field"); } } @@ -194,7 +194,7 @@ private MappingLookup( Map inferenceFields = new HashMap<>(); for (FieldMapper mapper : mappers) { if (mapper instanceof InferenceFieldMapper inferenceFieldMapper) { - inferenceFields.put(mapper.name(), inferenceFieldMapper.getMetadata(fieldTypeLookup.sourcePaths(mapper.name()))); + inferenceFields.put(mapper.fullPath(), inferenceFieldMapper.getMetadata(fieldTypeLookup.sourcePaths(mapper.fullPath()))); } } this.inferenceFields = Map.copyOf(inferenceFields); @@ -225,8 +225,8 @@ private static boolean assertMapperNamesInterned(Map mappers, Ma private static void assertNamesInterned(String name, Mapper mapper) { assert name == name.intern(); - assert mapper.name() == mapper.name().intern(); - assert mapper.simpleName() == mapper.simpleName().intern(); + assert mapper.fullPath() == mapper.fullPath().intern(); + assert mapper.leafName() == mapper.leafName().intern(); if (mapper instanceof ObjectMapper) { ((ObjectMapper) mapper).mappers.forEach(MappingLookup::assertNamesInterned); } @@ -362,7 +362,7 @@ private void checkFieldNameLengthLimit(long limit) { private static void validateMapperNameIn(Collection mappers, long limit) { for (Mapper mapper : mappers) { - String name = mapper.simpleName(); + String name = mapper.leafName(); if (name.length() > limit) { throw new IllegalArgumentException("Field name [" + name + "] is longer than the limit of [" + limit + "] characters"); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java index 93ffbbf552071..d0c84c1acd465 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java @@ -188,7 +188,7 @@ public final XContentBuilder toXContent(XContentBuilder builder, Params params) if (mergeBuilder == null || mergeBuilder.isConfigured() == false) { return builder; } - builder.startObject(simpleName()); + builder.startObject(leafName()); getMergeBuilder().toXContent(builder, params); return builder.endObject(); } @@ -197,7 +197,7 @@ public final XContentBuilder toXContent(XContentBuilder builder, Params params) protected void parseCreateField(DocumentParserContext context) throws IOException { throw new DocumentParsingException( context.parser().getTokenLocation(), - "Field [" + name() + "] is a metadata field and cannot be added inside a document. 
Use the index API request parameters." + "Field [" + fullPath() + "] is a metadata field and cannot be added inside a document. Use the index API request parameters." ); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedLookup.java index 911c6e5382a86..9c6b53fe58a2d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NestedLookup.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedLookup.java @@ -82,20 +82,20 @@ static NestedLookup build(List mappers) { if (mappers == null || mappers.isEmpty()) { return NestedLookup.EMPTY; } - mappers = mappers.stream().sorted(Comparator.comparing(ObjectMapper::name)).toList(); + mappers = mappers.stream().sorted(Comparator.comparing(ObjectMapper::fullPath)).toList(); Map parentFilters = new HashMap<>(); Map mappersByName = new HashMap<>(); NestedObjectMapper previous = null; for (NestedObjectMapper mapper : mappers) { - mappersByName.put(mapper.name(), mapper); + mappersByName.put(mapper.fullPath(), mapper); if (previous != null) { - if (mapper.name().startsWith(previous.name() + ".")) { - parentFilters.put(previous.name(), previous.nestedTypeFilter()); + if (mapper.fullPath().startsWith(previous.fullPath() + ".")) { + parentFilters.put(previous.fullPath(), previous.nestedTypeFilter()); } } previous = mapper; } - List nestedPathNames = mappers.stream().map(NestedObjectMapper::name).toList(); + List nestedPathNames = mappers.stream().map(NestedObjectMapper::fullPath).toList(); return new NestedLookup() { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java index 4bc633296a832..76212f9899f5c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java @@ -82,7 +82,7 @@ public NestedObjectMapper build(MapperBuilderContext context) { } parentTypeFilter = Queries.newNonNestedFilter(indexCreatedVersion); } - final String fullPath = context.buildFullName(name()); + final String fullPath = context.buildFullName(leafName()); final String nestedTypePath; if (indexCreatedVersion.before(IndexVersions.V_8_0_0)) { nestedTypePath = "__" + fullPath; @@ -91,14 +91,17 @@ public NestedObjectMapper build(MapperBuilderContext context) { } final Query nestedTypeFilter = NestedPathFieldMapper.filter(indexCreatedVersion, nestedTypePath); NestedMapperBuilderContext nestedContext = new NestedMapperBuilderContext( - context.buildFullName(name()), + context.buildFullName(leafName()), + context.isSourceSynthetic(), + context.isDataStream(), + context.parentObjectContainsDimensions(), nestedTypeFilter, parentIncludedInRoot, context.getDynamic(dynamic), context.getMergeReason() ); return new NestedObjectMapper( - name(), + leafName(), fullPath, buildMappers(nestedContext), enabled, @@ -147,18 +150,21 @@ protected static void parseNested(String name, Map node, NestedO } } - private static class NestedMapperBuilderContext extends MapperBuilderContext { + static class NestedMapperBuilderContext extends MapperBuilderContext { final boolean parentIncludedInRoot; final Query nestedTypeFilter; NestedMapperBuilderContext( String path, + boolean isSourceSynthetic, + boolean isDataStream, + boolean parentObjectContainsDimensions, Query nestedTypeFilter, boolean parentIncludedInRoot, Dynamic dynamic, MapperService.MergeReason mergeReason ) { - 
super(path, false, false, false, dynamic, mergeReason); + super(path, isSourceSynthetic, isDataStream, parentObjectContainsDimensions, dynamic, mergeReason, true); this.parentIncludedInRoot = parentIncludedInRoot; this.nestedTypeFilter = nestedTypeFilter; } @@ -167,6 +173,9 @@ private static class NestedMapperBuilderContext extends MapperBuilderContext { public MapperBuilderContext createChildContext(String name, Dynamic dynamic) { return new NestedMapperBuilderContext( buildFullName(name), + isSourceSynthetic(), + isDataStream(), + parentObjectContainsDimensions(), nestedTypeFilter, parentIncludedInRoot, getDynamic(dynamic), @@ -244,7 +253,7 @@ public Map getChildren() { @Override public ObjectMapper.Builder newBuilder(IndexVersion indexVersionCreated) { - NestedObjectMapper.Builder builder = new NestedObjectMapper.Builder(simpleName(), indexVersionCreated, bitsetProducer); + NestedObjectMapper.Builder builder = new NestedObjectMapper.Builder(leafName(), indexVersionCreated, bitsetProducer); builder.enabled = enabled; builder.dynamic = dynamic; builder.includeInRoot = includeInRoot; @@ -255,7 +264,7 @@ public ObjectMapper.Builder newBuilder(IndexVersion indexVersionCreated) { @Override NestedObjectMapper withoutMappers() { return new NestedObjectMapper( - simpleName(), + leafName(), fullPath(), Map.of(), enabled, @@ -272,7 +281,7 @@ NestedObjectMapper withoutMappers() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(simpleName()); + builder.startObject(leafName()); builder.field("type", CONTENT_TYPE); if (includeInParent.explicit() && includeInParent.value()) { builder.field("include_in_parent", includeInParent.value()); @@ -296,7 +305,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public ObjectMapper merge(Mapper mergeWith, MapperMergeContext parentMergeContext) { if ((mergeWith instanceof NestedObjectMapper) == false) { - MapperErrors.throwNestedMappingConflictError(mergeWith.name()); + MapperErrors.throwNestedMappingConflictError(mergeWith.fullPath()); } NestedObjectMapper mergeWithObject = (NestedObjectMapper) mergeWith; @@ -330,7 +339,7 @@ public ObjectMapper merge(Mapper mergeWith, MapperMergeContext parentMergeContex } } return new NestedObjectMapper( - simpleName(), + leafName(), fullPath(), mergeResult.mappers(), mergeResult.enabled(), @@ -355,6 +364,9 @@ protected MapperMergeContext createChildContext(MapperMergeContext mapperMergeCo return mapperMergeContext.createChildContext( new NestedMapperBuilderContext( mapperBuilderContext.buildFullName(name), + mapperBuilderContext.isSourceSynthetic(), + mapperBuilderContext.isDataStream(), + mapperBuilderContext.parentObjectContainsDimensions(), nestedTypeFilter, parentIncludedInRoot, mapperBuilderContext.getDynamic(dynamic), @@ -451,12 +463,12 @@ public boolean hasValue() { public void write(XContentBuilder b) throws IOException { assert (children != null && children.size() > 0); if (children.size() == 1) { - b.startObject(simpleName()); + b.startObject(leafName()); leafStoredFieldLoader.advanceTo(children.get(0)); leafSourceLoader.write(leafStoredFieldLoader, children.get(0), b); b.endObject(); } else { - b.startArray(simpleName()); + b.startArray(leafName()); for (int childId : children) { b.startObject(); leafStoredFieldLoader.advanceTo(childId); @@ -469,7 +481,7 @@ public void write(XContentBuilder b) throws IOException { @Override public String fieldName() { - return name(); + return 
NestedObjectMapper.this.fullPath(); } } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index fce0fb7a83ae4..1e5143a58f20a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -221,7 +221,7 @@ private FieldValues scriptValues() { if (this.script.get() == null) { return null; } - return type.compile(name(), script.get(), scriptCompiler); + return type.compile(leafName(), script.get(), scriptCompiler); } public Builder dimension(boolean dimension) { @@ -265,8 +265,15 @@ public NumberFieldMapper build(MapperBuilderContext context) { dimension.setValue(true); } - MappedFieldType ft = new NumberFieldType(context.buildFullName(name()), this); - return new NumberFieldMapper(name(), ft, multiFieldsBuilder.build(this, context), copyTo, context.isSourceSynthetic(), this); + MappedFieldType ft = new NumberFieldType(context.buildFullName(leafName()), this); + return new NumberFieldMapper( + leafName(), + ft, + multiFieldsBuilder.build(this, context), + copyTo, + context.isSourceSynthetic(), + this + ); } } @@ -1893,7 +1900,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio context.addIgnoredField(mappedFieldType.name()); if (storeMalformedFields) { // Save a copy of the field so synthetic source can load it - context.doc().add(IgnoreMalformedStoredValues.storedField(name(), context.parser())); + context.doc().add(IgnoreMalformedStoredValues.storedField(fullPath(), context.parser())); } return; } else { @@ -1963,7 +1970,7 @@ protected void indexScriptValues( @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), type, scriptCompiler, ignoreMalformedByDefault, coerceByDefault, indexCreatedVersion, indexMode) + return new Builder(leafName(), type, scriptCompiler, ignoreMalformedByDefault, coerceByDefault, indexCreatedVersion, indexMode) .dimension(dimension) .metric(metricType) .allowMultipleValues(allowMultipleValues) @@ -1972,16 +1979,20 @@ public FieldMapper.Builder getMergeBuilder() { @Override public void doValidate(MappingLookup lookup) { - if (dimension && null != lookup.nestedLookup().getNestedParent(name())) { + if (dimension && null != lookup.nestedLookup().getNestedParent(fullPath())) { throw new IllegalArgumentException( - TimeSeriesParams.TIME_SERIES_DIMENSION_PARAM + " can't be configured in nested field [" + name() + "]" + TimeSeriesParams.TIME_SERIES_DIMENSION_PARAM + " can't be configured in nested field [" + fullPath() + "]" ); } } @Override protected SyntheticSourceMode syntheticSourceMode() { - return SyntheticSourceMode.NATIVE; + if (hasDocValues) { + return SyntheticSourceMode.NATIVE; + } + + return SyntheticSourceMode.FALLBACK; } @Override @@ -1989,17 +2000,16 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { if (hasScript()) { return SourceLoader.SyntheticFieldLoader.NOTHING; } - if (hasDocValues == false) { - throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it doesn't have doc values" - ); - } if (copyTo.copyToFields().isEmpty() != true) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" + "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic 
source because it declares copy_to" ); } - return type.syntheticFieldLoader(name(), simpleName(), ignoreMalformed.value()); + if (hasDocValues) { + return type.syntheticFieldLoader(fullPath(), leafName(), ignoreMalformed.value()); + } + + return super.syntheticFieldLoader(); } // For testing only: diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index 356c103756bac..a3d5999a3dcd2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -166,7 +166,7 @@ protected final Map buildMappers(MapperBuilderContext mapperBuil Map mappers = new HashMap<>(); for (Mapper.Builder builder : mappersBuilders) { Mapper mapper = builder.build(mapperBuilderContext); - Mapper existing = mappers.get(mapper.simpleName()); + Mapper existing = mappers.get(mapper.leafName()); if (existing != null) { // The same mappings or document may hold the same field twice, either because duplicated JSON keys are allowed or // the same field is provided using the object notation as well as the dot notation at the same time. @@ -177,9 +177,9 @@ protected final Map buildMappers(MapperBuilderContext mapperBuil } if (subobjects.value() == false && mapper instanceof ObjectMapper objectMapper) { // We're parsing a mapping that has set `subobjects: false` but has defined sub-objects - objectMapper.asFlattenedFieldMappers(mapperBuilderContext).forEach(m -> mappers.put(m.simpleName(), m)); + objectMapper.asFlattenedFieldMappers(mapperBuilderContext).forEach(m -> mappers.put(m.leafName(), m)); } else { - mappers.put(mapper.simpleName(), mapper); + mappers.put(mapper.leafName(), mapper); } } return mappers; @@ -188,13 +188,13 @@ protected final Map buildMappers(MapperBuilderContext mapperBuil @Override public ObjectMapper build(MapperBuilderContext context) { return new ObjectMapper( - name(), - context.buildFullName(name()), + leafName(), + context.buildFullName(leafName()), enabled, subobjects, storeArraySource, dynamic, - buildMappers(context.createChildContext(name(), dynamic)) + buildMappers(context.createChildContext(leafName(), dynamic)) ); } } @@ -325,7 +325,7 @@ protected static void parseProperties( "Tried to add nested object [" + fieldName + "] to object [" - + objBuilder.name() + + objBuilder.leafName() + "] which does not support subobjects" ); } @@ -418,7 +418,7 @@ private static void validateFieldName(String fieldName, IndexVersion indexCreate * @return a Builder that will produce an empty ObjectMapper with the same configuration as this one */ public ObjectMapper.Builder newBuilder(IndexVersion indexVersionCreated) { - ObjectMapper.Builder builder = new ObjectMapper.Builder(simpleName(), subobjects); + ObjectMapper.Builder builder = new ObjectMapper.Builder(leafName(), subobjects); builder.enabled = this.enabled; builder.dynamic = this.dynamic; return builder; @@ -429,11 +429,11 @@ public ObjectMapper.Builder newBuilder(IndexVersion indexVersionCreated) { * This is typically used in the context of a mapper merge when there's not enough budget to add the entire object. 
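Editor's note: both the keyword and numeric mappers in this change turn syntheticSourceMode() from an unconditional NATIVE into a conditional: native doc-values-backed loading only when the data is actually available, otherwise FALLBACK (reading from stored _source) instead of the IllegalArgumentException previously thrown for fields without doc values. A minimal standalone sketch of that decision, assuming the two-mode shape shown in the diff:

    // Toy version of the decision; the real mappers consult fieldType.stored()
    // (keyword only) and hasDocValues before choosing a loader.
    enum Mode { NATIVE, FALLBACK }

    class SyntheticSourceDecision {
        static Mode forField(boolean stored, boolean hasDocValues) {
            return (stored || hasDocValues) ? Mode.NATIVE : Mode.FALLBACK;
        }

        public static void main(String[] args) {
            System.out.println(forField(false, true));   // NATIVE: backed by doc values
            System.out.println(forField(false, false));  // FALLBACK: no longer an error
        }
    }
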
*/ ObjectMapper withoutMappers() { - return new ObjectMapper(simpleName(), fullPath, enabled, subobjects, storeArraySource, dynamic, Map.of()); + return new ObjectMapper(leafName(), fullPath, enabled, subobjects, storeArraySource, dynamic, Map.of()); } @Override - public String name() { + public String fullPath() { return this.fullPath; } @@ -459,10 +459,6 @@ public Iterator iterator() { return mappers.values().iterator(); } - public String fullPath() { - return this.fullPath; - } - public final Dynamic dynamic() { return dynamic; } @@ -492,15 +488,15 @@ protected MapperMergeContext createChildContext(MapperMergeContext mapperMergeCo @Override public ObjectMapper merge(Mapper mergeWith, MapperMergeContext parentMergeContext) { if (mergeWith instanceof ObjectMapper == false) { - MapperErrors.throwObjectMappingConflictError(mergeWith.name()); + MapperErrors.throwObjectMappingConflictError(mergeWith.fullPath()); } if (this instanceof NestedObjectMapper == false && mergeWith instanceof NestedObjectMapper) { // TODO stop NestedObjectMapper extending ObjectMapper? - MapperErrors.throwNestedMappingConflictError(mergeWith.name()); + MapperErrors.throwNestedMappingConflictError(mergeWith.fullPath()); } var mergeResult = MergeResult.build(this, (ObjectMapper) mergeWith, parentMergeContext); return new ObjectMapper( - simpleName(), + leafName(), fullPath, mergeResult.enabled, mergeResult.subObjects, @@ -524,7 +520,9 @@ static MergeResult build(ObjectMapper existing, ObjectMapper mergeWithObject, Ma if (reason == MergeReason.INDEX_TEMPLATE) { enabled = mergeWithObject.enabled; } else if (existing.isEnabled() != mergeWithObject.isEnabled()) { - throw new MapperException("the [enabled] parameter can't be updated for the object mapping [" + existing.name() + "]"); + throw new MapperException( + "the [enabled] parameter can't be updated for the object mapping [" + existing.fullPath() + "]" + ); } else { enabled = existing.enabled; } @@ -537,7 +535,7 @@ static MergeResult build(ObjectMapper existing, ObjectMapper mergeWithObject, Ma subObjects = mergeWithObject.subobjects; } else if (existing.subobjects != mergeWithObject.subobjects) { throw new MapperException( - "the [subobjects] parameter can't be updated for the object mapping [" + existing.name() + "]" + "the [subobjects] parameter can't be updated for the object mapping [" + existing.fullPath() + "]" ); } else { subObjects = existing.subobjects; @@ -551,7 +549,7 @@ static MergeResult build(ObjectMapper existing, ObjectMapper mergeWithObject, Ma trackArraySource = mergeWithObject.storeArraySource; } else if (existing.storeArraySource != mergeWithObject.storeArraySource) { throw new MapperException( - "the [store_array_source] parameter can't be updated for the object mapping [" + existing.name() + "]" + "the [store_array_source] parameter can't be updated for the object mapping [" + existing.fullPath() + "]" ); } else { trackArraySource = existing.storeArraySource; @@ -559,7 +557,7 @@ static MergeResult build(ObjectMapper existing, ObjectMapper mergeWithObject, Ma } else { trackArraySource = existing.storeArraySource; } - MapperMergeContext objectMergeContext = existing.createChildContext(parentMergeContext, existing.simpleName()); + MapperMergeContext objectMergeContext = existing.createChildContext(parentMergeContext, existing.leafName()); Map mergedMappers = buildMergedMappers(existing, mergeWithObject, objectMergeContext, subObjects.value()); return new MergeResult( enabled, @@ -581,13 +579,13 @@ private static Map buildMergedMappers( if 
(subobjects == false && childOfExistingMapper instanceof ObjectMapper objectMapper) { // An existing mapping with sub-objects is merged with a mapping that has set `subobjects: false` objectMapper.asFlattenedFieldMappers(objectMergeContext.getMapperBuilderContext()) - .forEach(m -> mergedMappers.put(m.simpleName(), m)); + .forEach(m -> mergedMappers.put(m.leafName(), m)); } else { putMergedMapper(mergedMappers, childOfExistingMapper); } } for (Mapper mergeWithMapper : mergeWithObject) { - Mapper mergeIntoMapper = mergedMappers.get(mergeWithMapper.simpleName()); + Mapper mergeIntoMapper = mergedMappers.get(mergeWithMapper.leafName()); if (mergeIntoMapper == null) { if (subobjects == false && mergeWithMapper instanceof ObjectMapper objectMapper) { // An existing mapping that has set `subobjects: false` is merged with a mapping with sub-objects @@ -606,9 +604,9 @@ private static Map buildMergedMappers( } else { assert mergeIntoMapper instanceof FieldMapper || mergeIntoMapper instanceof FieldAliasMapper; if (mergeWithMapper instanceof NestedObjectMapper) { - MapperErrors.throwNestedMappingConflictError(mergeWithMapper.name()); + MapperErrors.throwNestedMappingConflictError(mergeWithMapper.fullPath()); } else if (mergeWithMapper instanceof ObjectMapper) { - MapperErrors.throwObjectMappingConflictError(mergeWithMapper.name()); + MapperErrors.throwObjectMappingConflictError(mergeWithMapper.fullPath()); } // If we're merging template mappings when creating an index, then a field definition always @@ -625,7 +623,7 @@ private static Map buildMergedMappers( private static void putMergedMapper(Map mergedMappers, @Nullable Mapper merged) { if (merged != null) { - mergedMappers.put(merged.simpleName(), merged); + mergedMappers.put(merged.leafName(), merged); } } @@ -656,11 +654,11 @@ List asFlattenedFieldMappers(MapperBuilderContext context) { private void asFlattenedFieldMappers(MapperBuilderContext context, List flattenedMappers, ContentPath path) { ensureFlattenable(context, path); - path.add(simpleName()); + path.add(leafName()); for (Mapper mapper : mappers.values()) { if (mapper instanceof FieldMapper fieldMapper) { FieldMapper.Builder fieldBuilder = fieldMapper.getMergeBuilder(); - fieldBuilder.setName(path.pathAsText(mapper.simpleName())); + fieldBuilder.setLeafName(path.pathAsText(mapper.leafName())); flattenedMappers.add(fieldBuilder.build(context)); } else if (mapper instanceof ObjectMapper objectMapper) { objectMapper.asFlattenedFieldMappers(context, flattenedMappers, path); @@ -691,10 +689,10 @@ private void ensureFlattenable(MapperBuilderContext context, ContentPath path) { private void throwAutoFlatteningException(ContentPath path, String reason) { throw new IllegalArgumentException( "Object mapper [" - + path.pathAsText(simpleName()) + + path.pathAsText(leafName()) + "] was found in a context where subobjects is set to false. 
" + "Auto-flattening [" - + path.pathAsText(simpleName()) + + path.pathAsText(leafName()) + "] failed because " + reason ); @@ -707,7 +705,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } void toXContent(XContentBuilder builder, Params params, ToXContent custom) throws IOException { - builder.startObject(simpleName()); + builder.startObject(leafName()); if (mappers.isEmpty() && custom == null) { // only write the object content type if there are no properties, otherwise, it is automatically detected builder.field("type", CONTENT_TYPE); @@ -736,7 +734,7 @@ void toXContent(XContentBuilder builder, Params params, ToXContent custom) throw protected void serializeMappers(XContentBuilder builder, Params params) throws IOException { // sort the mappers so we get consistent serialization format Mapper[] sortedMappers = mappers.values().toArray(Mapper[]::new); - Arrays.sort(sortedMappers, Comparator.comparing(Mapper::name)); + Arrays.sort(sortedMappers, Comparator.comparing(Mapper::fullPath)); int count = 0; for (Mapper mapper : sortedMappers) { @@ -757,7 +755,7 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep } protected SourceLoader.SyntheticFieldLoader syntheticFieldLoader(Stream mappers, boolean isFragment) { - var fields = mappers.sorted(Comparator.comparing(Mapper::name)) + var fields = mappers.sorted(Comparator.comparing(Mapper::fullPath)) .map(Mapper::syntheticFieldLoader) .filter(l -> l != SourceLoader.SyntheticFieldLoader.NOTHING) .toList(); @@ -849,7 +847,7 @@ public void write(XContentBuilder b) throws IOException { if (isRoot()) { b.startObject(); } else { - b.startObject(simpleName()); + b.startObject(leafName()); } } @@ -887,7 +885,7 @@ public boolean setIgnoredValues(Map node, Pa public static void checkForDuplicatePriorities(Collection passThroughMappers) { Map seen = new HashMap<>(); for (PassThroughObjectMapper mapper : passThroughMappers) { - String conflict = seen.put(mapper.priority, mapper.name()); + String conflict = seen.put(mapper.priority, mapper.fullPath()); if (conflict != null) { throw new MapperException( "Pass-through object [" - + mapper.name() + + mapper.fullPath() + "] has a conflicting param [priority=" + mapper.priority + "] with object [" diff --git a/server/src/main/java/org/elasticsearch/index/mapper/PlaceHolderFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/PlaceHolderFieldMapper.java index 67260273bc5a5..4a12ed77b4f26 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/PlaceHolderFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/PlaceHolderFieldMapper.java @@ -90,8 +90,8 @@ protected Parameter[] getParameters() { @Override public PlaceHolderFieldMapper build(MapperBuilderContext context) { - PlaceHolderFieldType mappedFieldType = new PlaceHolderFieldType(context.buildFullName(name()), type, Map.of()); - return new PlaceHolderFieldMapper(name(), mappedFieldType, multiFieldsBuilder.build(this, context), copyTo, unknownParams); + PlaceHolderFieldType mappedFieldType = new PlaceHolderFieldType(context.buildFullName(leafName()), type, Map.of()); + return new PlaceHolderFieldMapper(leafName(), mappedFieldType, multiFieldsBuilder.build(this, context), copyTo, unknownParams); } } @@ -277,7 +277,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio @Override public FieldMapper.Builder getMergeBuilder() { - return new PlaceHolderFieldMapper.Builder(simpleName(), typeName()).init(this); + return new 
PlaceHolderFieldMapper.Builder(leafName(), typeName()).init(this); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java index 2e826c1294d60..e80da7ce8a763 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java @@ -120,12 +120,12 @@ protected Parameter[] getParameters() { } protected RangeFieldType setupFieldType(MapperBuilderContext context) { - String fullName = context.buildFullName(name()); + String fullName = context.buildFullName(leafName()); if (format.isConfigured()) { if (type != RangeType.DATE) { throw new IllegalArgumentException( "field [" - + name() + + leafName() + "] of type [range]" + " should not define a dateTimeFormatter unless it is a " + RangeType.DATE @@ -167,7 +167,7 @@ protected RangeFieldType setupFieldType(MapperBuilderContext context) { @Override public RangeFieldMapper build(MapperBuilderContext context) { RangeFieldType ft = setupFieldType(context); - return new RangeFieldMapper(name(), ft, multiFieldsBuilder.build(this, context), copyTo, type, this); + return new RangeFieldMapper(leafName(), ft, multiFieldsBuilder.build(this, context), copyTo, type, this); } } @@ -364,7 +364,7 @@ boolean coerce() { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), type, coerceByDefault).init(this); + return new Builder(leafName(), type, coerceByDefault).init(this); } @Override @@ -390,7 +390,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio } Range range = parseRange(parser); - context.doc().addAll(fieldType().rangeType.createFields(context, name(), range, index, hasDocValues, store)); + context.doc().addAll(fieldType().rangeType.createFields(context, fullPath(), range, index, hasDocValues, store)); if (hasDocValues == false && (index || store)) { context.addToFieldNames(fieldType().name()); @@ -406,7 +406,7 @@ private Range parseRange(XContentParser parser) throws IOException { if (start != XContentParser.Token.START_OBJECT) { throw new DocumentParsingException( parser.getTokenLocation(), - "error parsing field [" + name() + "], expected an object but got " + parser.currentName() + "error parsing field [" + fullPath() + "], expected an object but got " + parser.currentName() ); } @@ -445,7 +445,7 @@ private Range parseRange(XContentParser parser) throws IOException { } else { throw new DocumentParsingException( parser.getTokenLocation(), - "error parsing field [" + name() + "], with unknown parameter [" + fieldName + "]" + "error parsing field [" + fullPath() + "], with unknown parameter [" + fieldName + "]" ); } } @@ -471,15 +471,19 @@ protected SyntheticSourceMode syntheticSourceMode() { public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { if (hasDocValues == false) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it doesn't have doc values" + "field [" + + fullPath() + + "] of type [" + + typeName() + + "] doesn't support synthetic source because it doesn't have doc values" ); } if (copyTo.copyToFields().isEmpty() != true) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" + "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source 
because it declares copy_to" ); } - return new BinaryDocValuesSyntheticFieldLoader(name()) { + return new BinaryDocValuesSyntheticFieldLoader(fullPath()) { @Override protected void writeValue(XContentBuilder b, BytesRef value) throws IOException { List ranges = type.decodeRanges(value); @@ -488,11 +492,11 @@ protected void writeValue(XContentBuilder b, BytesRef value) throws IOException case 0: return; case 1: - b.field(simpleName()); + b.field(leafName()); ranges.get(0).toXContent(b, fieldType().dateTimeFormatter); break; default: - b.startArray(simpleName()); + b.startArray(leafName()); for (var range : ranges) { range.toXContent(b, fieldType().dateTimeFormatter); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java index ea00901bf681f..11aabd8726f4f 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java @@ -108,7 +108,7 @@ public RootObjectMapper.Builder addRuntimeFields(Map runti @Override public RootObjectMapper build(MapperBuilderContext context) { return new RootObjectMapper( - name(), + leafName(), enabled, subobjects, storeArraySource, @@ -152,7 +152,7 @@ public RootObjectMapper build(MapperBuilderContext context) { @Override public RootObjectMapper.Builder newBuilder(IndexVersion indexVersionCreated) { - RootObjectMapper.Builder builder = new RootObjectMapper.Builder(name(), subobjects); + RootObjectMapper.Builder builder = new RootObjectMapper.Builder(this.fullPath(), subobjects); builder.enabled = enabled; builder.dynamic = dynamic; return builder; @@ -161,7 +161,7 @@ public RootObjectMapper.Builder newBuilder(IndexVersion indexVersionCreated) { @Override RootObjectMapper withoutMappers() { return new RootObjectMapper( - simpleName(), + leafName(), enabled, subobjects, storeArraySource, @@ -220,7 +220,7 @@ protected MapperMergeContext createChildContext(MapperMergeContext mapperMergeCo @Override public RootObjectMapper merge(Mapper mergeWith, MapperMergeContext parentMergeContext) { if (mergeWith instanceof RootObjectMapper == false) { - MapperErrors.throwObjectMappingConflictError(mergeWith.name()); + MapperErrors.throwObjectMappingConflictError(mergeWith.fullPath()); } RootObjectMapper mergeWithObject = (RootObjectMapper) mergeWith; @@ -277,7 +277,7 @@ public RootObjectMapper merge(Mapper mergeWith, MapperMergeContext parentMergeCo } return new RootObjectMapper( - simpleName(), + leafName(), mergeResult.enabled(), mergeResult.subObjects(), mergeResult.trackArraySource(), diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 04d158c5ee99c..1f0c920c39c8f 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -374,18 +374,18 @@ private TextFieldType buildFieldType( if (analyzers.positionIncrementGap.isConfigured()) { if (fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) { throw new IllegalArgumentException( - "Cannot set position_increment_gap on field [" + name() + "] without positions enabled" + "Cannot set position_increment_gap on field [" + leafName() + "] without positions enabled" ); } } TextSearchInfo tsi = new TextSearchInfo(fieldType, similarity.getValue(), searchAnalyzer, searchQuoteAnalyzer); 
TextFieldType ft; if (indexCreatedVersion.isLegacyIndexVersion()) { - ft = new LegacyTextFieldType(context.buildFullName(name()), index.getValue(), store.getValue(), tsi, meta.getValue()); + ft = new LegacyTextFieldType(context.buildFullName(leafName()), index.getValue(), store.getValue(), tsi, meta.getValue()); // ignore fieldData and eagerGlobalOrdinals } else { ft = new TextFieldType( - context.buildFullName(name()), + context.buildFullName(leafName()), index.getValue(), store.getValue(), tsi, @@ -407,7 +407,7 @@ private SubFieldInfo buildPrefixInfo(MapperBuilderContext context, FieldType fie return null; } if (index.getValue() == false) { - throw new IllegalArgumentException("Cannot set index_prefixes on unindexed field [" + name() + "]"); + throw new IllegalArgumentException("Cannot set index_prefixes on unindexed field [" + leafName() + "]"); } /* * Mappings before v7.2.1 use {@link Builder#name} instead of {@link Builder#fullName} @@ -416,7 +416,7 @@ private SubFieldInfo buildPrefixInfo(MapperBuilderContext context, FieldType fie * or a multi-field). This way search will continue to work on old indices and new indices * will use the expected full name. */ - String fullName = indexCreatedVersion.before(IndexVersions.V_7_2_1) ? name() : context.buildFullName(name()); + String fullName = indexCreatedVersion.before(IndexVersions.V_7_2_1) ? leafName() : context.buildFullName(leafName()); // Copy the index options of the main field to allow phrase queries on // the prefix field. FieldType pft = new FieldType(fieldType); @@ -448,10 +448,10 @@ private SubFieldInfo buildPhraseInfo(FieldType fieldType, TextFieldType parent) return null; } if (index.get() == false) { - throw new IllegalArgumentException("Cannot set index_phrases on unindexed field [" + name() + "]"); + throw new IllegalArgumentException("Cannot set index_phrases on unindexed field [" + leafName() + "]"); } if (fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) { - throw new IllegalArgumentException("Cannot set index_phrases on field [" + name() + "] if positions are not enabled"); + throw new IllegalArgumentException("Cannot set index_phrases on field [" + leafName() + "] if positions are not enabled"); } FieldType phraseFieldType = new FieldType(fieldType); PhraseWrappedAnalyzer a = new PhraseWrappedAnalyzer( @@ -476,11 +476,11 @@ public TextFieldMapper build(MapperBuilderContext context) { SubFieldInfo phraseFieldInfo = buildPhraseInfo(fieldType, tft); SubFieldInfo prefixFieldInfo = buildPrefixInfo(context, fieldType, tft); for (Mapper mapper : multiFields) { - if (mapper.name().endsWith(FAST_PHRASE_SUFFIX) || mapper.name().endsWith(FAST_PREFIX_SUFFIX)) { - throw new MapperParsingException("Cannot use reserved field name [" + mapper.name() + "]"); + if (mapper.fullPath().endsWith(FAST_PHRASE_SUFFIX) || mapper.fullPath().endsWith(FAST_PREFIX_SUFFIX)) { + throw new MapperParsingException("Cannot use reserved field name [" + mapper.fullPath() + "]"); } } - return new TextFieldMapper(name(), fieldType, tft, prefixFieldInfo, phraseFieldInfo, multiFields, copyTo, this); + return new TextFieldMapper(leafName(), fieldType, tft, prefixFieldInfo, phraseFieldInfo, multiFields, copyTo, this); } } @@ -1223,7 +1223,7 @@ private TextFieldMapper( assert mappedFieldType.getTextSearchInfo().isTokenized(); assert mappedFieldType.hasDocValues() == false; if (fieldType.indexOptions() == IndexOptions.NONE && fieldType().fielddata()) { - throw new IllegalArgumentException("Cannot enable fielddata on a [text] field 
that is not indexed: [" + name() + "]"); + throw new IllegalArgumentException("Cannot enable fielddata on a [text] field that is not indexed: [" + fullPath() + "]"); } this.fieldType = freezeAndDeduplicateFieldType(fieldType); this.prefixFieldInfo = prefixFieldInfo; @@ -1247,7 +1247,7 @@ private TextFieldMapper( @Override public Map indexAnalyzers() { Map analyzersMap = new HashMap<>(); - analyzersMap.put(name(), indexAnalyzer); + analyzersMap.put(fullPath(), indexAnalyzer); if (phraseFieldInfo != null) { analyzersMap.put( phraseFieldInfo.field, @@ -1265,7 +1265,7 @@ public Map indexAnalyzers() { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), indexCreatedVersion, indexAnalyzers, isSyntheticSourceEnabledViaIndexMode).init(this); + return new Builder(leafName(), indexCreatedVersion, indexAnalyzers, isSyntheticSourceEnabledViaIndexMode).init(this); } @Override @@ -1455,11 +1455,11 @@ protected SyntheticSourceMode syntheticSourceMode() { public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { if (copyTo.copyToFields().isEmpty() != true) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" + "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); } if (store) { - return new StringStoredFieldFieldLoader(name(), simpleName(), null) { + return new StringStoredFieldFieldLoader(fullPath(), leafName(), null) { @Override protected void write(XContentBuilder b, Object value) throws IOException { b.value((String) value); @@ -1469,7 +1469,7 @@ protected void write(XContentBuilder b, Object value) throws IOException { var kwd = SyntheticSourceHelper.getKeywordFieldMapperForSyntheticSource(this); if (kwd != null) { - return kwd.syntheticFieldLoader(simpleName()); + return kwd.syntheticFieldLoader(leafName()); } throw new IllegalArgumentException( @@ -1477,7 +1477,7 @@ protected void write(XContentBuilder b, Object value) throws IOException { Locale.ROOT, "field [%s] of type [%s] doesn't support synthetic source unless it is stored or has a sub-field of" + " type [keyword] with doc values or stored and without a normalizer", - name(), + fullPath(), typeName() ) ); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java index 8101b5be1b60e..5bb8145a090a1 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TsidExtractingIdFieldMapper.java @@ -46,13 +46,7 @@ public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext private static final long SEED = 0; public static void createField(DocumentParserContext context, IndexRouting.ExtractFromSource.Builder routingBuilder, BytesRef tsid) { - final IndexableField timestampField = context.rootDoc().getField(DataStreamTimestampFieldMapper.DEFAULT_PATH); - if (timestampField == null) { - throw new IllegalArgumentException( - "data stream timestamp field [" + DataStreamTimestampFieldMapper.DEFAULT_PATH + "] is missing" - ); - } - long timestamp = timestampField.numericValue().longValue(); + final long timestamp = DataStreamTimestampFieldMapper.extractTimestampValue(context.doc()); String id; if (routingBuilder != null) { byte[] suffix = new byte[16]; diff --git 
a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java index 024cc5cb721e6..3887c5a15cd5a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java @@ -202,13 +202,13 @@ protected Parameter[] getParameters() { public FlattenedFieldMapper build(MapperBuilderContext context) { MultiFields multiFields = multiFieldsBuilder.build(this, context); if (multiFields.iterator().hasNext()) { - throw new IllegalArgumentException(CONTENT_TYPE + " field [" + name() + "] does not support [fields]"); + throw new IllegalArgumentException(CONTENT_TYPE + " field [" + leafName() + "] does not support [fields]"); } if (copyTo.copyToFields().isEmpty() == false) { - throw new IllegalArgumentException(CONTENT_TYPE + " field [" + name() + "] does not support [copy_to]"); + throw new IllegalArgumentException(CONTENT_TYPE + " field [" + leafName() + "] does not support [copy_to]"); } MappedFieldType ft = new RootFlattenedFieldType( - context.buildFullName(name()), + context.buildFullName(leafName()), indexed.get(), hasDocValues.get(), meta.get(), @@ -216,7 +216,7 @@ public FlattenedFieldMapper build(MapperBuilderContext context) { eagerGlobalOrdinals.get(), dimensions.get() ); - return new FlattenedFieldMapper(name(), ft, this); + return new FlattenedFieldMapper(leafName(), ft, this); } } @@ -275,6 +275,10 @@ public String typeName() { return CONTENT_TYPE; } + public String rootName() { + return this.rootName; + } + public String key() { return key; } @@ -733,8 +737,8 @@ public void validateMatchedRoutingPath(final String routingPath) { private final FlattenedFieldParser fieldParser; private final Builder builder; - private FlattenedFieldMapper(String simpleName, MappedFieldType mappedFieldType, Builder builder) { - super(simpleName, mappedFieldType, MultiFields.empty(), CopyTo.empty()); + private FlattenedFieldMapper(String leafName, MappedFieldType mappedFieldType, Builder builder) { + super(leafName, mappedFieldType, MultiFields.empty(), CopyTo.empty()); this.builder = builder; this.fieldParser = new FlattenedFieldParser( mappedFieldType.name(), @@ -801,7 +805,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName()).init(this); + return new Builder(leafName()).init(this); } @Override @@ -815,11 +819,11 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { return SourceLoader.SyntheticFieldLoader.NOTHING; } if (fieldType().hasDocValues()) { - return new FlattenedSortedSetDocValuesSyntheticFieldLoader(name() + "._keyed", simpleName()); + return new FlattenedSortedSetDocValuesSyntheticFieldLoader(fullPath(), fullPath() + "._keyed", leafName()); } throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it doesn't have doc values" + "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it doesn't have doc values" ); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldParser.java b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldParser.java index d373d683b73ad..1e3d55c01d74c 100644 --- 
a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldParser.java @@ -17,7 +17,11 @@ import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.DocumentParserContext; +import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.XContentDataHelper; +import org.elasticsearch.xcontent.CopyingXContentParser; +import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; @@ -32,8 +36,8 @@ class FlattenedFieldParser { static final String SEPARATOR = "\0"; private static final byte SEPARATOR_BYTE = '\0'; - private final String rootFieldName; - private final String keyedFieldName; + private final String rootFieldFullPath; + private final String keyedFieldFullPath; private final MappedFieldType fieldType; private final int depthLimit; @@ -41,35 +45,55 @@ class FlattenedFieldParser { private final String nullValue; FlattenedFieldParser( - String rootFieldName, - String keyedFieldName, + String rootFieldFullPath, + String keyedFieldFullPath, MappedFieldType fieldType, int depthLimit, int ignoreAbove, String nullValue ) { - this.rootFieldName = rootFieldName; - this.keyedFieldName = keyedFieldName; + this.rootFieldFullPath = rootFieldFullPath; + this.keyedFieldFullPath = keyedFieldFullPath; this.fieldType = fieldType; this.depthLimit = depthLimit; this.ignoreAbove = ignoreAbove; this.nullValue = nullValue; } - public List parse(final DocumentParserContext context) throws IOException { - XContentParser parser = context.parser(); + public List parse(final DocumentParserContext documentParserContext) throws IOException { + XContentParser parser = documentParserContext.parser(); XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser); + XContentBuilder rawDataForSyntheticSource = null; + if (documentParserContext.canAddIgnoredField() && ignoreAbove < Integer.MAX_VALUE) { + var copyingParser = new CopyingXContentParser(parser); + rawDataForSyntheticSource = copyingParser.getBuilder(); + parser = copyingParser; + } + ContentPath path = new ContentPath(); List fields = new ArrayList<>(); + var context = new Context(parser, documentParserContext); parseObject(context, path, fields); + if (rawDataForSyntheticSource != null && context.isIgnoredValueEncountered()) { + // One or more inner fields are ignored due to `ignore_above`. + // Because of that we will store whole object as is in order to generate synthetic source. 
+ documentParserContext.addIgnoredField( + IgnoredSourceFieldMapper.NameValue.fromContext( + documentParserContext, + rootFieldFullPath, + XContentDataHelper.encodeXContentBuilder(rawDataForSyntheticSource) + ) + ); + } + return fields; } - private void parseObject(DocumentParserContext context, ContentPath path, List fields) throws IOException { + private void parseObject(Context context, ContentPath path, List fields) throws IOException { String currentName = null; - XContentParser parser = context.parser(); + XContentParser parser = context.getParser(); while (true) { XContentParser.Token token = parser.nextToken(); if (token == XContentParser.Token.END_OBJECT) { @@ -85,9 +109,8 @@ private void parseObject(DocumentParserContext context, ContentPath path, List fields) - throws IOException { - XContentParser parser = context.parser(); + private void parseArray(Context context, ContentPath path, String currentName, List fields) throws IOException { + XContentParser parser = context.getParser(); while (true) { XContentParser.Token token = parser.nextToken(); if (token == XContentParser.Token.END_ARRAY) { @@ -98,13 +121,13 @@ private void parseArray(DocumentParserContext context, ContentPath path, String } private void parseFieldValue( - DocumentParserContext context, + Context context, XContentParser.Token token, ContentPath path, String currentName, List fields ) throws IOException { - XContentParser parser = context.parser(); + XContentParser parser = context.getParser(); if (token == XContentParser.Token.START_OBJECT) { path.add(currentName); validateDepthLimit(path); @@ -126,8 +149,9 @@ private void parseFieldValue( } } - private void addField(DocumentParserContext context, ContentPath path, String currentName, String value, List fields) { + private void addField(Context context, ContentPath path, String currentName, String value, List fields) { if (value.length() > ignoreAbove) { + context.onIgnoredValue(); return; } @@ -143,7 +167,7 @@ private void addField(DocumentParserContext context, ContentPath path, String cu // in that case we can already throw a more user friendly exception here which includes the offending fields key and value lengths if (bytesKeyedValue.length > IndexWriter.MAX_TERM_LENGTH) { String msg = "Flattened field [" - + rootFieldName + + rootFieldFullPath + "] contains one immense field" + " whose keyed encoding is longer than the allowed max length of " + IndexWriter.MAX_TERM_LENGTH @@ -158,13 +182,13 @@ private void addField(DocumentParserContext context, ContentPath path, String cu } BytesRef bytesValue = new BytesRef(value); if (fieldType.isIndexed()) { - fields.add(new StringField(rootFieldName, bytesValue, Field.Store.NO)); - fields.add(new StringField(keyedFieldName, bytesKeyedValue, Field.Store.NO)); + fields.add(new StringField(rootFieldFullPath, bytesValue, Field.Store.NO)); + fields.add(new StringField(keyedFieldFullPath, bytesKeyedValue, Field.Store.NO)); } if (fieldType.hasDocValues()) { - fields.add(new SortedSetDocValuesField(rootFieldName, bytesValue)); - fields.add(new SortedSetDocValuesField(keyedFieldName, bytesKeyedValue)); + fields.add(new SortedSetDocValuesField(rootFieldFullPath, bytesValue)); + fields.add(new SortedSetDocValuesField(keyedFieldFullPath, bytesKeyedValue)); if (fieldType.isDimension() == false) { return; @@ -173,7 +197,10 @@ private void addField(DocumentParserContext context, ContentPath path, String cu final String keyedFieldName = FlattenedFieldParser.extractKey(bytesKeyedValue).utf8ToString(); if (fieldType.isDimension() && 
fieldType.dimensions().contains(keyedFieldName)) { final BytesRef keyedFieldValue = FlattenedFieldParser.extractValue(bytesKeyedValue); - context.getDimensions().addString(rootFieldName + "." + keyedFieldName, keyedFieldValue).validate(context.indexSettings()); + context.getDocumentParserContext() + .getDimensions() + .addString(rootFieldFullPath + "." + keyedFieldName, keyedFieldValue) + .validate(context.getDocumentParserContext().indexSettings()); } } } @@ -181,7 +208,7 @@ private void addField(DocumentParserContext context, ContentPath path, String cu private void validateDepthLimit(ContentPath path) { if (path.length() + 1 > depthLimit) { throw new IllegalArgumentException( - "The provided [flattened] field [" + rootFieldName + "] exceeds the maximum depth limit of [" + depthLimit + "]." + "The provided [flattened] field [" + rootFieldFullPath + "] exceeds the maximum depth limit of [" + depthLimit + "]." ); } } @@ -210,4 +237,33 @@ static BytesRef extractValue(BytesRef keyedValue) { int valueStart = keyedValue.offset + length + 1; return new BytesRef(keyedValue.bytes, valueStart, keyedValue.length - valueStart); } + + private static class Context { + private final XContentParser parser; + private final DocumentParserContext documentParserContext; + + private boolean ignoredValueEncountered; + + private Context(XContentParser parser, DocumentParserContext documentParserContext) { + this.parser = parser; + this.documentParserContext = documentParserContext; + this.ignoredValueEncountered = false; + } + + public XContentParser getParser() { + return parser; + } + + public DocumentParserContext getDocumentParserContext() { + return documentParserContext; + } + + public void onIgnoredValue() { + this.ignoredValueEncountered = true; + } + + public boolean isIgnoredValueEncountered() { + return ignoredValueEncountered; + } + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedSortedSetDocValuesSyntheticFieldLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedSortedSetDocValuesSyntheticFieldLoader.java index 3eb8f4ac52eaa..959460758ab30 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedSortedSetDocValuesSyntheticFieldLoader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedSortedSetDocValuesSyntheticFieldLoader.java @@ -19,24 +19,32 @@ public class FlattenedSortedSetDocValuesSyntheticFieldLoader extends SortedSetDocValuesSyntheticFieldLoader { private DocValuesFieldValues docValues = NO_VALUES; - private final String name; - private final String simpleName; + private final String fieldFullPath; + private final String keyedFieldFullPath; + private final String leafName; /** * Build a loader for flattened fields from doc values. 
* - * @param name the name of the field to load from doc values - * @param simpleName the name to give the field in the rendered {@code _source} + * @param fieldFullPath full path to the original field + * @param keyedFieldFullPath full path to the keyed field to load doc values from + * @param leafName the name of the leaf field to use in the rendered {@code _source} */ - public FlattenedSortedSetDocValuesSyntheticFieldLoader(String name, String simpleName) { - super(name, simpleName, null, false); - this.name = name; - this.simpleName = simpleName; + public FlattenedSortedSetDocValuesSyntheticFieldLoader(String fieldFullPath, String keyedFieldFullPath, String leafName) { + super(fieldFullPath, leafName, null, false); + this.fieldFullPath = fieldFullPath; + this.keyedFieldFullPath = keyedFieldFullPath; + this.leafName = leafName; + } + + @Override + public String fieldName() { + return fieldFullPath; } @Override public DocValuesLoader docValuesLoader(LeafReader reader, int[] docIdsInLeaf) throws IOException { - final SortedSetDocValues dv = DocValues.getSortedSet(reader, name); + final SortedSetDocValues dv = DocValues.getSortedSet(reader, keyedFieldFullPath); if (dv.getValueCount() == 0) { docValues = NO_VALUES; return null; @@ -56,7 +64,7 @@ public void write(XContentBuilder b) throws IOException { if (docValues.count() == 0) { return; } - b.startObject(simpleName); + b.startObject(leafName); docValues.write(b); b.endObject(); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java index 7d385c189479b..d27c0acdb6b2e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java @@ -31,6 +31,7 @@ import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.join.BitSetProducer; +import org.apache.lucene.util.BitUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.VectorUtil; import org.elasticsearch.common.ParsingException; @@ -41,6 +42,8 @@ import org.elasticsearch.index.codec.vectors.ES813FlatVectorFormat; import org.elasticsearch.index.codec.vectors.ES813Int8FlatVectorFormat; import org.elasticsearch.index.codec.vectors.ES814HnswScalarQuantizedVectorsFormat; +import org.elasticsearch.index.codec.vectors.ES815BitFlatVectorFormat; +import org.elasticsearch.index.codec.vectors.ES815HnswBitVectorsFormat; import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.mapper.ArraySourceValueFetcher; @@ -93,13 +96,14 @@ */ public class DenseVectorFieldMapper extends FieldMapper { public static final String COSINE_MAGNITUDE_FIELD_SUFFIX = "._magnitude"; - private static final float EPS = 1e-4f; + private static final float EPS = 1e-3f; - static boolean isNotUnitVector(float magnitude) { + public static boolean isNotUnitVector(float magnitude) { return Math.abs(magnitude - 1.0f) > EPS; } public static final NodeFeature INT4_QUANTIZATION = new NodeFeature("mapper.vectors.int4_quantization"); + public static final NodeFeature BIT_VECTORS = new NodeFeature("mapper.vectors.bit_vectors"); public static final IndexVersion MAGNITUDE_STORED_INDEX_VERSION = IndexVersions.V_7_5_0; public static final IndexVersion INDEXED_BY_DEFAULT_INDEX_VERSION = 
IndexVersions.FIRST_DETACHED_INDEX_VERSION; @@ -109,6 +113,7 @@ static boolean isNotUnitVector(float magnitude) { public static final String CONTENT_TYPE = "dense_vector"; public static short MAX_DIMS_COUNT = 4096; // maximum allowed number of dimensions + public static int MAX_DIMS_COUNT_BIT = 4096 * Byte.SIZE; // maximum allowed number of dimensions public static short MIN_DIMS_FOR_DYNAMIC_FLOAT_MAPPING = 128; // minimum number of dims for floats to be dynamically mapped to vector public static final int MAGNITUDE_BYTES = 4; @@ -134,17 +139,28 @@ public static class Builder extends FieldMapper.Builder { throw new MapperParsingException("Property [dims] on field [" + n + "] must be an integer but got [" + o + "]"); } int dims = XContentMapValues.nodeIntegerValue(o); - if (dims < 1 || dims > MAX_DIMS_COUNT) { + int maxDims = elementType.getValue() == ElementType.BIT ? MAX_DIMS_COUNT_BIT : MAX_DIMS_COUNT; + int minDims = elementType.getValue() == ElementType.BIT ? Byte.SIZE : 1; + if (dims < minDims || dims > maxDims) { throw new MapperParsingException( "The number of dimensions for field [" + n - + "] should be in the range [1, " - + MAX_DIMS_COUNT + + "] should be in the range [" + + minDims + + ", " + + maxDims + "] but was [" + dims + "]" ); } + if (elementType.getValue() == ElementType.BIT) { + if (dims % Byte.SIZE != 0) { + throw new MapperParsingException( + "The number of dimensions for field [" + n + "] should be a multiple of 8 but was [" + dims + "]" + ); + } + } return dims; }, m -> toType(m).fieldType().dims, XContentBuilder::field, Object::toString).setSerializerCheck((id, ic, v) -> v != null) .setMergeValidator((previous, current, c) -> previous == null || Objects.equals(previous, current)); @@ -171,13 +187,27 @@ public Builder(String name, IndexVersion indexVersionCreated) { "similarity", false, m -> toType(m).fieldType().similarity, - (Supplier) () -> indexedByDefault && indexed.getValue() ? VectorSimilarity.COSINE : null, + (Supplier) () -> { + if (indexedByDefault && indexed.getValue()) { + return elementType.getValue() == ElementType.BIT ? VectorSimilarity.L2_NORM : VectorSimilarity.COSINE; + } + return null; + }, VectorSimilarity.class - ).acceptsNull().setSerializerCheck((id, ic, v) -> v != null); + ).acceptsNull().setSerializerCheck((id, ic, v) -> v != null).addValidator(vectorSim -> { + if (vectorSim == null) { + return; + } + if (elementType.getValue() == ElementType.BIT && vectorSim != VectorSimilarity.L2_NORM) { + throw new IllegalArgumentException( + "The [" + VectorSimilarity.L2_NORM + "] similarity is the only supported similarity for bit vectors" + ); + } + }); this.indexOptions = new Parameter<>( "index_options", true, - () -> defaultInt8Hnsw && elementType.getValue() != ElementType.BYTE && this.indexed.getValue() + () -> defaultInt8Hnsw && elementType.getValue() == ElementType.FLOAT && this.indexed.getValue() ? 
new Int8HnswIndexOptions( Lucene99HnswVectorsFormat.DEFAULT_MAX_CONN, Lucene99HnswVectorsFormat.DEFAULT_BEAM_WIDTH, @@ -242,12 +272,20 @@ public Builder dimensions(int dimensions) { return this; } + public Builder elementType(ElementType elementType) { + this.elementType.setValue(elementType); + return this; + } + @Override public DenseVectorFieldMapper build(MapperBuilderContext context) { + // Validate again here because the dimensions or element type could have been set programmatically, + // which affects index option validity + validate(); return new DenseVectorFieldMapper( - name(), + leafName(), new DenseVectorFieldType( - context.buildFullName(name()), + context.buildFullName(leafName()), indexVersionCreated, elementType.getValue(), dims.getValue(), @@ -266,7 +304,7 @@ public DenseVectorFieldMapper build(MapperBuilderContext context) { public enum ElementType { - BYTE(1) { + BYTE { @Override public String toString() { @@ -371,7 +409,7 @@ void checkVectorMagnitude( } @Override - public double computeDotProduct(VectorData vectorData) { + public double computeSquaredMagnitude(VectorData vectorData) { return VectorUtil.dotProduct(vectorData.asByteVector(), vectorData.asByteVector()); } @@ -428,7 +466,7 @@ private VectorData parseHexEncodedVector(DocumentParserContext context, DenseVec byte[] decodedVector = HexFormat.of().parseHex(context.parser().text()); fieldMapper.checkDimensionMatches(decodedVector.length, context); VectorData vectorData = VectorData.fromBytes(decodedVector); - double squaredMagnitude = computeDotProduct(vectorData); + double squaredMagnitude = computeSquaredMagnitude(vectorData); checkVectorMagnitude( fieldMapper.fieldType().similarity, errorByteElementsAppender(decodedVector), @@ -463,7 +501,7 @@ public void parseKnnVectorAndIndex(DocumentParserContext context, DenseVectorFie @Override int getNumBytes(int dimensions) { - return dimensions * elementBytes; + return dimensions; } @Override @@ -494,7 +532,7 @@ int parseDimensionCount(DocumentParserContext context) throws IOException { } }, - FLOAT(4) { + FLOAT { @Override public String toString() { @@ -596,7 +634,7 @@ void checkVectorMagnitude( } @Override - public double computeDotProduct(VectorData vectorData) { + public double computeSquaredMagnitude(VectorData vectorData) { return VectorUtil.dotProduct(vectorData.asFloatVector(), vectorData.asFloatVector()); } @@ -656,7 +694,7 @@ VectorData parseKnnVector(DocumentParserContext context, DenseVectorFieldMapper @Override int getNumBytes(int dimensions) { - return dimensions * elementBytes; + return dimensions * Float.BYTES; } @Override @@ -665,13 +703,249 @@ ByteBuffer createByteBuffer(IndexVersion indexVersion, int numBytes) { ? 
ByteBuffer.wrap(new byte[numBytes]).order(ByteOrder.LITTLE_ENDIAN) : ByteBuffer.wrap(new byte[numBytes]); } - }; + }, - final int elementBytes; + BIT { - ElementType(int elementBytes) { - this.elementBytes = elementBytes; - } + @Override + public String toString() { + return "bit"; + } + + @Override + public void writeValue(ByteBuffer byteBuffer, float value) { + byteBuffer.put((byte) value); + } + + @Override + public void readAndWriteValue(ByteBuffer byteBuffer, XContentBuilder b) throws IOException { + b.value(byteBuffer.get()); + } + + private KnnByteVectorField createKnnVectorField(String name, byte[] vector, VectorSimilarityFunction function) { + if (vector == null) { + throw new IllegalArgumentException("vector value must not be null"); + } + FieldType denseVectorFieldType = new FieldType(); + denseVectorFieldType.setVectorAttributes(vector.length, VectorEncoding.BYTE, function); + denseVectorFieldType.freeze(); + return new KnnByteVectorField(name, vector, denseVectorFieldType); + } + + @Override + IndexFieldData.Builder fielddataBuilder(DenseVectorFieldType denseVectorFieldType, FieldDataContext fieldDataContext) { + return new VectorIndexFieldData.Builder( + denseVectorFieldType.name(), + CoreValuesSourceType.KEYWORD, + denseVectorFieldType.indexVersionCreated, + this, + denseVectorFieldType.dims, + denseVectorFieldType.indexed, + r -> r + ); + } + + @Override + public void checkVectorBounds(float[] vector) { + checkNanAndInfinite(vector); + + StringBuilder errorBuilder = null; + + for (int index = 0; index < vector.length; ++index) { + float value = vector[index]; + + if (value % 1.0f != 0.0f) { + errorBuilder = new StringBuilder( + "element_type [" + + this + + "] vectors only support non-decimal values but found decimal value [" + + value + + "] at dim [" + + index + + "];" + ); + break; + } + + if (value < Byte.MIN_VALUE || value > Byte.MAX_VALUE) { + errorBuilder = new StringBuilder( + "element_type [" + + this + + "] vectors only support integers between [" + + Byte.MIN_VALUE + + ", " + + Byte.MAX_VALUE + + "] but found [" + + value + + "] at dim [" + + index + + "];" + ); + break; + } + } + + if (errorBuilder != null) { + throw new IllegalArgumentException(appendErrorElements(errorBuilder, vector).toString()); + } + } + + @Override + void checkVectorMagnitude( + VectorSimilarity similarity, + Function appender, + float squaredMagnitude + ) {} + + @Override + public double computeSquaredMagnitude(VectorData vectorData) { + int count = 0; + int i = 0; + byte[] byteBits = vectorData.asByteVector(); + for (int upperBound = byteBits.length & -8; i < upperBound; i += 8) { + count += Long.bitCount((long) BitUtil.VH_NATIVE_LONG.get(byteBits, i)); + } + + while (i < byteBits.length) { + count += Integer.bitCount(byteBits[i] & 255); + ++i; + } + return count; + } + + private VectorData parseVectorArray(DocumentParserContext context, DenseVectorFieldMapper fieldMapper) throws IOException { + int index = 0; + byte[] vector = new byte[fieldMapper.fieldType().dims / Byte.SIZE]; + for (XContentParser.Token token = context.parser().nextToken(); token != Token.END_ARRAY; token = context.parser() + .nextToken()) { + fieldMapper.checkDimensionExceeded(index, context); + ensureExpectedToken(Token.VALUE_NUMBER, token, context.parser()); + final int value; + if (context.parser().numberType() != XContentParser.NumberType.INT) { + float floatValue = context.parser().floatValue(true); + if (floatValue % 1.0f != 0.0f) { + throw new IllegalArgumentException( + "element_type [" + + this + + "] 
vectors only support non-decimal values but found decimal value [" + + floatValue + + "] at dim [" + + index + + "];" + ); + } + value = (int) floatValue; + } else { + value = context.parser().intValue(true); + } + if (value < Byte.MIN_VALUE || value > Byte.MAX_VALUE) { + throw new IllegalArgumentException( + "element_type [" + + this + + "] vectors only support integers between [" + + Byte.MIN_VALUE + + ", " + + Byte.MAX_VALUE + + "] but found [" + + value + + "] at dim [" + + index + + "];" + ); + } + if (index >= vector.length) { + throw new IllegalArgumentException( + "The number of dimensions for field [" + + fieldMapper.fieldType().name() + + "] should be [" + + fieldMapper.fieldType().dims + + "] but found [" + + (index + 1) * Byte.SIZE + + "]" + ); + } + vector[index++] = (byte) value; + } + fieldMapper.checkDimensionMatches(index * Byte.SIZE, context); + return VectorData.fromBytes(vector); + } + + private VectorData parseHexEncodedVector(DocumentParserContext context, DenseVectorFieldMapper fieldMapper) throws IOException { + byte[] decodedVector = HexFormat.of().parseHex(context.parser().text()); + fieldMapper.checkDimensionMatches(decodedVector.length * Byte.SIZE, context); + return VectorData.fromBytes(decodedVector); + } + + @Override + VectorData parseKnnVector(DocumentParserContext context, DenseVectorFieldMapper fieldMapper) throws IOException { + XContentParser.Token token = context.parser().currentToken(); + return switch (token) { + case START_ARRAY -> parseVectorArray(context, fieldMapper); + case VALUE_STRING -> parseHexEncodedVector(context, fieldMapper); + default -> throw new ParsingException( + context.parser().getTokenLocation(), + format("Unsupported type [%s] for provided value [%s]", token, context.parser().text()) + ); + }; + } + + @Override + public void parseKnnVectorAndIndex(DocumentParserContext context, DenseVectorFieldMapper fieldMapper) throws IOException { + VectorData vectorData = parseKnnVector(context, fieldMapper); + Field field = createKnnVectorField( + fieldMapper.fieldType().name(), + vectorData.asByteVector(), + fieldMapper.fieldType().similarity.vectorSimilarityFunction(fieldMapper.indexCreatedVersion, this) + ); + context.doc().addWithKey(fieldMapper.fieldType().name(), field); + } + + @Override + int getNumBytes(int dimensions) { + assert dimensions % Byte.SIZE == 0; + return dimensions / Byte.SIZE; + } + + @Override + ByteBuffer createByteBuffer(IndexVersion indexVersion, int numBytes) { + return ByteBuffer.wrap(new byte[numBytes]); + } + + @Override + int parseDimensionCount(DocumentParserContext context) throws IOException { + XContentParser.Token currentToken = context.parser().currentToken(); + return switch (currentToken) { + case START_ARRAY -> { + int index = 0; + for (Token token = context.parser().nextToken(); token != Token.END_ARRAY; token = context.parser().nextToken()) { + index++; + } + yield index * Byte.SIZE; + } + case VALUE_STRING -> { + byte[] decodedVector = HexFormat.of().parseHex(context.parser().text()); + yield decodedVector.length * Byte.SIZE; + } + default -> throw new ParsingException( + context.parser().getTokenLocation(), + format("Unsupported type [%s] for provided value [%s]", currentToken, context.parser().text()) + ); + }; + } + + @Override + public void checkDimensions(int dvDims, int qvDims) { + if (dvDims != qvDims * Byte.SIZE) { + throw new IllegalArgumentException( + "The query vector has a different number of dimensions [" + + qvDims * Byte.SIZE + + "] than the document vectors [" + + dvDims + + "]." 
+ ); + } + } + }; public abstract void writeValue(ByteBuffer byteBuffer, float value); @@ -695,6 +969,14 @@ abstract void checkVectorMagnitude( float squaredMagnitude ); + public void checkDimensions(int dvDims, int qvDims) { + if (dvDims != qvDims) { + throw new IllegalArgumentException( + "The query vector has a different number of dimensions [" + qvDims + "] than the document vectors [" + dvDims + "]." + ); + } + } + int parseDimensionCount(DocumentParserContext context) throws IOException { int index = 0; for (Token token = context.parser().nextToken(); token != Token.END_ARRAY; token = context.parser().nextToken()) { @@ -775,7 +1057,7 @@ static Function errorByteElementsAppender(byte[] v return sb -> appendErrorElements(sb, vector); } - public abstract double computeDotProduct(VectorData vectorData); + public abstract double computeSquaredMagnitude(VectorData vectorData); public static ElementType fromString(String name) { return valueOf(name.trim().toUpperCase(Locale.ROOT)); @@ -786,7 +1068,9 @@ public static ElementType fromString(String name) { ElementType.BYTE.toString(), ElementType.BYTE, ElementType.FLOAT.toString(), - ElementType.FLOAT + ElementType.FLOAT, + ElementType.BIT.toString(), + ElementType.BIT ); public enum VectorSimilarity { @@ -795,6 +1079,7 @@ public enum VectorSimilarity { float score(float similarity, ElementType elementType, int dim) { return switch (elementType) { case BYTE, FLOAT -> 1f / (1f + similarity * similarity); + case BIT -> (dim - similarity) / dim; }; } @@ -806,8 +1091,10 @@ public VectorSimilarityFunction vectorSimilarityFunction(IndexVersion indexVersi COSINE { @Override float score(float similarity, ElementType elementType, int dim) { + assert elementType != ElementType.BIT; return switch (elementType) { case BYTE, FLOAT -> (1 + similarity) / 2f; + default -> throw new IllegalArgumentException("Unsupported element type [" + elementType + "]"); }; } @@ -824,6 +1111,7 @@ float score(float similarity, ElementType elementType, int dim) { return switch (elementType) { case BYTE -> 0.5f + similarity / (float) (dim * (1 << 15)); case FLOAT -> (1 + similarity) / 2f; + default -> throw new IllegalArgumentException("Unsupported element type [" + elementType + "]"); }; } @@ -837,6 +1125,7 @@ public VectorSimilarityFunction vectorSimilarityFunction(IndexVersion indexVersi float score(float similarity, ElementType elementType, int dim) { return switch (elementType) { case BYTE, FLOAT -> similarity < 0 ? 
1 / (1 + -1 * similarity) : similarity + 1; + default -> throw new IllegalArgumentException("Unsupported element type [" + elementType + "]"); }; } @@ -863,7 +1152,7 @@ abstract static class IndexOptions implements ToXContent { this.type = type; } - abstract KnnVectorsFormat getVectorsFormat(); + abstract KnnVectorsFormat getVectorsFormat(ElementType elementType); boolean supportsElementType(ElementType elementType) { return true; @@ -1002,7 +1291,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - KnnVectorsFormat getVectorsFormat() { + KnnVectorsFormat getVectorsFormat(ElementType elementType) { + assert elementType == ElementType.FLOAT; return new ES813Int8FlatVectorFormat(confidenceInterval, 7, false); } @@ -1021,7 +1311,7 @@ public int hashCode() { @Override boolean supportsElementType(ElementType elementType) { - return elementType != ElementType.BYTE; + return elementType == ElementType.FLOAT; } @Override @@ -1047,7 +1337,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - KnnVectorsFormat getVectorsFormat() { + KnnVectorsFormat getVectorsFormat(ElementType elementType) { + if (elementType.equals(ElementType.BIT)) { + return new ES815BitFlatVectorFormat(); + } return new ES813FlatVectorFormat(); } @@ -1083,7 +1376,8 @@ static class Int4HnswIndexOptions extends IndexOptions { } @Override - public KnnVectorsFormat getVectorsFormat() { + public KnnVectorsFormat getVectorsFormat(ElementType elementType) { + assert elementType == ElementType.FLOAT; return new ES814HnswScalarQuantizedVectorsFormat(m, efConstruction, confidenceInterval, 4, true); } @@ -1126,7 +1420,7 @@ public String toString() { @Override boolean supportsElementType(ElementType elementType) { - return elementType != ElementType.BYTE; + return elementType == ElementType.FLOAT; } @Override @@ -1153,7 +1447,8 @@ static class Int4FlatIndexOptions extends IndexOptions { } @Override - public KnnVectorsFormat getVectorsFormat() { + public KnnVectorsFormat getVectorsFormat(ElementType elementType) { + assert elementType == ElementType.FLOAT; return new ES813Int8FlatVectorFormat(confidenceInterval, 4, true); } @@ -1186,7 +1481,7 @@ public String toString() { @Override boolean supportsElementType(ElementType elementType) { - return elementType != ElementType.BYTE; + return elementType == ElementType.FLOAT; } @Override @@ -1216,7 +1511,8 @@ static class Int8HnswIndexOptions extends IndexOptions { } @Override - public KnnVectorsFormat getVectorsFormat() { + public KnnVectorsFormat getVectorsFormat(ElementType elementType) { + assert elementType == ElementType.FLOAT; return new ES814HnswScalarQuantizedVectorsFormat(m, efConstruction, confidenceInterval, 7, false); } @@ -1261,7 +1557,7 @@ public String toString() { @Override boolean supportsElementType(ElementType elementType) { - return elementType != ElementType.BYTE; + return elementType == ElementType.FLOAT; } @Override @@ -1291,7 +1587,10 @@ static class HnswIndexOptions extends IndexOptions { } @Override - public KnnVectorsFormat getVectorsFormat() { + public KnnVectorsFormat getVectorsFormat(ElementType elementType) { + if (elementType == ElementType.BIT) { + return new ES815HnswBitVectorsFormat(m, efConstruction); + } return new Lucene99HnswVectorsFormat(m, efConstruction, 1, null); } @@ -1412,48 +1711,6 @@ public Query termQuery(Object value, SearchExecutionContext context) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support 
term queries"); } - public Query createKnnQuery( - byte[] queryVector, - int numCands, - Query filter, - Float similarityThreshold, - BitSetProducer parentFilter - ) { - if (isIndexed() == false) { - throw new IllegalArgumentException( - "to perform knn search on field [" + name() + "], its mapping must have [index] set to [true]" - ); - } - - if (queryVector.length != dims) { - throw new IllegalArgumentException( - "the query vector has a different dimension [" + queryVector.length + "] than the index vectors [" + dims + "]" - ); - } - - if (elementType != ElementType.BYTE) { - throw new IllegalArgumentException( - "only [" + ElementType.BYTE + "] elements are supported when querying field [" + name() + "]" - ); - } - - if (similarity == VectorSimilarity.DOT_PRODUCT || similarity == VectorSimilarity.COSINE) { - float squaredMagnitude = VectorUtil.dotProduct(queryVector, queryVector); - elementType.checkVectorMagnitude(similarity, ElementType.errorByteElementsAppender(queryVector), squaredMagnitude); - } - Query knnQuery = parentFilter != null - ? new ESDiversifyingChildrenByteKnnVectorQuery(name(), queryVector, filter, numCands, parentFilter) - : new ESKnnByteVectorQuery(name(), queryVector, numCands, filter); - if (similarityThreshold != null) { - knnQuery = new VectorSimilarityQuery( - knnQuery, - similarityThreshold, - similarity.score(similarityThreshold, elementType, dims) - ); - } - return knnQuery; - } - public Query createExactKnnQuery(VectorData queryVector) { if (isIndexed() == false) { throw new IllegalArgumentException( @@ -1463,15 +1720,17 @@ public Query createExactKnnQuery(VectorData queryVector) { return switch (elementType) { case BYTE -> createExactKnnByteQuery(queryVector.asByteVector()); case FLOAT -> createExactKnnFloatQuery(queryVector.asFloatVector()); + case BIT -> createExactKnnBitQuery(queryVector.asByteVector()); }; } + private Query createExactKnnBitQuery(byte[] queryVector) { + elementType.checkDimensions(dims, queryVector.length); + return new DenseVectorQuery.Bytes(queryVector, name()); + } + private Query createExactKnnByteQuery(byte[] queryVector) { - if (queryVector.length != dims) { - throw new IllegalArgumentException( - "the query vector has a different dimension [" + queryVector.length + "] than the index vectors [" + dims + "]" - ); - } + elementType.checkDimensions(dims, queryVector.length); if (similarity == VectorSimilarity.DOT_PRODUCT || similarity == VectorSimilarity.COSINE) { float squaredMagnitude = VectorUtil.dotProduct(queryVector, queryVector); elementType.checkVectorMagnitude(similarity, ElementType.errorByteElementsAppender(queryVector), squaredMagnitude); @@ -1480,11 +1739,7 @@ private Query createExactKnnByteQuery(byte[] queryVector) { } private Query createExactKnnFloatQuery(float[] queryVector) { - if (queryVector.length != dims) { - throw new IllegalArgumentException( - "the query vector has a different dimension [" + queryVector.length + "] than the index vectors [" + dims + "]" - ); - } + elementType.checkDimensions(dims, queryVector.length); elementType.checkVectorBounds(queryVector); if (similarity == VectorSimilarity.DOT_PRODUCT || similarity == VectorSimilarity.COSINE) { float squaredMagnitude = VectorUtil.dotProduct(queryVector, queryVector); @@ -1502,12 +1757,20 @@ && isNotUnitVector(squaredMagnitude)) { return new DenseVectorQuery.Floats(queryVector, name()); } - Query createKnnQuery(float[] queryVector, int numCands, Query filter, Float similarityThreshold, BitSetProducer parentFilter) { - return 
createKnnQuery(VectorData.fromFloats(queryVector), numCands, filter, similarityThreshold, parentFilter); + Query createKnnQuery( + float[] queryVector, + Integer k, + int numCands, + Query filter, + Float similarityThreshold, + BitSetProducer parentFilter + ) { + return createKnnQuery(VectorData.fromFloats(queryVector), k, numCands, filter, similarityThreshold, parentFilter); } public Query createKnnQuery( VectorData queryVector, + Integer k, int numCands, Query filter, Float similarityThreshold, @@ -1519,31 +1782,51 @@ public Query createKnnQuery( ); } return switch (getElementType()) { - case BYTE -> createKnnByteQuery(queryVector.asByteVector(), numCands, filter, similarityThreshold, parentFilter); - case FLOAT -> createKnnFloatQuery(queryVector.asFloatVector(), numCands, filter, similarityThreshold, parentFilter); + case BYTE -> createKnnByteQuery(queryVector.asByteVector(), k, numCands, filter, similarityThreshold, parentFilter); + case FLOAT -> createKnnFloatQuery(queryVector.asFloatVector(), k, numCands, filter, similarityThreshold, parentFilter); + case BIT -> createKnnBitQuery(queryVector.asByteVector(), k, numCands, filter, similarityThreshold, parentFilter); }; } - private Query createKnnByteQuery( + private Query createKnnBitQuery( byte[] queryVector, + Integer k, int numCands, Query filter, Float similarityThreshold, BitSetProducer parentFilter ) { - if (queryVector.length != dims) { - throw new IllegalArgumentException( - "the query vector has a different dimension [" + queryVector.length + "] than the index vectors [" + dims + "]" + elementType.checkDimensions(dims, queryVector.length); + Query knnQuery = parentFilter != null + ? new ESDiversifyingChildrenByteKnnVectorQuery(name(), queryVector, filter, k, numCands, parentFilter) + : new ESKnnByteVectorQuery(name(), queryVector, k, numCands, filter); + if (similarityThreshold != null) { + knnQuery = new VectorSimilarityQuery( + knnQuery, + similarityThreshold, + similarity.score(similarityThreshold, elementType, dims) ); } + return knnQuery; + } + + private Query createKnnByteQuery( + byte[] queryVector, + Integer k, + int numCands, + Query filter, + Float similarityThreshold, + BitSetProducer parentFilter + ) { + elementType.checkDimensions(dims, queryVector.length); if (similarity == VectorSimilarity.DOT_PRODUCT || similarity == VectorSimilarity.COSINE) { float squaredMagnitude = VectorUtil.dotProduct(queryVector, queryVector); elementType.checkVectorMagnitude(similarity, ElementType.errorByteElementsAppender(queryVector), squaredMagnitude); } Query knnQuery = parentFilter != null - ? new ESDiversifyingChildrenByteKnnVectorQuery(name(), queryVector, filter, numCands, parentFilter) - : new ESKnnByteVectorQuery(name(), queryVector, numCands, filter); + ? 
new ESDiversifyingChildrenByteKnnVectorQuery(name(), queryVector, filter, k, numCands, parentFilter) + : new ESKnnByteVectorQuery(name(), queryVector, k, numCands, filter); if (similarityThreshold != null) { knnQuery = new VectorSimilarityQuery( knnQuery, @@ -1556,16 +1839,13 @@ private Query createKnnByteQuery( private Query createKnnFloatQuery( float[] queryVector, + Integer k, int numCands, Query filter, Float similarityThreshold, BitSetProducer parentFilter ) { - if (queryVector.length != dims) { - throw new IllegalArgumentException( - "the query vector has a different dimension [" + queryVector.length + "] than the index vectors [" + dims + "]" - ); - } + elementType.checkDimensions(dims, queryVector.length); elementType.checkVectorBounds(queryVector); if (similarity == VectorSimilarity.DOT_PRODUCT || similarity == VectorSimilarity.COSINE) { float squaredMagnitude = VectorUtil.dotProduct(queryVector, queryVector); @@ -1581,8 +1861,8 @@ && isNotUnitVector(squaredMagnitude)) { } } Query knnQuery = parentFilter != null - ? new ESDiversifyingChildrenFloatKnnVectorQuery(name(), queryVector, filter, numCands, parentFilter) - : new ESKnnFloatVectorQuery(name(), queryVector, numCands, filter); + ? new ESDiversifyingChildrenFloatKnnVectorQuery(name(), queryVector, filter, k, numCands, parentFilter) + : new ESKnnFloatVectorQuery(name(), queryVector, k, numCands, filter); if (similarityThreshold != null) { knnQuery = new VectorSimilarityQuery( knnQuery, @@ -1641,7 +1921,7 @@ public void parse(DocumentParserContext context) throws IOException { if (context.doc().getByKey(fieldType().name()) != null) { throw new IllegalArgumentException( "Field [" - + name() + + fullPath() + "] of type [" + typeName() + "] doesn't support indexing multiple values for the same field in the same document" @@ -1666,7 +1946,7 @@ public void parse(DocumentParserContext context) throws IOException { fieldType().meta() ); Mapper update = new DenseVectorFieldMapper( - simpleName(), + leafName(), updatedDenseVectorFieldType, indexOptions, indexCreatedVersion, @@ -1701,7 +1981,7 @@ private void parseBinaryDocValuesVectorAndIndex(DocumentParserContext context) t vectorData.addToBuffer(byteBuffer); if (indexCreatedVersion.onOrAfter(MAGNITUDE_STORED_INDEX_VERSION)) { // encode vector magnitude at the end - double dotProduct = elementType.computeDotProduct(vectorData); + double dotProduct = elementType.computeSquaredMagnitude(vectorData); float vectorMagnitude = (float) Math.sqrt(dotProduct); byteBuffer.putFloat(vectorMagnitude); } @@ -1715,7 +1995,7 @@ private void checkDimensionExceeded(int index, DocumentParserContext context) { "The [" + typeName() + "] field [" - + name() + + fullPath() + "] in doc [" + context.documentDescription() + "] has more dimensions " @@ -1732,7 +2012,7 @@ private void checkDimensionMatches(int index, DocumentParserContext context) { "The [" + typeName() + "] field [" - + name() + + fullPath() + "] in doc [" + context.documentDescription() + "] has a different number of dimensions " @@ -1757,7 +2037,7 @@ protected String contentType() { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), indexCreatedVersion).init(this); + return new Builder(leafName(), indexCreatedVersion).init(this); } private static IndexOptions parseIndexOptions(String fieldName, Object propNode) { @@ -1780,9 +2060,9 @@ private static IndexOptions parseIndexOptions(String fieldName, Object propNode) public KnnVectorsFormat getKnnVectorsFormatForField(KnnVectorsFormat defaultFormat) { final 
KnnVectorsFormat format; if (indexOptions == null) { - format = defaultFormat; + format = fieldType().elementType == ElementType.BIT ? new ES815HnswBitVectorsFormat() : defaultFormat; } else { - format = indexOptions.getVectorsFormat(); + format = indexOptions.getVectorsFormat(fieldType().elementType); } // It's legal to reuse the same format name as this is the same on-disk format. return new KnnVectorsFormat(format.getName()) { @@ -1817,7 +2097,7 @@ protected SyntheticSourceMode syntheticSourceMode() { public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { if (copyTo.copyToFields().isEmpty() != true) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" + "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); } if (fieldType().indexed) { @@ -1848,10 +2128,10 @@ public Stream> storedFieldLoaders() { @Override public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf) throws IOException { - values = leafReader.getFloatVectorValues(name()); + values = leafReader.getFloatVectorValues(fullPath()); if (values != null) { if (indexCreatedVersion.onOrAfter(NORMALIZE_COSINE) && VectorSimilarity.COSINE.equals(vectorSimilarity)) { - magnitudeReader = leafReader.getNumericDocValues(name() + COSINE_MAGNITUDE_FIELD_SUFFIX); + magnitudeReader = leafReader.getNumericDocValues(fullPath() + COSINE_MAGNITUDE_FIELD_SUFFIX); } return docId -> { hasValue = docId == values.advance(docId); @@ -1859,7 +2139,7 @@ public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf return hasValue; }; } - byteVectorValues = leafReader.getByteVectorValues(name()); + byteVectorValues = leafReader.getByteVectorValues(fullPath()); if (byteVectorValues != null) { return docId -> { hasValue = docId == byteVectorValues.advance(docId); @@ -1883,7 +2163,7 @@ public void write(XContentBuilder b) throws IOException { if (hasMagnitude) { magnitude = Float.intBitsToFloat((int) magnitudeReader.longValue()); } - b.startArray(simpleName()); + b.startArray(leafName()); if (values != null) { for (float v : values.vectorValue()) { if (hasMagnitude) { @@ -1903,7 +2183,7 @@ public void write(XContentBuilder b) throws IOException { @Override public String fieldName() { - return name(); + return fullPath(); } } @@ -1923,7 +2203,7 @@ public Stream> storedFieldLoaders() { @Override public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf) throws IOException { - values = leafReader.getBinaryDocValues(name()); + values = leafReader.getBinaryDocValues(fullPath()); if (values == null) { return null; } @@ -1943,7 +2223,7 @@ public void write(XContentBuilder b) throws IOException { if (false == hasValue) { return; } - b.startArray(simpleName()); + b.startArray(leafName()); BytesRef ref = values.binaryValue(); ByteBuffer byteBuffer = ByteBuffer.wrap(ref.bytes, ref.offset, ref.length); if (indexCreatedVersion.onOrAfter(LITTLE_ENDIAN_FLOAT_STORED_INDEX_VERSION)) { @@ -1958,7 +2238,7 @@ public void write(XContentBuilder b) throws IOException { @Override public String fieldName() { - return name(); + return fullPath(); } } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java index e07c9247072b9..62be6910bf6f7 100644 --- 
a/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java @@ -67,8 +67,8 @@ protected Parameter[] getParameters() { @Override public SparseVectorFieldMapper build(MapperBuilderContext context) { return new SparseVectorFieldMapper( - name(), - new SparseVectorFieldType(context.buildFullName(name()), meta.getValue()), + leafName(), + new SparseVectorFieldType(context.buildFullName(leafName()), meta.getValue()), multiFieldsBuilder.build(this, context), copyTo ); @@ -142,7 +142,7 @@ public Map indexAnalyzers() { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName()).init(this); + return new Builder(leafName()).init(this); } @Override @@ -186,14 +186,14 @@ public void parse(DocumentParserContext context) throws IOException { } else if (token == Token.VALUE_NULL) { // ignore feature, this is consistent with numeric fields } else if (token == Token.VALUE_NUMBER || token == Token.VALUE_STRING) { - final String key = name() + "." + feature; + final String key = fullPath() + "." + feature; float value = context.parser().floatValue(true); // if we have an existing feature of the same name we'll select for the one with the max value // based on recommendations from this paper: https://arxiv.org/pdf/2305.18494.pdf IndexableField currentField = context.doc().getByKey(key); if (currentField == null) { - context.doc().addWithKey(key, new FeatureField(name(), feature, value)); + context.doc().addWithKey(key, new FeatureField(fullPath(), feature, value)); } else if (currentField instanceof FeatureField && ((FeatureField) currentField).getFeatureValue() < value) { ((FeatureField) currentField).setFeatureValue(value); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/VectorDVLeafFieldData.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/VectorDVLeafFieldData.java index d66b429e6dd95..f35ba3a0fd5b8 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/VectorDVLeafFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/VectorDVLeafFieldData.java @@ -17,6 +17,8 @@ import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.ElementType; import org.elasticsearch.script.field.DocValuesScriptFieldFactory; import org.elasticsearch.script.field.vectors.BinaryDenseVectorDocValuesField; +import org.elasticsearch.script.field.vectors.BitBinaryDenseVectorDocValuesField; +import org.elasticsearch.script.field.vectors.BitKnnDenseVectorDocValuesField; import org.elasticsearch.script.field.vectors.ByteBinaryDenseVectorDocValuesField; import org.elasticsearch.script.field.vectors.ByteKnnDenseVectorDocValuesField; import org.elasticsearch.script.field.vectors.KnnDenseVectorDocValuesField; @@ -58,12 +60,14 @@ public DocValuesScriptFieldFactory getScriptFieldFactory(String name) { return switch (elementType) { case BYTE -> new ByteKnnDenseVectorDocValuesField(reader.getByteVectorValues(field), name, dims); case FLOAT -> new KnnDenseVectorDocValuesField(reader.getFloatVectorValues(field), name, dims); + case BIT -> new BitKnnDenseVectorDocValuesField(reader.getByteVectorValues(field), name, dims); }; } else { BinaryDocValues values = DocValues.getBinary(reader, field); return switch (elementType) { case BYTE -> new ByteBinaryDenseVectorDocValuesField(values, name, elementType, dims); case FLOAT -> new BinaryDenseVectorDocValuesField(values, name, elementType, dims, 
indexVersion); + case BIT -> new BitBinaryDenseVectorDocValuesField(values, name, elementType, dims); }; } } catch (IOException e) { diff --git a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java index 2a1062f8876d2..f2fc7c1bd6cd0 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java @@ -9,11 +9,14 @@ package org.elasticsearch.index.query; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.Strings; import org.elasticsearch.core.Nullable; -import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.shard.IndexLongFieldRange; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.xcontent.XContentParserConfiguration; import java.util.Collections; @@ -23,19 +26,24 @@ * Context object used to rewrite {@link QueryBuilder} instances into simplified version in the coordinator. * Instances of this object rely on information stored in the {@code IndexMetadata} for certain indices. * Right now this context object is able to rewrite range queries that include a known timestamp field - * (i.e. the timestamp field for DataStreams) into a MatchNoneQueryBuilder and skip the shards that - * don't hold queried data. See IndexMetadata#getTimestampRange() for more details + * (i.e. the timestamp field for DataStreams or the 'event.ingested' field in ECS) into a MatchNoneQueryBuilder + * and skip the shards that don't hold queried data. See IndexMetadata for more details. */ public class CoordinatorRewriteContext extends QueryRewriteContext { - private final IndexLongFieldRange indexLongFieldRange; - private final DateFieldMapper.DateFieldType timestampFieldType; + private final DateFieldRangeInfo dateFieldRangeInfo; + /** + * Context for coordinator search rewrites based on time ranges for the @timestamp field and/or 'event.ingested' field + * @param parserConfig + * @param client + * @param nowInMillis + * @param dateFieldRangeInfo range and field type info for @timestamp and 'event.ingested' + */ public CoordinatorRewriteContext( XContentParserConfiguration parserConfig, Client client, LongSupplier nowInMillis, - IndexLongFieldRange indexLongFieldRange, - DateFieldMapper.DateFieldType timestampFieldType + DateFieldRangeInfo dateFieldRangeInfo ) { super( parserConfig, @@ -53,29 +61,98 @@ public CoordinatorRewriteContext( null, null ); - this.indexLongFieldRange = indexLongFieldRange; - this.timestampFieldType = timestampFieldType; + this.dateFieldRangeInfo = dateFieldRangeInfo; } - long getMinTimestamp() { - return indexLongFieldRange.getMin(); + /** + * Get min timestamp for either '@timestamp' or 'event.ingested' fields. Any other field + * passed in will cause an {@link IllegalArgumentException} to be thrown, as these are the only + * two fields supported for coordinator rewrites (based on time range). + * @param fieldName Must be DataStream.TIMESTAMP_FIELD_NAME or IndexMetadata.EVENT_INGESTED_FIELD_NAME + * @return min timestamp for the field from IndexMetadata in cluster state. 
+ */ + long getMinTimestamp(String fieldName) { + if (DataStream.TIMESTAMP_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.getTimestampRange().getMin(); + } else if (IndexMetadata.EVENT_INGESTED_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.getEventIngestedRange().getMin(); + } else { + throw new IllegalArgumentException( + Strings.format( + "Only [%s] or [%s] fields are supported for min timestamp coordinator rewrites, but got: [%s]", + DataStream.TIMESTAMP_FIELD_NAME, + IndexMetadata.EVENT_INGESTED_FIELD_NAME, + fieldName + ) + ); + } } - long getMaxTimestamp() { - return indexLongFieldRange.getMax(); + /** + * Get max timestamp for either '@timestamp' or 'event.ingested' fields. Any other field + * passed in will cause an {@link IllegalArgumentException} to be thrown, as these are the only + * two fields supported for coordinator rewrites (based on time range). + * @param fieldName Must be DataStream.TIMESTAMP_FIELD_NAME or IndexMetadata.EVENT_INGESTED_FIELD_NAME + * @return max timestamp for the field from IndexMetadata in cluster state. + */ + long getMaxTimestamp(String fieldName) { + if (DataStream.TIMESTAMP_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.getTimestampRange().getMax(); + } else if (IndexMetadata.EVENT_INGESTED_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.getEventIngestedRange().getMax(); + } else { + throw new IllegalArgumentException( + Strings.format( + "Only [%s] or [%s] fields are supported for max timestamp coordinator rewrites, but got: [%s]", + DataStream.TIMESTAMP_FIELD_NAME, + IndexMetadata.EVENT_INGESTED_FIELD_NAME, + fieldName + ) + ); + } } - boolean hasTimestampData() { - return indexLongFieldRange.isComplete() && indexLongFieldRange != IndexLongFieldRange.EMPTY; + /** + * Determine whether either the '@timestamp' or 'event.ingested' field has useful timestamp ranges + * stored in cluster state for this context. + * Any other field name will cause an {@link IllegalArgumentException} to be thrown, as these are the only + * two fields supported for coordinator rewrites (based on time range). + * @param fieldName Must be DataStream.TIMESTAMP_FIELD_NAME or IndexMetadata.EVENT_INGESTED_FIELD_NAME + * @return true if the field has a complete, non-empty timestamp range in IndexMetadata in cluster state. + */ + boolean hasTimestampData(String fieldName) { + if (DataStream.TIMESTAMP_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.getTimestampRange().isComplete() + && dateFieldRangeInfo.getTimestampRange() != IndexLongFieldRange.EMPTY; + } else if (IndexMetadata.EVENT_INGESTED_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.getEventIngestedRange().isComplete() + && dateFieldRangeInfo.getEventIngestedRange() != IndexLongFieldRange.EMPTY; + } else { + throw new IllegalArgumentException( + Strings.format( + "Only [%s] or [%s] fields are supported for min/max timestamp coordinator rewrites, but got: [%s]", + DataStream.TIMESTAMP_FIELD_NAME, + IndexMetadata.EVENT_INGESTED_FIELD_NAME, + fieldName + ) + ); + } } + /** + * Get the MappedFieldType for either the '@timestamp' or 'event.ingested' field. + * @param fieldName Must be DataStream.TIMESTAMP_FIELD_NAME or IndexMetadata.EVENT_INGESTED_FIELD_NAME + * @return the MappedFieldType for the field from IndexMetadata in cluster state, or null if fieldName was not + * DataStream.TIMESTAMP_FIELD_NAME or IndexMetadata.EVENT_INGESTED_FIELD_NAME.
+ */ @Nullable public MappedFieldType getFieldType(String fieldName) { - if (fieldName.equals(timestampFieldType.name()) == false) { + if (DataStream.TIMESTAMP_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.getTimestampFieldType(); + } else if (IndexMetadata.EVENT_INGESTED_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.getEventIngestedFieldType(); + } else { return null; } - - return timestampFieldType; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java index e44861b4afe8a..8251b82c05af2 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java +++ b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java @@ -14,6 +14,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.shard.IndexLongFieldRange; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.xcontent.XContentParserConfiguration; import java.util.function.Function; @@ -25,14 +26,14 @@ public class CoordinatorRewriteContextProvider { private final Client client; private final LongSupplier nowInMillis; private final Supplier clusterStateSupplier; - private final Function mappingSupplier; + private final Function mappingSupplier; public CoordinatorRewriteContextProvider( XContentParserConfiguration parserConfig, Client client, LongSupplier nowInMillis, Supplier clusterStateSupplier, - Function mappingSupplier + Function mappingSupplier ) { this.parserConfig = parserConfig; this.client = client; @@ -49,18 +50,33 @@ public CoordinatorRewriteContext getCoordinatorRewriteContext(Index index) { if (indexMetadata == null) { return null; } - DateFieldMapper.DateFieldType dateFieldType = mappingSupplier.apply(index); - if (dateFieldType == null) { + + DateFieldRangeInfo dateFieldRangeInfo = mappingSupplier.apply(index); + if (dateFieldRangeInfo == null) { return null; } + + DateFieldMapper.DateFieldType timestampFieldType = dateFieldRangeInfo.getTimestampFieldType(); + DateFieldMapper.DateFieldType eventIngestedFieldType = dateFieldRangeInfo.getEventIngestedFieldType(); IndexLongFieldRange timestampRange = indexMetadata.getTimestampRange(); + IndexLongFieldRange eventIngestedRange = indexMetadata.getEventIngestedRange(); + if (timestampRange.containsAllShardRanges() == false) { - timestampRange = indexMetadata.getTimeSeriesTimestampRange(dateFieldType); - if (timestampRange == null) { + // if @timestamp range is not present or not ready in cluster state, fallback to using time series range (if present) + timestampRange = indexMetadata.getTimeSeriesTimestampRange(timestampFieldType); + // if timestampRange in the time series is null AND the eventIngestedRange is not ready for use, return null (no coord rewrite) + if (timestampRange == null && eventIngestedRange.containsAllShardRanges() == false) { return null; } } - return new CoordinatorRewriteContext(parserConfig, client, nowInMillis, timestampRange, dateFieldType); + // the DateFieldRangeInfo from the mappingSupplier only has field types, but not ranges + // so create a new object with ranges pulled from cluster state + return new CoordinatorRewriteContext( + parserConfig, + client, + nowInMillis, + new DateFieldRangeInfo(timestampFieldType, timestampRange, eventIngestedFieldType, eventIngestedRange) + ); } } diff --git 
a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index 3a96fdc9b0e0e..a6a3d8546187f 100644 --- a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; +import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.index.mapper.NestedObjectMapper; import org.elasticsearch.index.search.ESToParentBlockJoinQuery; import org.elasticsearch.index.search.NestedHelper; @@ -260,6 +261,26 @@ protected int doHashCode() { @Override protected Query doToQuery(SearchExecutionContext context) throws IOException { + return toQuery((this.query::toQuery), path, scoreMode, ignoreUnmapped, context); + } + + /** + * Returns the primitive Lucene query for a nested query given the primitive query to wrap + * @param <E> exception that the queryProvider may throw + * @param queryProvider Retrieves the query to use given the SearchExecutionContext + * @param path nested path + * @param scoreMode score mode to use + * @param ignoreUnmapped whether to ignore unmapped fields + * @param context search execution context + * @return the primitive Lucene query + */ + public static Query toQuery( + CheckedFunction queryProvider, + String path, + ScoreMode scoreMode, + boolean ignoreUnmapped, + SearchExecutionContext context + ) throws E { if (context.allowExpensiveQueries() == false) { throw new ElasticsearchException( "[joining] queries cannot be executed when '" + ALLOW_EXPENSIVE_QUERIES.getKey() + "' is set to false."
@@ -285,7 +306,7 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException { try { context.nestedScope().nextLevel(mapper); - innerQuery = this.query.toQuery(context); + innerQuery = queryProvider.apply(context); } finally { context.nestedScope().previousLevel(); } diff --git a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java index 4d2a6d3eaecdb..ac7fae8ec0145 100644 --- a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java @@ -436,11 +436,11 @@ public String getWriteableName() { protected MappedFieldType.Relation getRelation(final CoordinatorRewriteContext coordinatorRewriteContext) { final MappedFieldType fieldType = coordinatorRewriteContext.getFieldType(fieldName); if (fieldType instanceof final DateFieldMapper.DateFieldType dateFieldType) { - if (coordinatorRewriteContext.hasTimestampData() == false) { + if (coordinatorRewriteContext.hasTimestampData(fieldName) == false) { return MappedFieldType.Relation.DISJOINT; } - long minTimestamp = coordinatorRewriteContext.getMinTimestamp(); - long maxTimestamp = coordinatorRewriteContext.getMaxTimestamp(); + long minTimestamp = coordinatorRewriteContext.getMinTimestamp(fieldName); + long maxTimestamp = coordinatorRewriteContext.getMaxTimestamp(fieldName); DateMathParser dateMathParser = getForceDateParser(); return dateFieldType.isFieldWithinQuery( minTimestamp, diff --git a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java index 9d3aa9905c744..4b16a5833cf41 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java @@ -338,6 +338,7 @@ public > IFD getForField(MappedFieldType fieldType fieldType, new FieldDataContext( getFullyQualifiedIndex().getName(), + getIndexSettings(), () -> this.lookup().forkAndTrackFieldReferences(fieldType.name()), this::sourcePath, fielddataOperation @@ -514,7 +515,13 @@ public void setLookupProviders( this::getFieldType, (fieldType, searchLookup, fielddataOperation) -> indexFieldDataLookup.apply( fieldType, - new FieldDataContext(getFullyQualifiedIndex().getName(), searchLookup, this::sourcePath, fielddataOperation) + new FieldDataContext( + getFullyQualifiedIndex().getName(), + getIndexSettings(), + searchLookup, + this::sourcePath, + fielddataOperation + ) ), sourceProvider, fieldLookupProvider diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/ExponentialDecayFunctionBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/ExponentialDecayFunctionBuilder.java index 2c361fe025dfa..ca6dfa5ef6e51 100644 --- a/server/src/main/java/org/elasticsearch/index/query/functionscore/ExponentialDecayFunctionBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/ExponentialDecayFunctionBuilder.java @@ -76,10 +76,7 @@ public int hashCode() { @Override public boolean equals(Object obj) { - if (super.equals(obj)) { - return true; - } - return obj != null && getClass() != obj.getClass(); + return obj == this || (obj != null && obj.getClass() == this.getClass()); } } diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/GaussDecayFunctionBuilder.java 
b/server/src/main/java/org/elasticsearch/index/query/functionscore/GaussDecayFunctionBuilder.java index 4415c87e9815e..1cc9335b5963e 100644 --- a/server/src/main/java/org/elasticsearch/index/query/functionscore/GaussDecayFunctionBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/GaussDecayFunctionBuilder.java @@ -83,10 +83,7 @@ public int hashCode() { @Override public boolean equals(Object obj) { - if (super.equals(obj)) { - return true; - } - return obj != null && getClass() != obj.getClass(); + return obj == this || (obj != null && obj.getClass() == this.getClass()); } } } diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/LinearDecayFunctionBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/LinearDecayFunctionBuilder.java index ff22e1d57f832..70c3c4458a217 100644 --- a/server/src/main/java/org/elasticsearch/index/query/functionscore/LinearDecayFunctionBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/LinearDecayFunctionBuilder.java @@ -86,10 +86,7 @@ public int hashCode() { @Override public boolean equals(Object obj) { - if (super.equals(obj)) { - return true; - } - return obj != null && getClass() != obj.getClass(); + return obj == this || (obj != null && obj.getClass() == this.getClass()); } } } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index 0b3b15670ef78..247c2fd70761e 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -280,7 +280,7 @@ public synchronized RetentionLeases getRetentionLeases(final boolean expireLease private long getMinimumReasonableRetainedSeqNo() { final SafeCommitInfo safeCommitInfo = safeCommitInfoSupplier.get(); - return safeCommitInfo.localCheckpoint + 1 - Math.round(Math.ceil(safeCommitInfo.docCount * fileBasedRecoveryThreshold)); + return safeCommitInfo.localCheckpoint() + 1 - Math.round(Math.ceil(safeCommitInfo.docCount() * fileBasedRecoveryThreshold)); // NB safeCommitInfo.docCount is a very low-level count of the docs in the index, and in particular if this shard contains nested // docs then safeCommitInfo.docCount counts every child doc separately from the parent doc. 
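A note on the equals() rewrites in the three decay function builders above: the removed implementations could return true when the two objects had *different* classes, inverting the intended check. A minimal sketch of the corrected pattern, using a hypothetical ExampleScoreFunction class rather than the actual inner score-function classes:

    // Illustration only: equality based purely on the concrete class, as in the fixed builders.
    final class ExampleScoreFunction {
        @Override
        public boolean equals(Object obj) {
            // reflexive, null-safe, and equal only to instances of the exact same class
            return obj == this || (obj != null && obj.getClass() == this.getClass());
        }

        @Override
        public int hashCode() {
            return getClass().hashCode();
        }
    }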
However every part of a nested document // has the same seqno, so we may be overestimating the cost of a file-based recovery when compared to an ops-based recovery and diff --git a/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java b/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java index 0cd451f6be2cf..bb4ef40d28129 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java @@ -103,15 +103,7 @@ public static long max(final long maxSeqNo, final long seqNo) { } } - public static final class CommitInfo { - public final long maxSeqNo; - public final long localCheckpoint; - - public CommitInfo(long maxSeqNo, long localCheckpoint) { - this.maxSeqNo = maxSeqNo; - this.localCheckpoint = localCheckpoint; - } - + public record CommitInfo(long maxSeqNo, long localCheckpoint) { @Override public String toString() { return "CommitInfo{maxSeqNo=" + maxSeqNo + ", localCheckpoint=" + localCheckpoint + '}'; diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index b3f19b1b7a81d..73cbca36a69c8 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1856,8 +1856,8 @@ private void doLocalRecovery( return; } - assert safeCommit.get().localCheckpoint <= globalCheckpoint : safeCommit.get().localCheckpoint + " > " + globalCheckpoint; - if (safeCommit.get().localCheckpoint == globalCheckpoint) { + assert safeCommit.get().localCheckpoint() <= globalCheckpoint : safeCommit.get().localCheckpoint() + " > " + globalCheckpoint; + if (safeCommit.get().localCheckpoint() == globalCheckpoint) { logger.trace( "skip local recovery as the safe commit is up to date; safe commit {} global checkpoint {}", safeCommit.get(), @@ -1876,7 +1876,7 @@ private void doLocalRecovery( globalCheckpoint ); recoveryState.getTranslog().totalLocal(0); - recoveryStartingSeqNoListener.onResponse(safeCommit.get().localCheckpoint + 1); + recoveryStartingSeqNoListener.onResponse(safeCommit.get().localCheckpoint() + 1); return; } @@ -1915,7 +1915,7 @@ private void doLocalRecovery( // we need to find the safe commit again as we should have created a new one during the local recovery final Optional newSafeCommit = store.findSafeIndexCommit(globalCheckpoint); assert newSafeCommit.isPresent() : "no safe commit found after local recovery"; - return newSafeCommit.get().localCheckpoint + 1; + return newSafeCommit.get().localCheckpoint() + 1; } catch (Exception e) { logger.debug( () -> format( @@ -2230,10 +2230,19 @@ public RecoveryState recoveryState() { @Override public ShardLongFieldRange getTimestampRange() { + return determineShardLongFieldRange(DataStream.TIMESTAMP_FIELD_NAME); + } + + @Override + public ShardLongFieldRange getEventIngestedRange() { + return determineShardLongFieldRange(IndexMetadata.EVENT_INGESTED_FIELD_NAME); + } + + private ShardLongFieldRange determineShardLongFieldRange(String fieldName) { if (mapperService() == null) { return ShardLongFieldRange.UNKNOWN; // no mapper service, no idea if the field even exists } - final MappedFieldType mappedFieldType = mapperService().fieldType(DataStream.TIMESTAMP_FIELD_NAME); + final MappedFieldType mappedFieldType = mapperService().fieldType(fieldName); if (mappedFieldType instanceof DateFieldMapper.DateFieldType == false) { return 
ShardLongFieldRange.UNKNOWN; // field missing or not a date } @@ -2243,10 +2252,10 @@ public ShardLongFieldRange getTimestampRange() { final ShardLongFieldRange rawTimestampFieldRange; try { - rawTimestampFieldRange = getEngine().getRawFieldRange(DataStream.TIMESTAMP_FIELD_NAME); + rawTimestampFieldRange = getEngine().getRawFieldRange(fieldName); assert rawTimestampFieldRange != null; } catch (IOException | AlreadyClosedException e) { - logger.debug("exception obtaining range for timestamp field", e); + logger.debug("exception obtaining range for field " + fieldName, e); return ShardLongFieldRange.UNKNOWN; } if (rawTimestampFieldRange == ShardLongFieldRange.UNKNOWN) { @@ -3337,7 +3346,7 @@ private void executeRecovery( markAsRecovering(reason, recoveryState); // mark the shard as recovering on the cluster state thread threadPool.generic().execute(ActionRunnable.wrap(ActionListener.wrap(r -> { if (r) { - recoveryListener.onRecoveryDone(recoveryState, getTimestampRange()); + recoveryListener.onRecoveryDone(recoveryState, getTimestampRange(), getEventIngestedRange()); } }, e -> recoveryListener.onRecoveryFailure(new RecoveryFailedException(recoveryState, null, e), true)), action)); } diff --git a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java index ace891f9aead6..3783b64a0a04f 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java +++ b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java @@ -396,7 +396,7 @@ protected static void addNewHistoryCommit(Directory indexDirectory, Terminal ter // We can only safely do it because we will generate a new history uuid this shard. final SequenceNumbers.CommitInfo commitInfo = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(userData.entrySet()); // Also advances the local checkpoint of the last commit to its max_seqno. - userData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(commitInfo.maxSeqNo)); + userData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(commitInfo.maxSeqNo())); } // commit the new history id diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index 0acddcf0e45b2..22a7249ec7237 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -36,6 +36,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -197,6 +198,10 @@ static void addIndices( .setIndexCreatedVersionMajor(luceneIndexCreatedVersionMajor); if (indexSort != null) { iwc.setIndexSort(indexSort); + if (indexMetadata != null && indexMetadata.getCreationVersion().onOrAfter(IndexVersions.INDEX_SORTING_ON_NESTED)) { + // Needed to support index sorting in the presence of nested objects. 
+ iwc.setParentField(Engine.ROOT_DOC_FIELD_NAME); + } } try (IndexWriter writer = new IndexWriter(new StatsDirectoryWrapper(hardLinkOrCopyTarget, indexRecoveryStats), iwc)) { diff --git a/server/src/main/java/org/elasticsearch/index/store/FsDirectoryFactory.java b/server/src/main/java/org/elasticsearch/index/store/FsDirectoryFactory.java index 37150ea748225..05c3554b47602 100644 --- a/server/src/main/java/org/elasticsearch/index/store/FsDirectoryFactory.java +++ b/server/src/main/java/org/elasticsearch/index/store/FsDirectoryFactory.java @@ -21,7 +21,6 @@ import org.apache.lucene.store.SimpleFSLockFactory; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.util.FeatureFlag; import org.elasticsearch.core.IOUtils; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; @@ -36,8 +35,6 @@ public class FsDirectoryFactory implements IndexStorePlugin.DirectoryFactory { - private static final FeatureFlag MADV_RANDOM_FEATURE_FLAG = new FeatureFlag("madv_random"); - public static final Setting INDEX_LOCK_FACTOR_SETTING = new Setting<>("index.store.fs.fs_lock", "native", (s) -> { return switch (s) { case "native" -> NativeFSLockFactory.INSTANCE; @@ -69,20 +66,12 @@ protected Directory newFSDirectory(Path location, LockFactory lockFactory, Index // Use Lucene defaults final FSDirectory primaryDirectory = FSDirectory.open(location, lockFactory); if (primaryDirectory instanceof MMapDirectory mMapDirectory) { - Directory dir = new HybridDirectory(lockFactory, setPreload(mMapDirectory, lockFactory, preLoadExtensions)); - if (MADV_RANDOM_FEATURE_FLAG.isEnabled() == false) { - dir = disableRandomAdvice(dir); - } - return dir; + return new HybridDirectory(lockFactory, setPreload(mMapDirectory, lockFactory, preLoadExtensions)); } else { return primaryDirectory; } case MMAPFS: - Directory dir = setPreload(new MMapDirectory(location, lockFactory), lockFactory, preLoadExtensions); - if (MADV_RANDOM_FEATURE_FLAG.isEnabled() == false) { - dir = disableRandomAdvice(dir); - } - return dir; + return setPreload(new MMapDirectory(location, lockFactory), lockFactory, preLoadExtensions); case SIMPLEFS: case NIOFS: return new NIOFSDirectory(location, lockFactory); @@ -104,23 +93,6 @@ public static MMapDirectory setPreload(MMapDirectory mMapDirectory, LockFactory return mMapDirectory; } - /** - * Return a {@link FilterDirectory} around the provided {@link Directory} that forcefully disables {@link IOContext#RANDOM random - * access}. 
- */ - static Directory disableRandomAdvice(Directory dir) { - return new FilterDirectory(dir) { - @Override - public IndexInput openInput(String name, IOContext context) throws IOException { - if (context.randomAccess) { - context = IOContext.READ; - } - assert context.randomAccess == false; - return super.openInput(name, context); - } - }; - } - /** * Returns true iff the directory is a hybrid fs directory */ diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index b7bf3a68ade07..b9c50edf50216 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -155,12 +155,20 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref private final OnClose onClose; private final AbstractRefCounted refCounter = AbstractRefCounted.of(this::closeInternal); // close us once we are done + private boolean hasIndexSort; public Store(ShardId shardId, IndexSettings indexSettings, Directory directory, ShardLock shardLock) { - this(shardId, indexSettings, directory, shardLock, OnClose.EMPTY); + this(shardId, indexSettings, directory, shardLock, OnClose.EMPTY, false); } - public Store(ShardId shardId, IndexSettings indexSettings, Directory directory, ShardLock shardLock, OnClose onClose) { + public Store( + ShardId shardId, + IndexSettings indexSettings, + Directory directory, + ShardLock shardLock, + OnClose onClose, + boolean hasIndexSort + ) { super(shardId, indexSettings); this.directory = new StoreDirectory( byteSizeDirectory(directory, indexSettings, logger), @@ -168,6 +176,7 @@ public Store(ShardId shardId, IndexSettings indexSettings, Directory directory, ); this.shardLock = shardLock; this.onClose = onClose; + this.hasIndexSort = hasIndexSort; assert onClose != null; assert shardLock != null; @@ -1520,7 +1529,7 @@ public Optional findSafeIndexCommit(long globalCheck final IndexCommit safeCommit = CombinedDeletionPolicy.findSafeCommitPoint(commits, globalCheckpoint); final SequenceNumbers.CommitInfo commitInfo = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(safeCommit.getUserData().entrySet()); // all operations of the safe commit must be at most the global checkpoint. 
- if (commitInfo.maxSeqNo <= globalCheckpoint) { + if (commitInfo.maxSeqNo() <= globalCheckpoint) { return Optional.of(commitInfo); } else { return Optional.empty(); @@ -1541,20 +1550,25 @@ private static Map getUserData(IndexWriter writer) { return userData; } - private static IndexWriter newTemporaryAppendingIndexWriter(final Directory dir, final IndexCommit commit) throws IOException { + private IndexWriter newTemporaryAppendingIndexWriter(final Directory dir, final IndexCommit commit) throws IOException { IndexWriterConfig iwc = newTemporaryIndexWriterConfig().setIndexCommit(commit).setOpenMode(IndexWriterConfig.OpenMode.APPEND); return new IndexWriter(dir, iwc); } - private static IndexWriter newTemporaryEmptyIndexWriter(final Directory dir, final Version luceneVersion) throws IOException { + private IndexWriter newTemporaryEmptyIndexWriter(final Directory dir, final Version luceneVersion) throws IOException { IndexWriterConfig iwc = newTemporaryIndexWriterConfig().setOpenMode(IndexWriterConfig.OpenMode.CREATE) .setIndexCreatedVersionMajor(luceneVersion.major); return new IndexWriter(dir, iwc); } - private static IndexWriterConfig newTemporaryIndexWriterConfig() { + private IndexWriterConfig newTemporaryIndexWriterConfig() { // this config is only used for temporary IndexWriter instances, used to initialize the index or update the commit data, // so we don't want any merges to happen - return indexWriterConfigWithNoMerging(null).setSoftDeletesField(Lucene.SOFT_DELETES_FIELD).setCommitOnClose(false); + var iwc = indexWriterConfigWithNoMerging(null).setSoftDeletesField(Lucene.SOFT_DELETES_FIELD).setCommitOnClose(false); + if (hasIndexSort && indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.INDEX_SORTING_ON_NESTED)) { + // Needed to support index sorting in the presence of nested objects. + iwc.setParentField(Engine.ROOT_DOC_FIELD_NAME); + } + return iwc; } } diff --git a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java index d2c862bbf35d7..3be2532e3c3aa 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java +++ b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java @@ -149,8 +149,8 @@ public long getLastModifiedTime() throws IOException { * Reads a single operation from the given location. 
*/ Translog.Operation read(Translog.Location location) throws IOException { - assert location.generation == this.generation : "generation mismatch expected: " + generation + " got: " + location.generation; - ByteBuffer buffer = ByteBuffer.allocate(location.size); - return read(checksummedStream(buffer, location.translogLocation, location.size, null)); + assert location.generation() == this.generation : "generation mismatch expected: " + generation + " got: " + location.generation(); + ByteBuffer buffer = ByteBuffer.allocate(location.size()); + return read(checksummedStream(buffer, location.translogLocation(), location.size(), null)); } } diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index fb0f1ec4b4a51..c02a810ed4952 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; @@ -52,7 +51,7 @@ import java.util.Objects; import java.util.OptionalLong; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.LongConsumer; import java.util.function.LongSupplier; @@ -109,8 +108,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC private final List readers = new ArrayList<>(); private final BigArrays bigArrays; private final DiskIoBufferPool diskIoBufferPool; - protected final ReleasableLock readLock; - protected final ReleasableLock writeLock; + protected final Lock readLock; + protected final Lock writeLock; private final Path location; private TranslogWriter current; @@ -162,9 +161,9 @@ public Translog( this.translogUUID = translogUUID; this.bigArrays = config.getBigArrays(); this.diskIoBufferPool = config.getDiskIoBufferPool(); - ReadWriteLock rwl = new ReentrantReadWriteLock(); - this.readLock = new ReleasableLock(rwl.readLock()); - this.writeLock = new ReleasableLock(rwl.writeLock()); + var rwl = new ReentrantReadWriteLock(); + this.readLock = rwl.readLock(); + this.writeLock = rwl.writeLock(); this.location = config.getTranslogPath(); Files.createDirectories(this.location); @@ -224,7 +223,8 @@ public Translog( private ArrayList recoverFromFiles(Checkpoint checkpoint) throws IOException { boolean success = false; ArrayList foundTranslogs = new ArrayList<>(); - try (ReleasableLock ignored = writeLock.acquire()) { + writeLock.lock(); + try { logger.debug("open uncommitted translog checkpoint {}", checkpoint); final long minGenerationToRecoverFrom = checkpoint.minTranslogGeneration; @@ -295,6 +295,7 @@ private ArrayList recoverFromFiles(Checkpoint checkpoint) throws if (success == false) { IOUtils.closeWhileHandlingException(foundTranslogs); } + writeLock.unlock(); } return foundTranslogs; } @@ -384,13 +385,15 @@ public void close() throws IOException { assert calledFromOutsideOrViaTragedyClose() : "Translog.close method is called from inside Translog, but not via closeOnTragicEvent method"; if (closed.compareAndSet(false, true)) { - try 
(ReleasableLock lock = writeLock.acquire()) { + writeLock.lock(); + try { try { current.sync(); } finally { closeFilesIfNoPendingRetentionLocks(); } } finally { + writeLock.unlock(); logger.debug("translog closed"); } } @@ -409,8 +412,11 @@ public Path location() { * Returns the generation of the current transaction log. */ public long currentFileGeneration() { - try (ReleasableLock ignored = readLock.acquire()) { + readLock.lock(); + try { return current.getGeneration(); + } finally { + readLock.unlock(); } } @@ -418,7 +424,8 @@ public long currentFileGeneration() { * Returns the minimum file generation referenced by the translog */ public long getMinFileGeneration() { - try (ReleasableLock ignored = readLock.acquire()) { + readLock.lock(); + try { if (readers.isEmpty()) { return current.getGeneration(); } else { @@ -426,6 +433,8 @@ public long getMinFileGeneration() { : "the first translog isn't the one with the minimum generation:" + readers; return readers.get(0).getGeneration(); } + } finally { + readLock.unlock(); } } @@ -444,11 +453,14 @@ public long sizeInBytes() { } long earliestLastModifiedAge() { - try (ReleasableLock ignored = readLock.acquire()) { + readLock.lock(); + try { ensureOpen(); return findEarliestLastModifiedAge(System.currentTimeMillis(), readers, current); } catch (IOException e) { throw new TranslogException(shardId, "Unable to get the earliest last modified time for the transaction log"); + } finally { + readLock.unlock(); } } @@ -467,12 +479,15 @@ static long findEarliestLastModifiedAge(long currentTime, Iterable r.getGeneration() >= minGeneration) .mapToInt(BaseTranslogReader::totalOperations) .sum(); + } finally { + readLock.unlock(); } } @@ -480,9 +495,15 @@ public int totalOperationsByMinGen(long minGeneration) { * Returns the number of operations in the transaction files that contain operations with seq# above the given number. 
*/ public int estimateTotalOperationsFromMinSeq(long minSeqNo) { - try (ReleasableLock ignored = readLock.acquire()) { + readLock.lock(); + try { ensureOpen(); - return readersAboveMinSeqNo(minSeqNo).mapToInt(BaseTranslogReader::totalOperations).sum(); + return Stream.concat(readers.stream(), Stream.of(current)) + .filter(reader -> minSeqNo <= reader.getCheckpoint().maxEffectiveSeqNo()) + .mapToInt(BaseTranslogReader::totalOperations) + .sum(); + } finally { + readLock.unlock(); } } @@ -490,12 +511,15 @@ public int estimateTotalOperationsFromMinSeq(long minSeqNo) { * Returns the size in bytes of the translog files at least the given generation */ public long sizeInBytesByMinGen(long minGeneration) { - try (ReleasableLock ignored = readLock.acquire()) { + readLock.lock(); + try { ensureOpen(); return Stream.concat(readers.stream(), Stream.of(current)) .filter(r -> r.getGeneration() >= minGeneration) .mapToLong(BaseTranslogReader::sizeInBytes) .sum(); + } finally { + readLock.unlock(); } } @@ -576,7 +600,8 @@ public Location add(final Operation operation) throws IOException { try (ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(bigArrays)) { writeOperationWithSize(out, operation); final BytesReference bytes = out.bytes(); - try (ReleasableLock ignored = readLock.acquire()) { + readLock.lock(); + try { ensureOpen(); if (operation.primaryTerm() > current.getPrimaryTerm()) { assert false @@ -596,6 +621,8 @@ public Location add(final Operation operation) throws IOException { ); } return current.add(bytes, operation.seqNo()); + } finally { + readLock.unlock(); } } catch (final AlreadyClosedException | IOException ex) { closeOnTragicEvent(ex); @@ -615,8 +642,11 @@ public Location add(final Operation operation) throws IOException { */ public boolean shouldRollGeneration() { final long threshold = this.indexSettings.getGenerationThresholdSize().getBytes(); - try (ReleasableLock ignored = readLock.acquire()) { + readLock.lock(); + try { return this.current.sizeInBytes() > threshold; + } finally { + readLock.unlock(); } } @@ -625,13 +655,16 @@ public boolean shouldRollGeneration() { * can be returned by the next write. */ public Location getLastWriteLocation() { - try (ReleasableLock lock = readLock.acquire()) { + readLock.lock(); + try { /* * We use position = current - 1 and size = Integer.MAX_VALUE here instead of position current and size = 0 for two reasons: * 1. Translog.Location's compareTo doesn't actually pay attention to size even though it's equals method does. * 2. It feels more right to return a *position* that is before the next write's position rather than rely on the size. 
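The lock handling throughout this Translog diff follows one mechanical rewrite: the try-with-resources ReleasableLock wrappers are replaced with plain java.util.concurrent locks and explicit try/finally blocks. A minimal standalone sketch of the idiom (the class and guardedRead() method below are hypothetical, for illustration only):

    import java.util.concurrent.locks.Lock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    final class LockIdiomSketch {
        private final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();
        private final Lock readLock = rwl.readLock();

        // Before: try (ReleasableLock ignored = readLock.acquire()) { ... }
        // After: lock explicitly and release in a finally block.
        long guardedRead() {
            readLock.lock();
            try {
                return 42L; // placeholder for the guarded work
            } finally {
                readLock.unlock();
            }
        }
    }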
*/ return new Location(current.generation, current.sizeInBytes() - 1, Integer.MAX_VALUE); + } finally { + readLock.unlock(); } } @@ -645,8 +678,11 @@ public long getLastSyncedGlobalCheckpoint() { } final Checkpoint getLastSyncedCheckpoint() { - try (ReleasableLock ignored = readLock.acquire()) { + readLock.lock(); + try { return current.getLastSyncedCheckpoint(); + } finally { + readLock.unlock(); } } @@ -665,7 +701,8 @@ public Snapshot newSnapshot() throws IOException { public Snapshot newSnapshot(long fromSeqNo, long toSeqNo) throws IOException { assert fromSeqNo <= toSeqNo : fromSeqNo + " > " + toSeqNo; assert fromSeqNo >= 0 : "from_seq_no must be non-negative " + fromSeqNo; - try (ReleasableLock ignored = readLock.acquire()) { + readLock.lock(); + try { ensureOpen(); TranslogSnapshot[] snapshots = Stream.concat(readers.stream(), Stream.of(current)) .filter(reader -> reader.getCheckpoint().minSeqNo <= toSeqNo && fromSeqNo <= reader.getCheckpoint().maxEffectiveSeqNo()) @@ -673,6 +710,8 @@ public Snapshot newSnapshot(long fromSeqNo, long toSeqNo) throws IOException { .toArray(TranslogSnapshot[]::new); final Snapshot snapshot = newMultiSnapshot(snapshots); return new SeqNoFilterSnapshot(snapshot, fromSeqNo, toSeqNo); + } finally { + readLock.unlock(); } } @@ -681,23 +720,28 @@ public Snapshot newSnapshot(long fromSeqNo, long toSeqNo) throws IOException { * this method will return null. */ public Operation readOperation(Location location) throws IOException { - try (ReleasableLock ignored = readLock.acquire()) { - ensureOpen(); - if (location.generation < getMinFileGeneration()) { - return null; - } - if (current.generation == location.generation) { - // no need to fsync here the read operation will ensure that buffers are written to disk - // if they are still in RAM and we are reading onto that position - return current.read(location); - } else { - // read backwards - it's likely we need to read on that is recent - for (int i = readers.size() - 1; i >= 0; i--) { - TranslogReader translogReader = readers.get(i); - if (translogReader.generation == location.generation) { - return translogReader.read(location); + try { + readLock.lock(); + try { + ensureOpen(); + if (location.generation < getMinFileGeneration()) { + return null; + } + if (current.generation == location.generation) { + // no need to fsync here the read operation will ensure that buffers are written to disk + // if they are still in RAM and we are reading onto that position + return current.read(location); + } else { + // read backwards - it's likely we need to read on that is recent + for (int i = readers.size() - 1; i >= 0; i--) { + TranslogReader translogReader = readers.get(i); + if (translogReader.generation == location.generation) { + return translogReader.read(location); + } } } + } finally { + readLock.unlock(); } } catch (final Exception ex) { closeOnTragicEvent(ex); @@ -727,24 +771,17 @@ private Snapshot newMultiSnapshot(TranslogSnapshot[] snapshots) throws IOExcepti } } - private Stream readersAboveMinSeqNo(long minSeqNo) { - assert readLock.isHeldByCurrentThread() || writeLock.isHeldByCurrentThread() - : "callers of readersAboveMinSeqNo must hold a lock: readLock [" - + readLock.isHeldByCurrentThread() - + "], writeLock [" - + readLock.isHeldByCurrentThread() - + "]"; - return Stream.concat(readers.stream(), Stream.of(current)).filter(reader -> minSeqNo <= reader.getCheckpoint().maxEffectiveSeqNo()); - } - /** * Acquires a lock on the translog files, preventing them from being trimmed */ public Closeable 
acquireRetentionLock() { - try (ReleasableLock lock = readLock.acquire()) { + readLock.lock(); + try { ensureOpen(); final long viewGen = getMinFileGeneration(); return acquireTranslogGenFromDeletionPolicy(viewGen); + } finally { + readLock.unlock(); } } @@ -764,9 +801,14 @@ private Closeable acquireTranslogGenFromDeletionPolicy(long viewGen) { * Sync's the translog. */ public void sync() throws IOException { - try (ReleasableLock lock = readLock.acquire()) { - if (closed.get() == false) { - current.sync(); + try { + readLock.lock(); + try { + if (closed.get() == false) { + current.sync(); + } + } finally { + readLock.unlock(); } } catch (final Exception ex) { closeOnTragicEvent(ex); @@ -778,8 +820,11 @@ public void sync() throws IOException { * Returns true if an fsync is required to ensure durability of the translogs operations or it's metadata. */ public boolean syncNeeded() { - try (ReleasableLock lock = readLock.acquire()) { + readLock.lock(); + try { return current.syncNeeded(); + } finally { + readLock.unlock(); } } @@ -799,7 +844,8 @@ static String getCommitCheckpointFileName(long generation) { public void trimOperations(long belowTerm, long aboveSeqNo) throws IOException { assert aboveSeqNo >= SequenceNumbers.NO_OPS_PERFORMED : "aboveSeqNo has to a valid sequence number"; - try (ReleasableLock lock = writeLock.acquire()) { + writeLock.lock(); + try { ensureOpen(); if (current.getPrimaryTerm() < belowTerm) { throw new IllegalArgumentException( @@ -831,6 +877,8 @@ public void trimOperations(long belowTerm, long aboveSeqNo) throws IOException { this.readers.clear(); this.readers.addAll(newReaders); + } finally { + writeLock.unlock(); } } @@ -840,13 +888,19 @@ public void trimOperations(long belowTerm, long aboveSeqNo) throws IOException { * @return Returns true iff this call caused an actual sync operation otherwise false */ public boolean ensureSynced(Location location, long globalCheckpoint) throws IOException { - try (ReleasableLock lock = readLock.acquire()) { - // if we have a new generation and the persisted global checkpoint is greater than or equal to the sync global checkpoint it's - // already synced - long persistedGlobalCheckpoint = current.getLastSyncedCheckpoint().globalCheckpoint; - if (location.generation == current.getGeneration() || persistedGlobalCheckpoint < globalCheckpoint) { - ensureOpen(); - return current.syncUpTo(location.translogLocation + location.size, globalCheckpoint); + try { + readLock.lock(); + try { + // if we have a new generation and the persisted global checkpoint is greater than or equal to the sync global checkpoint + // it's + // already synced + long persistedGlobalCheckpoint = current.getLastSyncedCheckpoint().globalCheckpoint; + if (location.generation == current.getGeneration() || persistedGlobalCheckpoint < globalCheckpoint) { + ensureOpen(); + return current.syncUpTo(location.translogLocation + location.size, globalCheckpoint); + } + } finally { + readLock.unlock(); } } catch (final Exception ex) { closeOnTragicEvent(ex); @@ -865,7 +919,6 @@ public boolean ensureSynced(Location location, long globalCheckpoint) throws IOE */ protected void closeOnTragicEvent(final Exception ex) { // we can not hold a read lock here because closing will attempt to obtain a write lock and that would result in self-deadlock - assert readLock.isHeldByCurrentThread() == false : Thread.currentThread().getName(); if (tragedy.get() != null) { try { close(); @@ -887,7 +940,8 @@ protected void closeOnTragicEvent(final Exception ex) { */ public TranslogStats stats() 
{ // acquire lock to make the two numbers roughly consistent (no file change half way) - try (ReleasableLock lock = readLock.acquire()) { + readLock.lock(); + try { final long uncommittedGen = minGenerationForSeqNo(deletionPolicy.getLocalCheckpointOfSafeCommit() + 1, current, readers); return new TranslogStats( totalOperations(), @@ -896,6 +950,8 @@ public TranslogStats stats() { sizeInBytesByMinGen(uncommittedGen), earliestLastModifiedAge() ); + } finally { + readLock.unlock(); } } @@ -908,20 +964,10 @@ public TranslogDeletionPolicy getDeletionPolicy() { return deletionPolicy; } - public static class Location implements Comparable { + public record Location(long generation, long translogLocation, int size) implements Comparable { public static Location EMPTY = new Location(0, 0, 0); - public final long generation; - public final long translogLocation; - public final int size; - - public Location(long generation, long translogLocation, int size) { - this.generation = generation; - this.translogLocation = translogLocation; - this.size = size; - } - @Override public String toString() { return "[generation: " + generation + ", location: " + translogLocation + ", size: " + size + "]"; @@ -929,38 +975,10 @@ public String toString() { @Override public int compareTo(Location o) { - if (generation == o.generation) { - return Long.compare(translogLocation, o.translogLocation); + int result = Long.compare(generation, o.generation); + if (result == 0) { + result = Long.compare(translogLocation, o.translogLocation); } - return Long.compare(generation, o.generation); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - Location location = (Location) o; - - if (generation != location.generation) { - return false; - } - if (translogLocation != location.translogLocation) { - return false; - } - return size == location.size; - - } - - @Override - public int hashCode() { - int result = Long.hashCode(generation); - result = 31 * result + Long.hashCode(translogLocation); - result = 31 * result + size; return result; } } @@ -1602,8 +1620,11 @@ public static void writeOperationWithSize(BytesStreamOutput out, Translog.Operat * @return the minimum generation for the sequence number */ public TranslogGeneration getMinGenerationForSeqNo(final long seqNo) { - try (ReleasableLock ignored = readLock.acquire()) { + readLock.lock(); + try { return new TranslogGeneration(translogUUID, minGenerationForSeqNo(seqNo, current, readers)); + } finally { + readLock.unlock(); } } @@ -1627,7 +1648,8 @@ public void rollGeneration() throws IOException { if (current.totalOperations() == 0 && primaryTermSupplier.getAsLong() == current.getPrimaryTerm()) { return; } - try (Releasable ignored = writeLock.acquire()) { + writeLock.lock(); + try { ensureOpen(); try { final TranslogReader reader = current.closeIntoReader(); @@ -1642,6 +1664,8 @@ public void rollGeneration() throws IOException { closeOnTragicEvent(e); throw e; } + } finally { + writeLock.unlock(); } } @@ -1657,7 +1681,8 @@ void syncBeforeRollGeneration() throws IOException { */ public void trimUnreferencedReaders() throws IOException { // first check under read lock if any readers can be trimmed - try (ReleasableLock ignored = readLock.acquire()) { + readLock.lock(); + try { if (closed.get()) { // we're shutdown potentially on some tragic event, don't delete anything return; @@ -1665,11 +1690,14 @@ public void trimUnreferencedReaders() throws IOException { if 
(getMinReferencedGen() == getMinFileGeneration()) { return; } + } finally { + readLock.unlock(); } // move most of the data to disk to reduce the time the write lock is held sync(); - try (ReleasableLock ignored = writeLock.acquire()) { + writeLock.lock(); + try { if (closed.get()) { // we're shutdown potentially on some tragic event, don't delete anything return; @@ -1701,11 +1729,12 @@ public void trimUnreferencedReaders() throws IOException { } catch (final Exception ex) { closeOnTragicEvent(ex); throw ex; + } finally { + writeLock.unlock(); } } private long getMinReferencedGen() { - assert readLock.isHeldByCurrentThread() || writeLock.isHeldByCurrentThread(); long minReferencedGen = Math.min( deletionPolicy.getMinTranslogGenRequiredByLocks(), minGenerationForSeqNo(deletionPolicy.getLocalCheckpointOfSafeCommit() + 1, current, readers) @@ -1736,29 +1765,23 @@ void deleteReaderFiles(TranslogReader reader) { } void closeFilesIfNoPendingRetentionLocks() throws IOException { - try (ReleasableLock ignored = writeLock.acquire()) { + writeLock.lock(); + try { if (closed.get() && deletionPolicy.pendingTranslogRefCount() == 0) { logger.trace("closing files. translog is closed and there are no pending retention locks"); ArrayList toClose = new ArrayList<>(readers); toClose.add(current); IOUtils.close(toClose); } + } finally { + writeLock.unlock(); } } /** * References a transaction log generation */ - public static final class TranslogGeneration { - public final String translogUUID; - public final long translogFileGeneration; - - public TranslogGeneration(String translogUUID, long translogFileGeneration) { - this.translogUUID = translogUUID; - this.translogFileGeneration = translogFileGeneration; - } - - } + public record TranslogGeneration(String translogUUID, long translogFileGeneration) {} /** * Returns the current generation of this translog. This corresponds to the latest uncommitted translog generation @@ -1835,13 +1858,16 @@ public String getTranslogUUID() { * existing readers, this value is not necessary to be the max seq_no of all operations have been stored in this translog. */ public long getMaxSeqNo() { - try (ReleasableLock ignored = readLock.acquire()) { + readLock.lock(); + try { ensureOpen(); final OptionalLong maxSeqNo = Stream.concat(readers.stream(), Stream.of(current)) .mapToLong(reader -> reader.getCheckpoint().maxSeqNo) .max(); assert maxSeqNo.isPresent() : "must have at least one translog generation"; return maxSeqNo.getAsLong(); + } finally { + readLock.unlock(); } } diff --git a/server/src/main/java/org/elasticsearch/indices/DateFieldRangeInfo.java b/server/src/main/java/org/elasticsearch/indices/DateFieldRangeInfo.java new file mode 100644 index 0000000000000..ddeb3f370be12 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/indices/DateFieldRangeInfo.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.indices; + +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.shard.IndexLongFieldRange; + +/** + * Data holder of timestamp fields held in cluster state IndexMetadata. 
+ */ +public final class DateFieldRangeInfo { + + private final DateFieldMapper.DateFieldType timestampFieldType; + private final IndexLongFieldRange timestampRange; + private final DateFieldMapper.DateFieldType eventIngestedFieldType; + private final IndexLongFieldRange eventIngestedRange; + + public DateFieldRangeInfo( + DateFieldMapper.DateFieldType timestampFieldType, + IndexLongFieldRange timestampRange, + DateFieldMapper.DateFieldType eventIngestedFieldType, + IndexLongFieldRange eventIngestedRange + ) { + this.timestampFieldType = timestampFieldType; + this.timestampRange = timestampRange; + this.eventIngestedFieldType = eventIngestedFieldType; + this.eventIngestedRange = eventIngestedRange; + } + + public DateFieldMapper.DateFieldType getTimestampFieldType() { + return timestampFieldType; + } + + public IndexLongFieldRange getTimestampRange() { + return timestampRange; + } + + public DateFieldMapper.DateFieldType getEventIngestedFieldType() { + return eventIngestedFieldType; + } + + public IndexLongFieldRange getEventIngestedRange() { + return eventIngestedRange; + } +} diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java index 17e0105d59d8c..03df21531d4cc 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -41,6 +41,7 @@ import org.elasticsearch.index.mapper.IgnoredFieldMapper; import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper; import org.elasticsearch.index.mapper.IndexFieldMapper; +import org.elasticsearch.index.mapper.IndexModeFieldMapper; import org.elasticsearch.index.mapper.IpFieldMapper; import org.elasticsearch.index.mapper.IpScriptFieldType; import org.elasticsearch.index.mapper.KeywordFieldMapper; @@ -258,6 +259,7 @@ private static Map initBuiltInMetadataMa builtInMetadataMappers.put(TimeSeriesIdFieldMapper.NAME, TimeSeriesIdFieldMapper.PARSER); builtInMetadataMappers.put(TimeSeriesRoutingHashFieldMapper.NAME, TimeSeriesRoutingHashFieldMapper.PARSER); builtInMetadataMappers.put(IndexFieldMapper.NAME, IndexFieldMapper.PARSER); + builtInMetadataMappers.put(IndexModeFieldMapper.NAME, IndexModeFieldMapper.PARSER); builtInMetadataMappers.put(SourceFieldMapper.NAME, SourceFieldMapper.PARSER); builtInMetadataMappers.put(IgnoredSourceFieldMapper.NAME, IgnoredSourceFieldMapper.PARSER); builtInMetadataMappers.put(NestedPathFieldMapper.NAME, NestedPathFieldMapper.PARSER); diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 199bbc54fa3d6..203d7d5a0aba8 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -98,7 +98,6 @@ import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.get.GetStats; -import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MapperMetrics; import org.elasticsearch.index.mapper.MapperRegistry; @@ -683,9 +682,6 @@ public T withTempIndexService( CheckedFunction indexServiceConsumer ) throws IOException, E { final Index index = indexMetadata.getIndex(); - if (hasIndex(index)) { - throw new ResourceAlreadyExistsException(index); - } List finalListeners = List.of( // double check that shard 
is not created. new IndexEventListener() { @@ -1767,7 +1763,13 @@ public DataRewriteContext getDataRewriteContext(LongSupplier nowInMillis) { } public CoordinatorRewriteContextProvider getCoordinatorRewriteContextProvider(LongSupplier nowInMillis) { - return new CoordinatorRewriteContextProvider(parserConfig, client, nowInMillis, clusterService::state, this::getTimestampFieldType); + return new CoordinatorRewriteContextProvider( + parserConfig, + client, + nowInMillis, + clusterService::state, + this::getTimestampFieldTypeInfo + ); } /** @@ -1857,14 +1859,16 @@ public boolean allPendingDanglingIndicesWritten() { } /** - * @return the field type of the {@code @timestamp} field of the given index, or {@code null} if: + * @return DateFieldRangeInfo holding the field types of the {@code @timestamp} and {@code event.ingested} fields of the index. + * or {@code null} if: * - the index is not found, * - the field is not found, or - * - the field is not a timestamp field. + * - the mapping is not known yet, or + * - the index does not have a useful timestamp field. */ @Nullable - public DateFieldMapper.DateFieldType getTimestampFieldType(Index index) { - return timestampFieldMapperService.getTimestampFieldType(index); + public DateFieldRangeInfo getTimestampFieldTypeInfo(Index index) { + return timestampFieldMapperService.getTimestampFieldTypeMap(index); } public IndexScopedSettings getIndexScopedSettings() { diff --git a/server/src/main/java/org/elasticsearch/indices/TimestampFieldMapperService.java b/server/src/main/java/org/elasticsearch/indices/TimestampFieldMapperService.java index 15e409df552bd..9b23762e29490 100644 --- a/server/src/main/java/org/elasticsearch/indices/TimestampFieldMapperService.java +++ b/server/src/main/java/org/elasticsearch/indices/TimestampFieldMapperService.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.UncategorizedExecutionException; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; @@ -33,6 +34,7 @@ import java.util.Map; import java.util.Objects; +import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; @@ -40,8 +42,9 @@ import static org.elasticsearch.core.Strings.format; /** - * Tracks the mapping of the {@code @timestamp} field of immutable indices that expose their timestamp range in their index metadata. - * Coordinating nodes do not have (easy) access to mappings for all indices, so we extract the type of this one field from the mapping here. + * Tracks the mapping of the '@timestamp' and 'event.ingested' fields of immutable indices that expose their timestamp range in their + * index metadata. Coordinating nodes do not have (easy) access to mappings for all indices, so we extract the type of these two fields + * from the mapping here, since timestamp fields can have millis or nanos level resolution. */ public class TimestampFieldMapperService extends AbstractLifecycleComponent implements ClusterStateApplier { @@ -51,10 +54,12 @@ public class TimestampFieldMapperService extends AbstractLifecycleComponent impl private final ExecutorService executor; // single thread to construct mapper services async as needed /** - * The type of the {@code @timestamp} field keyed by index. 
Futures may be completed with {@code null} to indicate that there is - * no usable {@code @timestamp} field. + * The type of the 'event.ingested' and/or '@timestamp' fields keyed by index. + * The inner map is keyed by field name ('@timestamp' or 'event.ingested'). + * Futures may be completed with {@code null} to indicate that there is + * no usable timestamp field. */ - private final Map> fieldTypesByIndex = ConcurrentCollections.newConcurrentMap(); + private final Map> fieldTypesByIndex = ConcurrentCollections.newConcurrentMap(); public TimestampFieldMapperService(Settings settings, ThreadPool threadPool, IndicesService indicesService) { this.indicesService = indicesService; @@ -100,8 +105,8 @@ public void applyClusterState(ClusterChangedEvent event) { final Index index = indexMetadata.getIndex(); if (hasUsefulTimestampField(indexMetadata) && fieldTypesByIndex.containsKey(index) == false) { - logger.trace("computing timestamp mapping for {}", index); - final PlainActionFuture future = new PlainActionFuture<>(); + logger.trace("computing timestamp mapping(s) for {}", index); + final PlainActionFuture future = new PlainActionFuture<>(); fieldTypesByIndex.put(index, future); final IndexService indexService = indicesService.indexService(index); @@ -146,34 +151,53 @@ private static boolean hasUsefulTimestampField(IndexMetadata indexMetadata) { return true; } - final IndexLongFieldRange timestampRange = indexMetadata.getTimestampRange(); - return timestampRange.isComplete() && timestampRange != IndexLongFieldRange.UNKNOWN; + IndexLongFieldRange timestampRange = indexMetadata.getTimestampRange(); + if (timestampRange.isComplete() && timestampRange != IndexLongFieldRange.UNKNOWN) { + return true; + } + + IndexLongFieldRange eventIngestedRange = indexMetadata.getEventIngestedRange(); + return eventIngestedRange.isComplete() && eventIngestedRange != IndexLongFieldRange.UNKNOWN; } - private static DateFieldMapper.DateFieldType fromMapperService(MapperService mapperService) { - final MappedFieldType mappedFieldType = mapperService.fieldType(DataStream.TIMESTAMP_FIELD_NAME); - if (mappedFieldType instanceof DateFieldMapper.DateFieldType) { - return (DateFieldMapper.DateFieldType) mappedFieldType; - } else { + private static DateFieldRangeInfo fromMapperService(MapperService mapperService) { + DateFieldMapper.DateFieldType timestampFieldType = null; + DateFieldMapper.DateFieldType eventIngestedFieldType = null; + + MappedFieldType mappedFieldType = mapperService.fieldType(DataStream.TIMESTAMP_FIELD_NAME); + if (mappedFieldType instanceof DateFieldMapper.DateFieldType dateFieldType) { + timestampFieldType = dateFieldType; + } + mappedFieldType = mapperService.fieldType(IndexMetadata.EVENT_INGESTED_FIELD_NAME); + if (mappedFieldType instanceof DateFieldMapper.DateFieldType dateFieldType) { + eventIngestedFieldType = dateFieldType; + } + if (timestampFieldType == null && eventIngestedFieldType == null) { return null; } + // the mapper only fills in the field types, not the actual range values + return new DateFieldRangeInfo(timestampFieldType, null, eventIngestedFieldType, null); } /** - * @return the field type of the {@code @timestamp} field of the given index, or {@code null} if: + * @return DateFieldRangeInfo holding the field types of the {@code @timestamp} and {@code event.ingested} fields of the index. + * or {@code null} if: * - the index is not found, * - the field is not found, * - the mapping is not known yet, or - * - the field is not a timestamp field. 
+ * - the index does not have a useful timestamp field. */ @Nullable - public DateFieldMapper.DateFieldType getTimestampFieldType(Index index) { - final PlainActionFuture future = fieldTypesByIndex.get(index); + public DateFieldRangeInfo getTimestampFieldTypeMap(Index index) { + final PlainActionFuture future = fieldTypesByIndex.get(index); if (future == null || future.isDone() == false) { return null; } - // call non-blocking actionResult() as we could be on a network or scheduler thread which we must not block - return future.actionResult(); + // call non-blocking result() as we could be on a network or scheduler thread which we must not block + try { + return future.result(); + } catch (ExecutionException e) { + throw new UncategorizedExecutionException("An error occurred fetching timestamp field type for " + index, e); + } } - } diff --git a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index a2e30b9e18098..d91b19fda1185 100644 --- a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.MemorySizeValue; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.core.Booleans; import org.elasticsearch.core.SuppressForbidden; @@ -65,7 +66,7 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService { Property.NodeScope ); - public static final Setting TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.memorySizeSetting( + public static final Setting TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING = new Setting<>( "indices.breaker.total.limit", settings -> { if (USE_REAL_MEMORY_USAGE_SETTING.get(settings)) { @@ -74,6 +75,7 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService { return "70%"; } }, + (s) -> MemorySizeValue.parseHeapRatioOrDeprecatedByteSizeValue(s, "indices.breaker.total.limit", 50), Property.Dynamic, Property.NodeScope ); diff --git a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index d409c3009ef5b..dd5ad26c58b12 100644 --- a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -46,6 +46,7 @@ import org.elasticsearch.common.util.concurrent.ThrottledTaskRunner; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.ShardLockObtainFailedException; import org.elasticsearch.gateway.GatewayService; @@ -417,7 +418,10 @@ protected void doRun() throws Exception { // lock is released so it's guaranteed to be deleted by the time we get the lock indicesService.processPendingDeletes(index, indexSettings, timeout); } catch (ShardLockObtainFailedException exc) { - logger.warn("[{}] failed to lock all shards for index - timed out after [{}]]", index, timeout); + logger.warn( + Strings.format("[%s] failed to lock all shards for index - timed out after [%s]]", index, 
timeout), + exc + ); } catch (InterruptedException e) { logger.warn("[{}] failed to lock all shards for index - interrupted", index); } @@ -905,6 +909,7 @@ private void updateShard(ShardRouting shardRouting, Shard shard, ClusterState cl + state + "], mark shard as started", shard.getTimestampRange(), + shard.getEventIngestedRange(), ActionListener.noop(), clusterState ); @@ -966,12 +971,17 @@ private RecoveryListener(final ShardRouting shardRouting, final long primaryTerm } @Override - public void onRecoveryDone(final RecoveryState state, ShardLongFieldRange timestampMillisFieldRange) { + public void onRecoveryDone( + final RecoveryState state, + ShardLongFieldRange timestampMillisFieldRange, + ShardLongFieldRange eventIngestedMillisFieldRange + ) { shardStateAction.shardStarted( shardRouting, primaryTerm, "after " + state.getRecoverySource(), timestampMillisFieldRange, + eventIngestedMillisFieldRange, ActionListener.noop() ); } @@ -1123,6 +1133,13 @@ public interface Shard { @Nullable ShardLongFieldRange getTimestampRange(); + /** + * @return the range of the {@code @event.ingested} field for this shard, or {@link ShardLongFieldRange#EMPTY} if this field is not + * found, or {@link ShardLongFieldRange#UNKNOWN} if its range is not fixed. + */ + @Nullable + ShardLongFieldRange getEventIngestedRange(); + /** * Updates the shard state based on an incoming cluster state: * - Updates and persists the new routing value. diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 3447cc73a4288..ac618ac9308c4 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -517,7 +517,11 @@ private static void logGlobalCheckpointWarning(Logger logger, long startingSeqNo } public interface RecoveryListener { - void onRecoveryDone(RecoveryState state, ShardLongFieldRange timestampMillisFieldRange); + void onRecoveryDone( + RecoveryState state, + ShardLongFieldRange timestampMillisFieldRange, + ShardLongFieldRange eventIngestedMillisFieldRange + ); void onRecoveryFailure(RecoveryFailedException e, boolean sendShardFailure); } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 538cfdabef324..df2a9d16ebd6a 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -1052,7 +1052,7 @@ boolean hasSameLegacySyncId(Store.MetadataSnapshot source, Store.MetadataSnapsho } SequenceNumbers.CommitInfo sourceSeqNos = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(source.commitUserData().entrySet()); SequenceNumbers.CommitInfo targetSeqNos = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(target.commitUserData().entrySet()); - if (sourceSeqNos.localCheckpoint != targetSeqNos.localCheckpoint || targetSeqNos.maxSeqNo != sourceSeqNos.maxSeqNo) { + if (sourceSeqNos.localCheckpoint() != targetSeqNos.localCheckpoint() || targetSeqNos.maxSeqNo() != sourceSeqNos.maxSeqNo()) { final String message = "try to recover " + request.shardId() + " with sync id but " diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java 
b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index dda7203fa7b0e..3232099831d8b 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -323,7 +323,7 @@ public void markAsDone() { indexShard.postRecovery("peer recovery done", ActionListener.runBefore(new ActionListener<>() { @Override public void onResponse(Void unused) { - listener.onRecoveryDone(state(), indexShard.getTimestampRange()); + listener.onRecoveryDone(state(), indexShard.getTimestampRange(), indexShard.getEventIngestedRange()); } @Override diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index 0a60886797813..357fea343ea55 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -745,7 +745,7 @@ protected void doRun() { } final int slot = i; final Releasable ref = refs.acquire(); - final DocumentSizeObserver documentSizeObserver = documentParsingProvider.newDocumentSizeObserver(); + final DocumentSizeObserver documentSizeObserver = documentParsingProvider.newDocumentSizeObserver(indexRequest); final IngestDocument ingestDocument = newIngestDocument(indexRequest, documentSizeObserver); final org.elasticsearch.script.Metadata originalDocumentMetadata = ingestDocument.getMetadata().clone(); // the document listener gives us three-way logic: a document can fail processing (1), or it can diff --git a/server/src/main/java/org/elasticsearch/node/NodeClosedException.java b/server/src/main/java/org/elasticsearch/node/NodeClosedException.java index 4a99c9be7b78a..d2e0f71426df0 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeClosedException.java +++ b/server/src/main/java/org/elasticsearch/node/NodeClosedException.java @@ -28,4 +28,9 @@ public NodeClosedException(DiscoveryNode node) { public NodeClosedException(StreamInput in) throws IOException { super(in); } + + @Override + public Throwable fillInStackTrace() { + return this; // this exception doesn't imply a bug, no need for a stack trace + } } diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index bcf8451e5fe54..aa0f9b8552d22 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -883,7 +883,7 @@ record PluginServiceInstances( circuitBreakerService, createUsageService(), systemIndices, - telemetryProvider.getTracer(), + telemetryProvider, clusterService, rerouteService, buildReservedStateHandlers( diff --git a/server/src/main/java/org/elasticsearch/persistent/NodePersistentTasksExecutor.java b/server/src/main/java/org/elasticsearch/persistent/NodePersistentTasksExecutor.java index a75c4b3352475..396b5bd5c59a2 100644 --- a/server/src/main/java/org/elasticsearch/persistent/NodePersistentTasksExecutor.java +++ b/server/src/main/java/org/elasticsearch/persistent/NodePersistentTasksExecutor.java @@ -35,7 +35,6 @@ protected void doRun() throws Exception { } catch (Exception ex) { task.markAsFailed(ex); } - } }); } diff --git a/server/src/main/java/org/elasticsearch/plugins/interceptor/RestServerActionPlugin.java b/server/src/main/java/org/elasticsearch/plugins/interceptor/RestServerActionPlugin.java index 44653dcf8b5fe..29e4efe576116 100644 --- 
a/server/src/main/java/org/elasticsearch/plugins/interceptor/RestServerActionPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/interceptor/RestServerActionPlugin.java @@ -15,7 +15,7 @@ import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestInterceptor; -import org.elasticsearch.telemetry.tracing.Tracer; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.usage.UsageService; import java.util.function.UnaryOperator; @@ -58,7 +58,7 @@ default RestController getRestController( NodeClient client, CircuitBreakerService circuitBreakerService, UsageService usageService, - Tracer tracer + TelemetryProvider telemetryProvider ) { return null; } diff --git a/server/src/main/java/org/elasticsearch/plugins/internal/DocumentParsingProvider.java b/server/src/main/java/org/elasticsearch/plugins/internal/DocumentParsingProvider.java index 6fe1e48b25272..d29b893447be0 100644 --- a/server/src/main/java/org/elasticsearch/plugins/internal/DocumentParsingProvider.java +++ b/server/src/main/java/org/elasticsearch/plugins/internal/DocumentParsingProvider.java @@ -8,6 +8,7 @@ package org.elasticsearch.plugins.internal; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.index.mapper.MapperService; /** @@ -17,20 +18,6 @@ public interface DocumentParsingProvider { DocumentParsingProvider EMPTY_INSTANCE = new DocumentParsingProvider() { }; - /** - * @return a new 'empty' observer to use when observing parsing - */ - default DocumentSizeObserver newDocumentSizeObserver() { - return DocumentSizeObserver.EMPTY_INSTANCE; - } - - /** - * @return an observer with a previously observed value (fixed to this value, not continuing) - */ - default DocumentSizeObserver newFixedSizeDocumentObserver(long normalisedBytesParsed) { - return DocumentSizeObserver.EMPTY_INSTANCE; - } - /** * @return an instance of a reporter to use when parsing has been completed and indexing successful */ @@ -49,4 +36,10 @@ default DocumentSizeAccumulator createDocumentSizeAccumulator() { return DocumentSizeAccumulator.EMPTY_INSTANCE; } + /** + * @return an observer + */ + default DocumentSizeObserver newDocumentSizeObserver(DocWriteRequest request) { + return DocumentSizeObserver.EMPTY_INSTANCE; + } } diff --git a/server/src/main/java/org/elasticsearch/plugins/internal/DocumentSizeObserver.java b/server/src/main/java/org/elasticsearch/plugins/internal/DocumentSizeObserver.java index 149b89844a74c..386a90b65b60f 100644 --- a/server/src/main/java/org/elasticsearch/plugins/internal/DocumentSizeObserver.java +++ b/server/src/main/java/org/elasticsearch/plugins/internal/DocumentSizeObserver.java @@ -28,6 +28,7 @@ public XContentParser wrapParser(XContentParser xContentParser) { public long normalisedBytesParsed() { return 0; } + }; /** @@ -40,7 +41,17 @@ public long normalisedBytesParsed() { /** * Returns the state gathered during parsing + * * @return a number representing a state parsed */ long normalisedBytesParsed(); + + /** + * Indicates if an observer was used on an update request with script + * + * @return true if update was done by script, false otherwise + */ + default boolean isUpdateByScript() { + return false; + } } diff --git a/server/src/main/java/org/elasticsearch/repositories/ShardGenerations.java b/server/src/main/java/org/elasticsearch/repositories/ShardGenerations.java index 4c34f2e192a26..0dcb28278a66d 100644 --- a/server/src/main/java/org/elasticsearch/repositories/ShardGenerations.java +++ 
b/server/src/main/java/org/elasticsearch/repositories/ShardGenerations.java @@ -8,6 +8,8 @@ package org.elasticsearch.repositories; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.common.Strings; import org.elasticsearch.core.Nullable; @@ -30,6 +32,8 @@ */ public final class ShardGenerations { + private static final Logger logger = LogManager.getLogger(ShardGenerations.class); + public static final ShardGenerations EMPTY = new ShardGenerations(Collections.emptyMap()); /** @@ -88,7 +92,7 @@ public Collection indices() { } /** - * Computes the obsolete shard index generations that can be deleted once this instance was written to the repository. + * Computes the obsolete shard index generations that can be deleted once this instance is written to the repository. * Note: This method should only be used when finalizing a snapshot and we can safely assume that data has only been added but not * removed from shard paths. * @@ -109,6 +113,13 @@ public Map> obsoleteShardGenerations(Shar // Since this method assumes only additions and no removals of shards, a null updated generation means no update if (updatedGeneration != null && oldGeneration != null && oldGeneration.equals(updatedGeneration) == false) { obsoleteShardIndices.put(i, oldGeneration); + logger.debug( + "Marking snapshot generation [{}] for cleanup. The new generation is [{}]. Index [{}], shard ID [{}]", + oldGeneration, + updatedGeneration, + indexId, + i + ); } } result.put(indexId, Collections.unmodifiableMap(obsoleteShardIndices)); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 8f55bf16c1674..5b7a11969973d 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -3946,4 +3946,16 @@ public boolean hasAtomicOverwrites() { public int getReadBufferSizeInBytes() { return bufferSize; } + + /** + * @return extra information to be included in the exception message emitted on failure of a repository analysis. + */ + public String getAnalysisFailureExtraDetail() { + return Strings.format( + """ + Elasticsearch observed the storage system underneath this repository behaved incorrectly which indicates it is not \ + suitable for use with Elasticsearch snapshots. See [%s] for further information.""", + ReferenceDocs.SNAPSHOT_REPOSITORY_ANALYSIS + ); + } } diff --git a/server/src/main/java/org/elasticsearch/reservedstate/TransformState.java b/server/src/main/java/org/elasticsearch/reservedstate/TransformState.java index 05c3d2be5d174..a958b0ea1e29d 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/TransformState.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/TransformState.java @@ -8,24 +8,13 @@ package org.elasticsearch.reservedstate; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterState; import java.util.Set; -import java.util.function.Consumer; /** * A {@link ClusterState} wrapper used by the ReservedClusterStateService to pass the * current state as well as previous keys set by an {@link ReservedClusterStateHandler} to each transform * step of the cluster state update. 
- * - * Each {@link ReservedClusterStateHandler} can also provide a non cluster state transform consumer that should run after - * the cluster state is fully validated. This allows for handlers to perform extra steps, like clearing caches or saving - * other state outside the cluster state. The consumer, if provided, must return a {@link NonStateTransformResult} with - * the keys that will be saved as reserved in the cluster state. */ -public record TransformState(ClusterState state, Set keys, Consumer> nonStateTransform) { - public TransformState(ClusterState state, Set keys) { - this(state, keys, null); - } -} +public record TransformState(ClusterState state, Set keys) {} diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java index a281db9f02383..20a115a484ab5 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java @@ -13,7 +13,6 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.ReservedStateErrorMetadata; import org.elasticsearch.cluster.metadata.ReservedStateMetadata; @@ -22,7 +21,6 @@ import org.elasticsearch.cluster.service.MasterServiceTaskQueue; import org.elasticsearch.common.Priority; import org.elasticsearch.core.Tuple; -import org.elasticsearch.reservedstate.NonStateTransformResult; import org.elasticsearch.reservedstate.ReservedClusterStateHandler; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.xcontent.ConstructingObjectParser; @@ -30,8 +28,6 @@ import org.elasticsearch.xcontent.XContentParser; import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashSet; import java.util.List; @@ -156,7 +152,6 @@ public void initEmpty(String namespace, ActionListener lis new ReservedStateUpdateTask( namespace, emptyState, - List.of(), Map.of(), List.of(), // error state should not be possible since there is no metadata being parsed or processed @@ -210,65 +205,44 @@ public void process(String namespace, ReservedStateChunk reservedStateChunk, Con return; } - // We trial run all handler validations to ensure that we can process all of the cluster state error free. During - // the trial run we collect 'consumers' (functions) for any non cluster state transforms that need to run. - var trialRunResult = trialRun(namespace, state, reservedStateChunk, orderedHandlers); + // We trial run all handler validations to ensure that we can process all of the cluster state error free. + var trialRunErrors = trialRun(namespace, state, reservedStateChunk, orderedHandlers); // this is not using the modified trial state above, but that doesn't matter, we're just setting errors here - var error = checkAndReportError(namespace, trialRunResult.errors, reservedStateVersion); + var error = checkAndReportError(namespace, trialRunErrors, reservedStateVersion); if (error != null) { errorListener.accept(error); return; } - - // Since we have validated that the cluster state update can be correctly performed in the trial run, we now - // execute the non cluster state transforms. 
These are assumed to be async and we continue with the cluster state update - // after all have completed. This part of reserved cluster state update is non-atomic, some or all of the non-state - // transformations can succeed, and we can fail to eventually write the reserved cluster state. - executeNonStateTransformationSteps(trialRunResult.nonStateTransforms, new ActionListener<>() { - @Override - public void onResponse(Collection nonStateTransformResults) { - // Once all of the non-state transformation results complete, we can proceed to - // do the final save of the cluster state. The non-state transformation reserved keys are applied - // to the reserved state after all other key handlers. - updateTaskQueue.submitTask( - "reserved cluster state [" + namespace + "]", - new ReservedStateUpdateTask( - namespace, - reservedStateChunk, - nonStateTransformResults, - handlers, - orderedHandlers, - ReservedClusterStateService.this::updateErrorState, - new ActionListener<>() { - @Override - public void onResponse(ActionResponse.Empty empty) { - logger.info("Successfully applied new reserved cluster state for namespace [{}]", namespace); - errorListener.accept(null); - } - - @Override - public void onFailure(Exception e) { - // Don't spam the logs on repeated errors - if (isNewError(existingMetadata, reservedStateVersion.version())) { - logger.debug("Failed to apply reserved cluster state", e); - errorListener.accept(e); - } else { - errorListener.accept(null); - } - } + updateTaskQueue.submitTask( + "reserved cluster state [" + namespace + "]", + new ReservedStateUpdateTask( + namespace, + reservedStateChunk, + handlers, + orderedHandlers, + ReservedClusterStateService.this::updateErrorState, + new ActionListener<>() { + @Override + public void onResponse(ActionResponse.Empty empty) { + logger.info("Successfully applied new reserved cluster state for namespace [{}]", namespace); + errorListener.accept(null); + } + + @Override + public void onFailure(Exception e) { + // Don't spam the logs on repeated errors + if (isNewError(existingMetadata, reservedStateVersion.version())) { + logger.debug("Failed to apply reserved cluster state", e); + errorListener.accept(e); + } else { + errorListener.accept(null); } - ), - null - ); - } - - @Override - public void onFailure(Exception e) { - // If we encounter an error while runnin the non-state transforms, we avoid saving any cluster state. - errorListener.accept(checkAndReportError(namespace, List.of(stackTrace(e)), reservedStateVersion)); - } - }); + } + } + ), + null + ); } // package private for testing @@ -324,14 +298,13 @@ public void onFailure(Exception e) { /** * Goes through all of the handlers, runs the validation and the transform part of the cluster state. *

      - * While running the handlers we also collect any non cluster state transformation consumer actions that - * need to be performed asynchronously before we attempt to save the cluster state. The trial run does not - * result in an update of the cluster state, it's only purpose is to verify if we can correctly perform a - * cluster state update with the given reserved state chunk. + * The trial run does not result in an update of the cluster state, its only purpose is to verify + * if we can correctly perform a cluster state update with the given reserved state chunk. * * Package private for testing + * @return Any errors that occurred */ - TrialRunResult trialRun( + List trialRun( String namespace, ClusterState currentState, ReservedStateChunk stateChunk, @@ -341,7 +314,6 @@ TrialRunResult trialRun( Map reservedState = stateChunk.state(); List errors = new ArrayList<>(); - List>> nonStateTransforms = new ArrayList<>(); ClusterState state = currentState; @@ -351,39 +323,12 @@ TrialRunResult trialRun( Set existingKeys = keysForHandler(existingMetadata, handlerName); TransformState transformState = handler.transform(reservedState.get(handlerName), new TransformState(state, existingKeys)); state = transformState.state(); - if (transformState.nonStateTransform() != null) { - nonStateTransforms.add(transformState.nonStateTransform()); - } } catch (Exception e) { errors.add(format("Error processing %s state change: %s", handler.name(), stackTrace(e))); } } - return new TrialRunResult(nonStateTransforms, errors); - } - - /** - * Runs the non cluster state transformations asynchronously, collecting the {@link NonStateTransformResult} objects. *

      - * Once all non cluster state transformations have completed, we submit the cluster state update task, which - * updates all of the handler state, including the keys produced by the non cluster state transforms. The new reserved - * state version isn't written to the cluster state until the cluster state task runs. - * - * Package private for testing - */ - static void executeNonStateTransformationSteps( - List>> nonStateTransforms, - ActionListener> listener - ) { - final List result = Collections.synchronizedList(new ArrayList<>(nonStateTransforms.size())); - try (var listeners = new RefCountingListener(listener.map(ignored -> result))) { - for (var transform : nonStateTransforms) { - // non cluster state transforms don't modify the cluster state, they however are given a chance to return a more - // up-to-date version of the modified keys we should save in the reserved state. These calls are - // async and report back when they are done through the postTasksListener. - transform.accept(listeners.acquire(result::add)); - } - } + return errors; } /** @@ -449,9 +394,4 @@ private void addStateHandler(String key, Set keys, LinkedHashSet public void installStateHandler(ReservedClusterStateHandler handler) { this.handlers.put(handler.name(), handler); } - - /** - * Helper record class to combine the result of a trial run, non cluster state actions and any errors - */ - record TrialRunResult(List>> nonStateTransforms, List errors) {} } diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java index 1ac42a91736c3..93d3619889a48 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java @@ -20,7 +20,6 @@ import org.elasticsearch.cluster.metadata.ReservedStateHandlerMetadata; import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.gateway.GatewayService; -import org.elasticsearch.reservedstate.NonStateTransformResult; import org.elasticsearch.reservedstate.ReservedClusterStateHandler; import org.elasticsearch.reservedstate.TransformState; @@ -51,12 +50,10 @@ public class ReservedStateUpdateTask implements ClusterStateTaskListener { private final Collection orderedHandlers; private final Consumer errorReporter; private final ActionListener listener; - private final Collection nonStateTransformResults; public ReservedStateUpdateTask( String namespace, ReservedStateChunk stateChunk, - Collection nonStateTransformResults, Map> handlers, Collection orderedHandlers, Consumer errorReporter, @@ -64,7 +61,6 @@ public ReservedStateUpdateTask( ) { this.namespace = namespace; this.stateChunk = stateChunk; - this.nonStateTransformResults = nonStateTransformResults; this.handlers = handlers; this.orderedHandlers = orderedHandlers; this.errorReporter = errorReporter; @@ -115,12 +111,6 @@ protected ClusterState execute(final ClusterState currentState) { checkAndThrowOnError(errors, reservedStateVersion); - // Once we have set all of the handler state from the cluster state update tasks, we add the reserved keys - // from the non cluster state transforms. 
- for (var transform : nonStateTransformResults) { - reservedMetadataBuilder.putHandler(new ReservedStateHandlerMetadata(transform.handlerName(), transform.updatedKeys())); - } - // Remove the last error if we had previously encountered any in prior processing of reserved state reservedMetadataBuilder.errorMetadata(null); diff --git a/server/src/main/java/org/elasticsearch/rest/RestController.java b/server/src/main/java/org/elasticsearch/rest/RestController.java index b08f6ed81017a..3f9c0dbaa11d6 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestController.java +++ b/server/src/main/java/org/elasticsearch/rest/RestController.java @@ -39,6 +39,8 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.rest.RestHandler.Route; import org.elasticsearch.tasks.Task; +import org.elasticsearch.telemetry.TelemetryProvider; +import org.elasticsearch.telemetry.metric.LongCounter; import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.transport.Transports; import org.elasticsearch.usage.SearchUsageHolder; @@ -86,6 +88,9 @@ public class RestController implements HttpServerTransport.Dispatcher { static final String ELASTIC_PRODUCT_HTTP_HEADER_VALUE = "Elasticsearch"; static final Set RESERVED_PATHS = Set.of("/__elb_health__", "/__elb_health__/zk", "/_health", "/_health/zk"); private static final BytesReference FAVICON_RESPONSE; + public static final String STATUS_CODE_KEY = "es_rest_status_code"; + public static final String HANDLER_NAME_KEY = "es_rest_handler_name"; + public static final String REQUEST_METHOD_KEY = "es_rest_request_method"; static { try (InputStream stream = RestController.class.getResourceAsStream("/config/favicon.ico")) { @@ -107,18 +112,23 @@ public class RestController implements HttpServerTransport.Dispatcher { private final UsageService usageService; private final Tracer tracer; + private final LongCounter requestsCounter; // If true, the ServerlessScope annotations will be enforced private final ServerlessApiProtections apiProtections; + public static final String METRIC_REQUESTS_TOTAL = "es.rest.requests.total"; + public RestController( RestInterceptor restInterceptor, NodeClient client, CircuitBreakerService circuitBreakerService, UsageService usageService, - Tracer tracer + TelemetryProvider telemetryProvider ) { this.usageService = usageService; - this.tracer = tracer; + this.tracer = telemetryProvider.getTracer(); + this.requestsCounter = telemetryProvider.getMeterRegistry() + .registerLongCounter(METRIC_REQUESTS_TOTAL, "The total number of rest requests/responses processed", "unit"); if (restInterceptor == null) { restInterceptor = (request, channel, targetHandler, listener) -> listener.onResponse(Boolean.TRUE); } @@ -355,6 +365,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th sendFailure(channel, (Exception) e.getCause()); } else { channel.sendResponse(new RestResponse(channel, BAD_REQUEST, e)); + recordRequestMetric(BAD_REQUEST, requestsCounter); } } catch (final IOException e) { if (cause != null) { @@ -362,6 +373,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th } logger.warn("failed to send bad request response", e); channel.sendResponse(new RestResponse(INTERNAL_SERVER_ERROR, RestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY)); + recordRequestMetric(INTERNAL_SERVER_ERROR, requestsCounter); } } @@ -502,8 +514,10 @@ public void onFailure(Exception e) { @SuppressWarnings("unused") protected void validateRequest(RestRequest request, 
RestHandler handler, NodeClient client) throws ElasticsearchStatusException {} - private static void sendFailure(RestChannel responseChannel, Exception e) throws IOException { - responseChannel.sendResponse(new RestResponse(responseChannel, e)); + private void sendFailure(RestChannel responseChannel, Exception e) throws IOException { + var restResponse = new RestResponse(responseChannel, e); + responseChannel.sendResponse(restResponse); + recordRequestMetric(restResponse.status(), requestsCounter); } /** @@ -602,6 +616,7 @@ private void tryAllHandlers(final RestRequest request, final RestChannel channel } catch (IllegalArgumentException e) { startTrace(threadContext, channel); channel.sendResponse(RestResponse.createSimpleErrorResponse(channel, BAD_REQUEST, e.getMessage())); + recordRequestMetric(BAD_REQUEST, requestsCounter); return; } @@ -629,7 +644,8 @@ private void tryAllHandlers(final RestRequest request, final RestChannel channel } } else { startTrace(threadContext, channel, handlers.getPath()); - dispatchRequest(request, channel, handler, handlers, threadContext); + var decoratedChannel = new MeteringRestChannelDecorator(channel, requestsCounter, handler.getConcreteRestHandler()); + dispatchRequest(request, decoratedChannel, handler, handlers, threadContext); return; } } @@ -689,7 +705,7 @@ public SearchUsageHolder getSearchUsageHolder() { * HTTP/1.1 - * 10.4.6 - 405 Method Not Allowed). */ - private static void handleUnsupportedHttpMethod( + private void handleUnsupportedHttpMethod( String uri, @Nullable RestRequest.Method method, final RestChannel channel, @@ -712,9 +728,11 @@ private static void handleUnsupportedHttpMethod( restResponse.addHeader("Allow", Strings.collectionToDelimitedString(validMethodSet, ",")); } channel.sendResponse(restResponse); + recordRequestMetric(METHOD_NOT_ALLOWED, requestsCounter); } catch (final IOException e) { logger.warn("failed to send bad request response", e); channel.sendResponse(new RestResponse(INTERNAL_SERVER_ERROR, RestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY)); + recordRequestMetric(INTERNAL_SERVER_ERROR, requestsCounter); } } @@ -725,7 +743,7 @@ private static void handleUnsupportedHttpMethod( * HTTP/1.1 - 9.2 * - Options). */ - private static void handleOptionsRequest(RestChannel channel, Set validMethodSet) { + private void handleOptionsRequest(RestChannel channel, Set validMethodSet) { RestResponse restResponse = new RestResponse(OK, TEXT_CONTENT_TYPE, BytesArray.EMPTY); // When we have an OPTIONS HTTP request and no valid handlers, simply send OK by default (with the Access Control Origin header // which gets automatically added). 
@@ -733,13 +751,14 @@ private static void handleOptionsRequest(RestChannel channel, Set getValidHandlerMethodSet(String rawPath) { return validMethods; } - private static final class ResourceHandlingHttpChannel implements RestChannel { + private static void recordRequestMetric(RestStatus statusCode, String handlerName, String requestMethod, LongCounter requestsCounter) { + try { + Map attributes = Map.of( + STATUS_CODE_KEY, + statusCode.getStatus(), + HANDLER_NAME_KEY, + handlerName, + REQUEST_METHOD_KEY, + requestMethod + ); + requestsCounter.incrementBy(1, attributes); + } catch (Exception ex) { + logger.error("Cannot track request status code", ex); + } + } + + private static void recordRequestMetric(RestStatus statusCode, LongCounter requestsCounter) { + try { + Map attributes = Map.of(STATUS_CODE_KEY, statusCode.getStatus()); + requestsCounter.incrementBy(1, attributes); + } catch (Exception ex) { + logger.error("Cannot track request status code", ex); + } + } + + private static class DelegatingRestChannel implements RestChannel { + private final RestChannel delegate; - private final CircuitBreakerService circuitBreakerService; - private final int contentLength; - private final MethodHandlers methodHandlers; - private final long startTime; - private final AtomicBoolean closed = new AtomicBoolean(); - ResourceHandlingHttpChannel( - RestChannel delegate, - CircuitBreakerService circuitBreakerService, - int contentLength, - MethodHandlers methodHandlers - ) { + private DelegatingRestChannel(RestChannel delegate) { this.delegate = delegate; - this.circuitBreakerService = circuitBreakerService; - this.contentLength = contentLength; - this.methodHandlers = methodHandlers; - this.startTime = rawRelativeTimeInMillis(); } @Override @@ -843,6 +874,50 @@ public boolean detailedErrorsEnabled() { return delegate.detailedErrorsEnabled(); } + @Override + public void sendResponse(RestResponse response) { + delegate.sendResponse(response); + } + } + + private static final class MeteringRestChannelDecorator extends DelegatingRestChannel { + + private final LongCounter requestsCounter; + private final RestHandler restHandler; + + private MeteringRestChannelDecorator(RestChannel delegate, LongCounter requestCounter, RestHandler restHandler) { + super(delegate); + this.requestsCounter = requestCounter; + this.restHandler = restHandler; + } + + @Override + public void sendResponse(RestResponse response) { + super.sendResponse(response); + recordRequestMetric(response.status(), restHandler.getName(), request().method().name(), requestsCounter); + } + } + + private static final class ResourceHandlingHttpChannel extends DelegatingRestChannel { + private final CircuitBreakerService circuitBreakerService; + private final int contentLength; + private final MethodHandlers methodHandlers; + private final long startTime; + private final AtomicBoolean closed = new AtomicBoolean(); + + ResourceHandlingHttpChannel( + RestChannel delegate, + CircuitBreakerService circuitBreakerService, + int contentLength, + MethodHandlers methodHandlers + ) { + super(delegate); + this.circuitBreakerService = circuitBreakerService; + this.contentLength = contentLength; + this.methodHandlers = methodHandlers; + this.startTime = rawRelativeTimeInMillis(); + } + @Override public void sendResponse(RestResponse response) { boolean success = false; @@ -866,7 +941,7 @@ public void sendResponse(RestResponse response) { } } } - delegate.sendResponse(response); + super.sendResponse(response); success = true; } finally { if (success == false) { 
diff --git a/server/src/main/java/org/elasticsearch/rest/RestHandler.java b/server/src/main/java/org/elasticsearch/rest/RestHandler.java index c490f68499783..11208a24ceb10 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/RestHandler.java @@ -126,6 +126,10 @@ default boolean mediaTypesValid(RestRequest request) { return request.getXContentType() != null; } + default String getName() { + return this.getClass().getSimpleName(); + } + class Route { private final Method method; diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/SearchResponseMetrics.java b/server/src/main/java/org/elasticsearch/rest/action/search/SearchResponseMetrics.java index 00f1f5d5804d6..bb6163999d153 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/SearchResponseMetrics.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/SearchResponseMetrics.java @@ -8,14 +8,40 @@ package org.elasticsearch.rest.action.search; +import org.elasticsearch.telemetry.metric.LongCounter; import org.elasticsearch.telemetry.metric.LongHistogram; import org.elasticsearch.telemetry.metric.MeterRegistry; +import java.util.Map; + +/** + * Container class for aggregated metrics about search responses. + */ public class SearchResponseMetrics { + public enum ResponseCountTotalStatus { + SUCCESS("succes"), + PARTIAL_FAILURE("partial_failure"), + FAILURE("failure"); + + private final String displayName; + + ResponseCountTotalStatus(String displayName) { + this.displayName = displayName; + } + + public String getDisplayName() { + return displayName; + } + } + + public static final String RESPONSE_COUNT_TOTAL_STATUS_ATTRIBUTE_NAME = "status"; + public static final String TOOK_DURATION_TOTAL_HISTOGRAM_NAME = "es.search_response.took_durations.histogram"; + public static final String RESPONSE_COUNT_TOTAL_COUNTER_NAME = "es.search_response.response_count.total"; private final LongHistogram tookDurationTotalMillisHistogram; + private final LongCounter responseCountTotalCounter; public SearchResponseMetrics(MeterRegistry meterRegistry) { this( @@ -23,16 +49,31 @@ public SearchResponseMetrics(MeterRegistry meterRegistry) { TOOK_DURATION_TOTAL_HISTOGRAM_NAME, "The SearchResponse.took durations in milliseconds, expressed as a histogram", "millis" + ), + meterRegistry.registerLongCounter( + RESPONSE_COUNT_TOTAL_COUNTER_NAME, + "The cumulative total of search responses with an attribute to describe " + + "success, partial failure, or failure, expressed as a single total counter and individual " + + "attribute counters", + "count" ) ); } - private SearchResponseMetrics(LongHistogram tookDurationTotalMillisHistogram) { + private SearchResponseMetrics(LongHistogram tookDurationTotalMillisHistogram, LongCounter responseCountTotalCounter) { this.tookDurationTotalMillisHistogram = tookDurationTotalMillisHistogram; + this.responseCountTotalCounter = responseCountTotalCounter; } public long recordTookTime(long tookTime) { tookDurationTotalMillisHistogram.record(tookTime); return tookTime; } + + public void incrementResponseCount(ResponseCountTotalStatus responseCountTotalStatus) { + responseCountTotalCounter.incrementBy( + 1L, + Map.of(RESPONSE_COUNT_TOTAL_STATUS_ATTRIBUTE_NAME, responseCountTotalStatus.getDisplayName()) + ); + } } diff --git a/server/src/main/java/org/elasticsearch/script/VectorScoreScriptUtils.java b/server/src/main/java/org/elasticsearch/script/VectorScoreScriptUtils.java index bccdd5782f277..ad7d74824a1d4 100644 --- 
a/server/src/main/java/org/elasticsearch/script/VectorScoreScriptUtils.java +++ b/server/src/main/java/org/elasticsearch/script/VectorScoreScriptUtils.java @@ -56,7 +56,7 @@ public static class ByteDenseVectorFunction extends DenseVectorFunction { */ public ByteDenseVectorFunction(ScoreScript scoreScript, DenseVectorDocValuesField field, List queryVector) { super(scoreScript, field); - DenseVector.checkDimensions(field.get().getDims(), queryVector.size()); + field.getElementType().checkDimensions(field.get().getDims(), queryVector.size()); this.queryVector = new byte[queryVector.size()]; float[] validateValues = new float[queryVector.size()]; int queryMagnitude = 0; @@ -168,7 +168,7 @@ public static final class L1Norm { public L1Norm(ScoreScript scoreScript, Object queryVector, String fieldName) { DenseVectorDocValuesField field = (DenseVectorDocValuesField) scoreScript.field(fieldName); function = switch (field.getElementType()) { - case BYTE -> { + case BYTE, BIT -> { if (queryVector instanceof List) { yield new ByteL1Norm(scoreScript, field, (List) queryVector); } else if (queryVector instanceof String s) { @@ -219,8 +219,8 @@ public static final class Hamming { @SuppressWarnings("unchecked") public Hamming(ScoreScript scoreScript, Object queryVector, String fieldName) { DenseVectorDocValuesField field = (DenseVectorDocValuesField) scoreScript.field(fieldName); - if (field.getElementType() != DenseVectorFieldMapper.ElementType.BYTE) { - throw new IllegalArgumentException("hamming distance is only supported for byte vectors"); + if (field.getElementType() == DenseVectorFieldMapper.ElementType.FLOAT) { + throw new IllegalArgumentException("hamming distance is only supported for byte or bit vectors"); } if (queryVector instanceof List) { function = new ByteHammingDistance(scoreScript, field, (List) queryVector); @@ -278,7 +278,7 @@ public static final class L2Norm { public L2Norm(ScoreScript scoreScript, Object queryVector, String fieldName) { DenseVectorDocValuesField field = (DenseVectorDocValuesField) scoreScript.field(fieldName); function = switch (field.getElementType()) { - case BYTE -> { + case BYTE, BIT -> { if (queryVector instanceof List) { yield new ByteL2Norm(scoreScript, field, (List) queryVector); } else if (queryVector instanceof String s) { @@ -342,7 +342,7 @@ public static final class DotProduct { public DotProduct(ScoreScript scoreScript, Object queryVector, String fieldName) { DenseVectorDocValuesField field = (DenseVectorDocValuesField) scoreScript.field(fieldName); function = switch (field.getElementType()) { - case BYTE -> { + case BYTE, BIT -> { if (queryVector instanceof List) { yield new ByteDotProduct(scoreScript, field, (List) queryVector); } else if (queryVector instanceof String s) { @@ -406,7 +406,7 @@ public static final class CosineSimilarity { public CosineSimilarity(ScoreScript scoreScript, Object queryVector, String fieldName) { DenseVectorDocValuesField field = (DenseVectorDocValuesField) scoreScript.field(fieldName); function = switch (field.getElementType()) { - case BYTE -> { + case BYTE, BIT -> { if (queryVector instanceof List) { yield new ByteCosineSimilarity(scoreScript, field, (List) queryVector); } else if (queryVector instanceof String s) { diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/BitBinaryDenseVector.java b/server/src/main/java/org/elasticsearch/script/field/vectors/BitBinaryDenseVector.java new file mode 100644 index 0000000000000..10420543ad181 --- /dev/null +++ 
b/server/src/main/java/org/elasticsearch/script/field/vectors/BitBinaryDenseVector.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.script.field.vectors; + +import org.apache.lucene.util.BytesRef; + +import java.util.List; + +public class BitBinaryDenseVector extends ByteBinaryDenseVector { + + public BitBinaryDenseVector(byte[] vectorValue, BytesRef docVector, int dims) { + super(vectorValue, docVector, dims); + } + + @Override + public void checkDimensions(int qvDims) { + if (qvDims != dims) { + throw new IllegalArgumentException( + "The query vector has a different number of dimensions [" + + qvDims * Byte.SIZE + + "] than the document vectors [" + + dims * Byte.SIZE + + "]." + ); + } + } + + @Override + public int l1Norm(byte[] queryVector) { + return hamming(queryVector); + } + + @Override + public double l1Norm(List queryVector) { + return hamming(queryVector); + } + + @Override + public double l2Norm(byte[] queryVector) { + return Math.sqrt(hamming(queryVector)); + } + + @Override + public double l2Norm(List queryVector) { + return Math.sqrt(hamming(queryVector)); + } + + @Override + public int dotProduct(byte[] queryVector) { + throw new UnsupportedOperationException("dotProduct is not supported for bit vectors."); + } + + @Override + public double cosineSimilarity(float[] queryVector, boolean normalizeQueryVector) { + throw new UnsupportedOperationException("cosineSimilarity is not supported for bit vectors."); + } + + @Override + public double dotProduct(List queryVector) { + throw new UnsupportedOperationException("dotProduct is not supported for bit vectors."); + } + + @Override + public double cosineSimilarity(byte[] queryVector, float qvMagnitude) { + throw new UnsupportedOperationException("cosineSimilarity is not supported for bit vectors."); + } + + @Override + public double cosineSimilarity(List queryVector) { + throw new UnsupportedOperationException("cosineSimilarity is not supported for bit vectors."); + } + + @Override + public double dotProduct(float[] queryVector) { + throw new UnsupportedOperationException("dotProduct is not supported for bit vectors."); + } + + @Override + public int getDims() { + return dims * Byte.SIZE; + } +} diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/BitBinaryDenseVectorDocValuesField.java b/server/src/main/java/org/elasticsearch/script/field/vectors/BitBinaryDenseVectorDocValuesField.java new file mode 100644 index 0000000000000..cb123c54dfecf --- /dev/null +++ b/server/src/main/java/org/elasticsearch/script/field/vectors/BitBinaryDenseVectorDocValuesField.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.script.field.vectors; + +import org.apache.lucene.index.BinaryDocValues; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.ElementType; + +public class BitBinaryDenseVectorDocValuesField extends ByteBinaryDenseVectorDocValuesField { + + public BitBinaryDenseVectorDocValuesField(BinaryDocValues input, String name, ElementType elementType, int dims) { + super(input, name, elementType, dims / 8); + } + + @Override + protected DenseVector getVector() { + return new BitBinaryDenseVector(vectorValue, value, dims); + } +} diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/BitKnnDenseVector.java b/server/src/main/java/org/elasticsearch/script/field/vectors/BitKnnDenseVector.java new file mode 100644 index 0000000000000..ce9d990c75851 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/script/field/vectors/BitKnnDenseVector.java @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.script.field.vectors; + +import java.util.List; + +public class BitKnnDenseVector extends ByteKnnDenseVector { + + public BitKnnDenseVector(byte[] vector) { + super(vector); + } + + @Override + public void checkDimensions(int qvDims) { + if (qvDims != docVector.length) { + throw new IllegalArgumentException( + "The query vector has a different number of dimensions [" + + qvDims * Byte.SIZE + + "] than the document vectors [" + + docVector.length * Byte.SIZE + + "]." 
+ ); + } + } + + @Override + public float getMagnitude() { + if (magnitudeCalculated == false) { + magnitude = DenseVector.getBitMagnitude(docVector, docVector.length); + magnitudeCalculated = true; + } + return magnitude; + } + + @Override + public int l1Norm(byte[] queryVector) { + return hamming(queryVector); + } + + @Override + public double l1Norm(List queryVector) { + return hamming(queryVector); + } + + @Override + public double l2Norm(byte[] queryVector) { + return Math.sqrt(hamming(queryVector)); + } + + @Override + public double l2Norm(List queryVector) { + return Math.sqrt(hamming(queryVector)); + } + + @Override + public int dotProduct(byte[] queryVector) { + throw new UnsupportedOperationException("dotProduct is not supported for bit vectors."); + } + + @Override + public double cosineSimilarity(float[] queryVector, boolean normalizeQueryVector) { + throw new UnsupportedOperationException("cosineSimilarity is not supported for bit vectors."); + } + + @Override + public double dotProduct(List queryVector) { + throw new UnsupportedOperationException("dotProduct is not supported for bit vectors."); + } + + @Override + public double cosineSimilarity(byte[] queryVector, float qvMagnitude) { + throw new UnsupportedOperationException("cosineSimilarity is not supported for bit vectors."); + } + + @Override + public double cosineSimilarity(List queryVector) { + throw new UnsupportedOperationException("cosineSimilarity is not supported for bit vectors."); + } + + @Override + public double dotProduct(float[] queryVector) { + throw new UnsupportedOperationException("dotProduct is not supported for bit vectors."); + } + + @Override + public int getDims() { + return docVector.length * Byte.SIZE; + } +} diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/BitKnnDenseVectorDocValuesField.java b/server/src/main/java/org/elasticsearch/script/field/vectors/BitKnnDenseVectorDocValuesField.java new file mode 100644 index 0000000000000..10421d992727e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/script/field/vectors/BitKnnDenseVectorDocValuesField.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.script.field.vectors; + +import org.apache.lucene.index.ByteVectorValues; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; + +public class BitKnnDenseVectorDocValuesField extends ByteKnnDenseVectorDocValuesField { + + public BitKnnDenseVectorDocValuesField(@Nullable ByteVectorValues input, String name, int dims) { + super(input, name, dims / 8, DenseVectorFieldMapper.ElementType.BIT); + } + + @Override + protected DenseVector getVector() { + return new BitKnnDenseVector(vector); + } + +} diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/ByteBinaryDenseVector.java b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteBinaryDenseVector.java index c009397452c8a..e5c2d6a370f12 100644 --- a/server/src/main/java/org/elasticsearch/script/field/vectors/ByteBinaryDenseVector.java +++ b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteBinaryDenseVector.java @@ -21,7 +21,7 @@ public class ByteBinaryDenseVector implements DenseVector { private final BytesRef docVector; private final byte[] vectorValue; - private final int dims; + protected final int dims; private float[] floatDocVector; private boolean magnitudeDecoded; @@ -102,7 +102,7 @@ public double l1Norm(List queryVector) { @Override public int hamming(byte[] queryVector) { - return VectorUtil.xorBitCount(queryVector, vectorValue); + return ESVectorUtil.xorBitCount(queryVector, vectorValue); } @Override diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/ByteBinaryDenseVectorDocValuesField.java b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteBinaryDenseVectorDocValuesField.java index b767cd72c4341..c7ce8cd5e937f 100644 --- a/server/src/main/java/org/elasticsearch/script/field/vectors/ByteBinaryDenseVectorDocValuesField.java +++ b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteBinaryDenseVectorDocValuesField.java @@ -17,11 +17,11 @@ public class ByteBinaryDenseVectorDocValuesField extends DenseVectorDocValuesField { - private final BinaryDocValues input; - private final int dims; - private final byte[] vectorValue; - private boolean decoded; - private BytesRef value; + protected final BinaryDocValues input; + protected final int dims; + protected final byte[] vectorValue; + protected boolean decoded; + protected BytesRef value; public ByteBinaryDenseVectorDocValuesField(BinaryDocValues input, String name, ElementType elementType, int dims) { super(name, elementType); @@ -50,13 +50,17 @@ public boolean isEmpty() { return value == null; } + protected DenseVector getVector() { + return new ByteBinaryDenseVector(vectorValue, value, dims); + } + @Override public DenseVector get() { if (isEmpty()) { return DenseVector.EMPTY; } decodeVectorIfNecessary(); - return new ByteBinaryDenseVector(vectorValue, value, dims); + return getVector(); } @Override @@ -65,7 +69,7 @@ public DenseVector get(DenseVector defaultValue) { return defaultValue; } decodeVectorIfNecessary(); - return new ByteBinaryDenseVector(vectorValue, value, dims); + return getVector(); } @Override diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/ByteKnnDenseVector.java b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteKnnDenseVector.java index e0ba032826aa1..0145eb3eae04b 100644 --- a/server/src/main/java/org/elasticsearch/script/field/vectors/ByteKnnDenseVector.java +++ 
b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteKnnDenseVector.java @@ -103,7 +103,7 @@ public double l1Norm(List queryVector) { @Override public int hamming(byte[] queryVector) { - return VectorUtil.xorBitCount(queryVector, docVector); + return ESVectorUtil.xorBitCount(queryVector, docVector); } @Override diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/ByteKnnDenseVectorDocValuesField.java b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteKnnDenseVectorDocValuesField.java index a2a9ba1c1d750..a41e166d1d8f3 100644 --- a/server/src/main/java/org/elasticsearch/script/field/vectors/ByteKnnDenseVectorDocValuesField.java +++ b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteKnnDenseVectorDocValuesField.java @@ -23,7 +23,11 @@ public class ByteKnnDenseVectorDocValuesField extends DenseVectorDocValuesField protected final int dims; public ByteKnnDenseVectorDocValuesField(@Nullable ByteVectorValues input, String name, int dims) { - super(name, ElementType.BYTE); + this(input, name, dims, ElementType.BYTE); + } + + protected ByteKnnDenseVectorDocValuesField(@Nullable ByteVectorValues input, String name, int dims, ElementType elementType) { + super(name, elementType); this.dims = dims; this.input = input; } @@ -57,13 +61,17 @@ public boolean isEmpty() { return vector == null; } + protected DenseVector getVector() { + return new ByteKnnDenseVector(vector); + } + @Override public DenseVector get() { if (isEmpty()) { return DenseVector.EMPTY; } - return new ByteKnnDenseVector(vector); + return getVector(); } @Override @@ -72,7 +80,7 @@ public DenseVector get(DenseVector defaultValue) { return defaultValue; } - return new ByteKnnDenseVector(vector); + return getVector(); } @Override diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/DenseVector.java b/server/src/main/java/org/elasticsearch/script/field/vectors/DenseVector.java index a768e8add6663..d93daecf695a8 100644 --- a/server/src/main/java/org/elasticsearch/script/field/vectors/DenseVector.java +++ b/server/src/main/java/org/elasticsearch/script/field/vectors/DenseVector.java @@ -8,6 +8,7 @@ package org.elasticsearch.script.field.vectors; +import org.apache.lucene.util.BitUtil; import org.apache.lucene.util.VectorUtil; import java.util.List; @@ -25,6 +26,10 @@ class of the argument and checks dimensionality. 
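The ByteBinaryDenseVectorDocValuesField and ByteKnnDenseVectorDocValuesField changes above follow a small factory-method pattern: get() keeps the shared empty/decode bookkeeping, and a protected getVector() hook decides which DenseVector implementation is materialized, so the new BIT subclasses only override that one method (and divide the stored length by 8). A minimal sketch of that shape, with made-up names rather than the patch's classes, is:

// Minimal sketch of the protected-factory pattern used by the *DocValuesField classes.
// Vector, ByteField and BitField are illustrative stand-ins, not Elasticsearch types.
interface Vector {
    int getDims();
    Vector EMPTY = () -> 0;
}

class ByteField {
    protected final byte[] value;
    protected final int dims;

    ByteField(byte[] value, int dims) {
        this.value = value;
        this.dims = dims;
    }

    // Subclasses override only this hook to change which Vector is built.
    protected Vector getVector() {
        return () -> dims;                  // byte semantics: one dimension per stored byte
    }

    // Shared bookkeeping lives here once and is inherited unchanged.
    public final Vector get() {
        if (value == null) {
            return Vector.EMPTY;
        }
        return getVector();
    }
}

class BitField extends ByteField {
    BitField(byte[] value, int dims) {
        super(value, dims / 8);             // stored length is dims / 8 bytes
    }

    @Override
    protected Vector getVector() {
        return () -> dims * Byte.SIZE;      // bit semantics: 8 dimensions per stored byte
    }
}

class FactoryPatternDemo {
    public static void main(String[] args) {
        ByteField bytes = new ByteField(new byte[] { 1, 2, 3, 4 }, 4);
        ByteField bits = new BitField(new byte[] { 1, 2, 3, 4 }, 32);
        System.out.println(bytes.get().getDims());   // 4
        System.out.println(bits.get().getDims());    // 32
    }
}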
*/ public interface DenseVector { + default void checkDimensions(int qvDims) { + checkDimensions(getDims(), qvDims); + } + float[] getVector(); float getMagnitude(); @@ -38,13 +43,13 @@ public interface DenseVector { @SuppressWarnings("unchecked") default double dotProduct(Object queryVector) { if (queryVector instanceof float[] floats) { - checkDimensions(getDims(), floats.length); + checkDimensions(floats.length); return dotProduct(floats); } else if (queryVector instanceof List list) { - checkDimensions(getDims(), list.size()); + checkDimensions(list.size()); return dotProduct((List) list); } else if (queryVector instanceof byte[] bytes) { - checkDimensions(getDims(), bytes.length); + checkDimensions(bytes.length); return dotProduct(bytes); } @@ -60,13 +65,13 @@ default double dotProduct(Object queryVector) { @SuppressWarnings("unchecked") default double l1Norm(Object queryVector) { if (queryVector instanceof float[] floats) { - checkDimensions(getDims(), floats.length); + checkDimensions(floats.length); return l1Norm(floats); } else if (queryVector instanceof List list) { - checkDimensions(getDims(), list.size()); + checkDimensions(list.size()); return l1Norm((List) list); } else if (queryVector instanceof byte[] bytes) { - checkDimensions(getDims(), bytes.length); + checkDimensions(bytes.length); return l1Norm(bytes); } @@ -80,11 +85,11 @@ default double l1Norm(Object queryVector) { @SuppressWarnings("unchecked") default int hamming(Object queryVector) { if (queryVector instanceof List list) { - checkDimensions(getDims(), list.size()); + checkDimensions(list.size()); return hamming((List) list); } if (queryVector instanceof byte[] bytes) { - checkDimensions(getDims(), bytes.length); + checkDimensions(bytes.length); return hamming(bytes); } @@ -100,13 +105,13 @@ default int hamming(Object queryVector) { @SuppressWarnings("unchecked") default double l2Norm(Object queryVector) { if (queryVector instanceof float[] floats) { - checkDimensions(getDims(), floats.length); + checkDimensions(floats.length); return l2Norm(floats); } else if (queryVector instanceof List list) { - checkDimensions(getDims(), list.size()); + checkDimensions(list.size()); return l2Norm((List) list); } else if (queryVector instanceof byte[] bytes) { - checkDimensions(getDims(), bytes.length); + checkDimensions(bytes.length); return l2Norm(bytes); } @@ -150,13 +155,13 @@ default double cosineSimilarity(float[] queryVector) { @SuppressWarnings("unchecked") default double cosineSimilarity(Object queryVector) { if (queryVector instanceof float[] floats) { - checkDimensions(getDims(), floats.length); + checkDimensions(floats.length); return cosineSimilarity(floats); } else if (queryVector instanceof List list) { - checkDimensions(getDims(), list.size()); + checkDimensions(list.size()); return cosineSimilarity((List) list); } else if (queryVector instanceof byte[] bytes) { - checkDimensions(getDims(), bytes.length); + checkDimensions(bytes.length); return cosineSimilarity(bytes); } @@ -184,6 +189,20 @@ static float getMagnitude(byte[] vector, int dims) { return (float) Math.sqrt(mag); } + static float getBitMagnitude(byte[] vector, int dims) { + int count = 0; + int i = 0; + for (int upperBound = dims & -8; i < upperBound; i += 8) { + count += Long.bitCount((long) BitUtil.VH_NATIVE_LONG.get(vector, i)); + } + + while (i < dims) { + count += Integer.bitCount(vector[i] & 255); + ++i; + } + return (float) Math.sqrt(count); + } + static float getMagnitude(float[] vector) { return (float) Math.sqrt(VectorUtil.dotProduct(vector, 
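The new DenseVector.getBitMagnitude above defines the magnitude of a packed bit vector as the square root of its popcount, since each set bit contributes exactly 1 to the sum of squares; the VarHandle loop is only a striding optimization over that idea. A scalar sketch of the same computation (illustrative class name, not part of the patch) is:

// Scalar equivalent of getBitMagnitude: the magnitude of a bit vector is sqrt(popcount).
class BitMagnitudeSketch {

    static float bitMagnitude(byte[] vector) {
        int setBits = 0;
        for (byte b : vector) {
            setBits += Integer.bitCount(b & 0xFF);   // mask to treat the byte as unsigned
        }
        return (float) Math.sqrt(setBits);
    }

    public static void main(String[] args) {
        byte[] packed = { (byte) 0b0000_0111, (byte) 0b1000_0000 };   // 3 + 1 = 4 set bits
        System.out.println(bitMagnitude(packed));                     // 2.0
    }
}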
vector)); } diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/ESVectorUtil.java b/server/src/main/java/org/elasticsearch/script/field/vectors/ESVectorUtil.java new file mode 100644 index 0000000000000..7d9542bccf357 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/script/field/vectors/ESVectorUtil.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.script.field.vectors; + +import org.apache.lucene.util.BitUtil; +import org.apache.lucene.util.Constants; + +/** + * This class consists of a single utility method that provides XOR bit count computed over signed bytes. + * Remove this class when Lucene version > 9.11 is released, and replace with Lucene's VectorUtil directly. + */ +public class ESVectorUtil { + + /** + * For xorBitCount we stride over the values as either 64-bits (long) or 32-bits (int) at a time. + * On ARM Long::bitCount is not vectorized, and therefore produces less than optimal code, when + * compared to Integer::bitCount. While Long::bitCount is optimal on x64. + */ + static final boolean XOR_BIT_COUNT_STRIDE_AS_INT = Constants.OS_ARCH.equals("aarch64"); + + /** + * XOR bit count computed over signed bytes. + * + * @param a bytes containing a vector + * @param b bytes containing another vector, of the same dimension + * @return the value of the XOR bit count of the two vectors + */ + public static int xorBitCount(byte[] a, byte[] b) { + if (a.length != b.length) { + throw new IllegalArgumentException("vector dimensions differ: " + a.length + "!=" + b.length); + } + if (XOR_BIT_COUNT_STRIDE_AS_INT) { + return xorBitCountInt(a, b); + } else { + return xorBitCountLong(a, b); + } + } + + /** XOR bit count striding over 4 bytes at a time. */ + static int xorBitCountInt(byte[] a, byte[] b) { + int distance = 0, i = 0; + for (final int upperBound = a.length & -Integer.BYTES; i < upperBound; i += Integer.BYTES) { + distance += Integer.bitCount((int) BitUtil.VH_NATIVE_INT.get(a, i) ^ (int) BitUtil.VH_NATIVE_INT.get(b, i)); + } + // tail: + for (; i < a.length; i++) { + distance += Integer.bitCount((a[i] ^ b[i]) & 0xFF); + } + return distance; + } + + /** XOR bit count striding over 8 bytes at a time. */ + static int xorBitCountLong(byte[] a, byte[] b) { + int distance = 0, i = 0; + for (final int upperBound = a.length & -Long.BYTES; i < upperBound; i += Long.BYTES) { + distance += Long.bitCount((long) BitUtil.VH_NATIVE_LONG.get(a, i) ^ (long) BitUtil.VH_NATIVE_LONG.get(b, i)); + } + // tail: + for (; i < a.length; i++) { + distance += Integer.bitCount((a[i] ^ b[i]) & 0xFF); + } + return distance; + } + + private ESVectorUtil() {} +} diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index 4f16d3a5720fb..dc92cfd11fce3 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -943,8 +943,8 @@ public IdLoader newIdLoader() { // to the routing path. 
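The new ESVectorUtil above strides the XOR popcount 8 bytes (or 4 bytes on aarch64, where Integer::bitCount vectorizes better) at a time and finishes with a byte-at-a-time tail for lengths that are not a multiple of the stride. The sketch below checks the long-striding variant against a plain byte-at-a-time reference; the java.lang.invoke VarHandle stands in for Lucene's BitUtil.VH_NATIVE_LONG, and the names are illustrative only.

import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;
import java.nio.ByteOrder;
import java.util.Random;

// Sketch comparing the 8-byte-striding XOR popcount with a byte-at-a-time reference.
class XorBitCountSketch {

    private static final VarHandle NATIVE_LONG =
        MethodHandles.byteArrayViewVarHandle(long[].class, ByteOrder.nativeOrder());

    // Byte-at-a-time reference: popcount of the XOR of the two arrays.
    static int xorBitCountScalar(byte[] a, byte[] b) {
        int distance = 0;
        for (int i = 0; i < a.length; i++) {
            distance += Integer.bitCount((a[i] ^ b[i]) & 0xFF);
        }
        return distance;
    }

    // Long-striding variant: process 8 bytes per iteration, then a scalar tail.
    static int xorBitCountLong(byte[] a, byte[] b) {
        int distance = 0, i = 0;
        for (int upperBound = a.length & -Long.BYTES; i < upperBound; i += Long.BYTES) {
            distance += Long.bitCount((long) NATIVE_LONG.get(a, i) ^ (long) NATIVE_LONG.get(b, i));
        }
        for (; i < a.length; i++) {                      // tail for the remaining 0..7 bytes
            distance += Integer.bitCount((a[i] ^ b[i]) & 0xFF);
        }
        return distance;
    }

    public static void main(String[] args) {
        Random random = new Random(42);
        byte[] a = new byte[37];   // deliberately not a multiple of 8, to exercise the tail
        byte[] b = new byte[37];
        random.nextBytes(a);
        random.nextBytes(b);
        // Both strategies must agree; only the per-iteration stride differs.
        System.out.println(xorBitCountScalar(a, b) == xorBitCountLong(a, b));   // true
    }
}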
Set matchingRoutingPaths = new TreeSet<>(routingPaths); for (Mapper mapper : indexService.mapperService().mappingLookup().fieldMappers()) { - if (mapper instanceof KeywordFieldMapper && indexRouting.matchesField(mapper.name())) { - matchingRoutingPaths.add(mapper.name()); + if (mapper instanceof KeywordFieldMapper && indexRouting.matchesField(mapper.fullPath())) { + matchingRoutingPaths.add(mapper.fullPath()); } } routingPaths = new ArrayList<>(matchingRoutingPaths); diff --git a/server/src/main/java/org/elasticsearch/search/SearchFeatures.java b/server/src/main/java/org/elasticsearch/search/SearchFeatures.java new file mode 100644 index 0000000000000..9d6abda593272 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/SearchFeatures.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search; + +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.search.vectors.KnnVectorQueryBuilder; + +import java.util.Set; + +public final class SearchFeatures implements FeatureSpecification { + @Override + public Set getFeatures() { + return Set.of(KnnVectorQueryBuilder.K_PARAM_SUPPORTED); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index b45a2e2e2ca14..979a59b4d0b94 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -475,6 +475,7 @@ protected void putReaderContext(ReaderContext context) { } protected ReaderContext removeReaderContext(long id) { + logger.trace("removing reader context [{}]", id); return activeReaders.remove(id); } @@ -1175,6 +1176,7 @@ private void freeAllContextsForShard(ShardId shardId) { } public boolean freeReaderContext(ShardSearchContextId contextId) { + logger.trace("freeing reader context [{}]", contextId); if (sessionId.equals(contextId.getSessionId())) { try (ReaderContext context = removeReaderContext(contextId.getId())) { return context != null; @@ -1838,7 +1840,13 @@ public AggregationReduceContext.Builder aggReduceContextBuilder(Supplier isCanceled, - AggregatorFactories.Builder builders + AggregatorFactories.Builder builders, + IntConsumer multiBucketConsumer ) { super(bigArrays, scriptService, isCanceled, builders); + this.multiBucketConsumer = multiBucketConsumer; } - public ForPartial(BigArrays bigArrays, ScriptService scriptService, Supplier isCanceled, AggregationBuilder builder) { + public ForPartial( + BigArrays bigArrays, + ScriptService scriptService, + Supplier isCanceled, + AggregationBuilder builder, + IntConsumer multiBucketConsumer + ) { super(bigArrays, scriptService, isCanceled, builder); + this.multiBucketConsumer = multiBucketConsumer; } @Override @@ -158,7 +169,9 @@ public boolean isFinalReduce() { } @Override - protected void consumeBucketCountAndMaybeBreak(int size) {} + protected void consumeBucketCountAndMaybeBreak(int size) { + multiBucketConsumer.accept(size); + } @Override public PipelineTree pipelineTreeRoot() { @@ -167,7 +180,7 @@ public PipelineTree pipelineTreeRoot() { @Override protected 
AggregationReduceContext forSubAgg(AggregationBuilder sub) { - return new ForPartial(bigArrays(), scriptService(), isCanceled(), sub); + return new ForPartial(bigArrays(), scriptService(), isCanceled(), sub, multiBucketConsumer); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java index 795f51a729ed6..0cebf3d79d754 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java @@ -316,7 +316,10 @@ protected void doClose() {} protected void doPostCollection() throws IOException {} protected final InternalAggregations buildEmptySubAggregations() { - List aggs = new ArrayList<>(); + if (subAggregators.length == 0) { + return InternalAggregations.EMPTY; + } + List aggs = new ArrayList<>(subAggregators.length); for (Aggregator aggregator : subAggregators) { aggs.add(aggregator.buildEmptyAggregation()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java index b65f6b01de348..297bb81b27b25 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java @@ -34,7 +34,6 @@ import java.util.Optional; import java.util.stream.Collectors; -import static java.util.Collections.unmodifiableList; import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.common.xcontent.XContentParserUtils.parseTypedKeysObject; @@ -71,20 +70,10 @@ public Iterator iterator() { * The list of {@link InternalAggregation}s. */ public List asList() { - return unmodifiableList(aggregations); - } - - /** - * Returns the {@link InternalAggregation}s keyed by aggregation name. - */ - public Map asMap() { - return getAsMap(); + return aggregations; } - /** - * Returns the {@link InternalAggregation}s keyed by aggregation name. 
- */ - public Map getAsMap() { + private Map asMap() { if (aggregationsAsMap == null) { Map newAggregationsAsMap = Maps.newMapWithExpectedSize(aggregations.size()); for (InternalAggregation aggregation : aggregations) { @@ -237,11 +226,34 @@ public static InternalAggregations topLevelReduce(List agg } if (context.isFinalReduce()) { List reducedInternalAggs = reduced.getInternalAggregations(); - reducedInternalAggs = reducedInternalAggs.stream() - .map(agg -> agg.reducePipelines(agg, context, context.pipelineTreeRoot().subTree(agg.getName()))) - .collect(Collectors.toCollection(ArrayList::new)); + List internalAggregations = null; + for (int i = 0; i < reducedInternalAggs.size(); i++) { + InternalAggregation agg = reducedInternalAggs.get(i); + InternalAggregation internalAggregation = agg.reducePipelines( + agg, + context, + context.pipelineTreeRoot().subTree(agg.getName()) + ); + if (internalAggregation.equals(agg) == false) { + if (internalAggregations == null) { + internalAggregations = new ArrayList<>(reducedInternalAggs); + } + internalAggregations.set(i, internalAggregation); + } + } - for (PipelineAggregator pipelineAggregator : context.pipelineTreeRoot().aggregators()) { + var pipelineAggregators = context.pipelineTreeRoot().aggregators(); + if (pipelineAggregators.isEmpty()) { + if (internalAggregations == null) { + return reduced; + } + return from(internalAggregations); + } + if (internalAggregations != null) { + reducedInternalAggs = internalAggregations; + } + reducedInternalAggs = new ArrayList<>(reducedInternalAggs); + for (PipelineAggregator pipelineAggregator : pipelineAggregators) { SiblingPipelineAggregator sib = (SiblingPipelineAggregator) pipelineAggregator; InternalAggregation newAgg = sib.doReduce(from(reducedInternalAggs), context); reducedInternalAggs.add(newAgg); @@ -263,7 +275,7 @@ public static InternalAggregations reduce(List aggregation } // handle special case when there is just one aggregation if (aggregationsList.size() == 1) { - final List internalAggregations = aggregationsList.iterator().next().asList(); + final List internalAggregations = aggregationsList.get(0).asList(); final List reduced = new ArrayList<>(internalAggregations.size()); for (InternalAggregation aggregation : internalAggregations) { if (aggregation.mustReduceOnSingleInternalAgg()) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java index 6dd691bbf5aaa..e046b5fc9244c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java @@ -132,7 +132,7 @@ static Object resolvePropertyFromPath(List path, List consumer) { } private List reducePipelineBuckets(AggregationReduceContext reduceContext, PipelineTree pipelineTree) { - List reducedBuckets = new ArrayList<>(); - for (B bucket : getBuckets()) { - List aggs = new ArrayList<>(); - for (Aggregation agg : bucket.getAggregations()) { + List reducedBuckets = null; + var buckets = getBuckets(); + for (int bucketIndex = 0; bucketIndex < buckets.size(); bucketIndex++) { + B bucket = buckets.get(bucketIndex); + List aggs = null; + int aggIndex = 0; + for (InternalAggregation agg : bucket.getAggregations()) { PipelineTree subTree = pipelineTree.subTree(agg.getName()); - aggs.add(((InternalAggregation) agg).reducePipelines((InternalAggregation) agg, 
reduceContext, subTree)); + var reduced = agg.reducePipelines(agg, reduceContext, subTree); + if (reduced.equals(agg) == false) { + if (aggs == null) { + aggs = bucket.getAggregations().copyResults(); + } + aggs.set(aggIndex, reduced); + } + aggIndex++; + } + if (aggs != null) { + if (reducedBuckets == null) { + reducedBuckets = new ArrayList<>(buckets); + } + reducedBuckets.set(bucketIndex, createBucket(InternalAggregations.from(aggs), bucket)); } - reducedBuckets.add(createBucket(InternalAggregations.from(aggs), bucket)); } - return reducedBuckets; + return reducedBuckets == null ? buckets : reducedBuckets; } public abstract static class InternalBucket implements Bucket, Writeable { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketConsumerService.java b/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketConsumerService.java index c876f971a7c65..2519e4e263d00 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketConsumerService.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketConsumerService.java @@ -65,6 +65,11 @@ public TooManyBucketsException(StreamInput in) throws IOException { maxBuckets = in.readInt(); } + @Override + public Throwable fillInStackTrace() { + return this; // this exception doesn't imply a bug, no need for a stack trace + } + @Override protected void writeTo(StreamOutput out, Writer nestedExceptionsWriter) throws IOException { super.writeTo(out, nestedExceptionsWriter); @@ -134,10 +139,37 @@ public int getCount() { } } - public MultiBucketConsumer create() { + /** + * Similar to {@link MultiBucketConsumer} but it only checks the parent circuit breaker every 1024 calls. + * It provides protection for OOM during partial reductions. 
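The rewritten topLevelReduce and reducePipelineBuckets above share a copy-on-write shape: they walk the existing list and only clone it the first time a pipeline reduction actually produces a different object, so a reduce in which nothing changes returns the original aggregations untouched. A generic sketch of that pattern (names are illustrative, not Elasticsearch API) is:

import java.util.ArrayList;
import java.util.List;
import java.util.function.UnaryOperator;

// Copy-on-write list transform: clone the input only if some element actually changes.
final class CopyOnWriteReduceSketch {

    static <T> List<T> mapLazily(List<T> input, UnaryOperator<T> reduceOne) {
        List<T> copy = null;                       // stays null while nothing has changed
        for (int i = 0; i < input.size(); i++) {
            T original = input.get(i);
            T reduced = reduceOne.apply(original);
            if (reduced.equals(original) == false) {
                if (copy == null) {
                    copy = new ArrayList<>(input); // first change: pay for the copy once
                }
                copy.set(i, reduced);
            }
        }
        return copy == null ? input : copy;        // unchanged input is returned as-is
    }

    public static void main(String[] args) {
        List<String> aggs = List.of("terms", "histogram", "avg");
        // Identity pass: no copy is made, the very same list instance comes back.
        System.out.println(mapLazily(aggs, s -> s) == aggs);            // true
        // A real change triggers exactly one copy.
        System.out.println(mapLazily(aggs, String::toUpperCase));       // [TERMS, HISTOGRAM, AVG]
    }
}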
+ */ + private static class MultiBucketConsumerPartialReduction implements IntConsumer { + private final CircuitBreaker breaker; + + // aggregations execute in a single thread so no atomic here + private int callCount = 0; + + private MultiBucketConsumerPartialReduction(CircuitBreaker breaker) { + this.breaker = breaker; + } + + @Override + public void accept(int value) { + // check parent circuit breaker every 1024 calls + if ((++callCount & 0x3FF) == 0) { + breaker.addEstimateBytesAndMaybeBreak(0, "allocated_buckets"); + } + } + } + + public IntConsumer createForFinal() { return new MultiBucketConsumer(maxBucket, breaker); } + public IntConsumer createForPartial() { + return new MultiBucketConsumerPartialReduction(breaker); + } + public int getLimit() { return maxBucket; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java index 225b99fe40739..1fb7464dd5066 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java @@ -131,7 +131,7 @@ public final InternalAggregation reducePipelines( InternalAggregation reduced = this; if (pipelineTree.hasSubTrees()) { List aggs = new ArrayList<>(); - for (InternalAggregation agg : getAggregations().asList()) { + for (InternalAggregation agg : getAggregations()) { PipelineTree subTree = pipelineTree.subTree(agg.getName()); aggs.add(agg.reducePipelines(agg, reduceContext, subTree)); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AbstractHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AbstractHistogramAggregator.java index 62b7a0747ca00..04028de5656ca 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AbstractHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AbstractHistogramAggregator.java @@ -84,6 +84,9 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I double key = roundKey * interval + offset; return new InternalHistogram.Bucket(key, docCount, keyed, formatter, subAggregationResults); }, (owningBucketOrd, buckets) -> { + if (buckets.isEmpty()) { + return buildEmptyAggregation(); + } // the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AbstractHistogramBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AbstractHistogramBucket.java new file mode 100644 index 0000000000000..ef020cd55bfaf --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AbstractHistogramBucket.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
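MultiBucketConsumerPartialReduction above skips the per-request bucket limit during partial reduction and only pings the real-memory circuit breaker on every 1024th call; (++callCount & 0x3FF) == 0 is the usual power-of-two trick for callCount % 1024 == 0 without a division. A standalone sketch of that throttling (the Breaker interface is a stand-in for CircuitBreaker, not the real API) is:

import java.util.function.IntConsumer;

// Throttled check: perform the expensive breaker call only on every 1024th invocation.
class ThrottledBreakerCheckSketch {

    interface Breaker {                      // stand-in for the circuit breaker dependency
        void checkParent();
    }

    static IntConsumer throttled(Breaker breaker) {
        return new IntConsumer() {
            private int callCount = 0;       // single-threaded reduce, no atomics needed

            @Override
            public void accept(int addedBuckets) {
                // (x & 0x3FF) == 0  is equivalent to  x % 1024 == 0, since 1024 is a power of two
                if ((++callCount & 0x3FF) == 0) {
                    breaker.checkParent();
                }
            }
        };
    }

    public static void main(String[] args) {
        int[] checks = { 0 };
        IntConsumer consumer = throttled(() -> checks[0]++);
        for (int i = 0; i < 5000; i++) {
            consumer.accept(1);
        }
        System.out.println(checks[0]);       // 4: checked at calls 1024, 2048, 3072 and 4096
    }
}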
+ */ + +package org.elasticsearch.search.aggregations.bucket.histogram; + +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; + +/** + * A bucket in the histogram where documents fall in + */ +public abstract class AbstractHistogramBucket extends InternalMultiBucketAggregation.InternalBucket { + + protected final long docCount; + protected final InternalAggregations aggregations; + protected final transient DocValueFormat format; + + protected AbstractHistogramBucket(long docCount, InternalAggregations aggregations, DocValueFormat format) { + this.docCount = docCount; + this.aggregations = aggregations; + this.format = format; + } + + @Override + public final long getDocCount() { + return docCount; + } + + @Override + public final InternalAggregations getAggregations() { + return aggregations; + } + + public final DocValueFormat getFormatter() { + return format; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 2c57bd4b38a04..cb01aa5a31a9a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -340,6 +340,9 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I return buildAggregationsForVariableBuckets(owningBucketOrds, bucketOrds, (bucketValue, docCount, subAggregationResults) -> { return new InternalDateHistogram.Bucket(bucketValue, docCount, keyed, formatter, subAggregationResults); }, (owningBucketOrd, buckets) -> { + if (buckets.isEmpty()) { + return buildEmptyAggregation(); + } // the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/Histogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/Histogram.java index 834d3c10016cb..7d06e4ad583fc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/Histogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/Histogram.java @@ -10,8 +10,6 @@ import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import org.elasticsearch.xcontent.ParseField; -import java.util.List; - /** * A {@code histogram} aggregation. Defines multiple buckets, each representing an interval in a histogram. 
*/ @@ -25,17 +23,4 @@ public interface Histogram extends MultiBucketsAggregation { ParseField EXTENDED_BOUNDS_FIELD = new ParseField("extended_bounds"); ParseField HARD_BOUNDS_FIELD = new ParseField("hard_bounds"); - /** - * A bucket in the histogram where documents fall in - */ - interface Bucket extends MultiBucketsAggregation.Bucket { - - } - - /** - * @return The buckets of this histogram (each bucket representing an interval in the histogram) - */ - @Override - List getBuckets(); - } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index e75b2d2002b0f..4cfa7f449cf57 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -49,31 +49,22 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< Histogram, HistogramFactory { - public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket, KeyComparable { + public static class Bucket extends AbstractHistogramBucket implements KeyComparable { final long key; - final long docCount; - final InternalAggregations aggregations; private final transient boolean keyed; - protected final transient DocValueFormat format; public Bucket(long key, long docCount, boolean keyed, DocValueFormat format, InternalAggregations aggregations) { - this.format = format; + super(docCount, aggregations, format); this.keyed = keyed; this.key = key; - this.docCount = docCount; - this.aggregations = aggregations; } /** * Read from a stream. 
*/ - public Bucket(StreamInput in, boolean keyed, DocValueFormat format) throws IOException { - this.format = format; - this.keyed = keyed; - key = in.readLong(); - docCount = in.readVLong(); - aggregations = InternalAggregations.readFrom(in); + public static Bucket readFrom(StreamInput in, boolean keyed, DocValueFormat format) throws IOException { + return new Bucket(in.readLong(), in.readVLong(), keyed, format, InternalAggregations.readFrom(in)); } @Override @@ -109,16 +100,6 @@ public Object getKey() { return Instant.ofEpochMilli(key).atZone(ZoneOffset.UTC); } - @Override - public long getDocCount() { - return docCount; - } - - @Override - public InternalAggregations getAggregations() { - return aggregations; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { String keyAsString = format.format(key).toString(); @@ -142,10 +123,6 @@ public int compareKey(Bucket other) { return Long.compare(key, other.key); } - public DocValueFormat getFormatter() { - return format; - } - public boolean getKeyed() { return keyed; } @@ -259,7 +236,7 @@ public InternalDateHistogram(StreamInput in) throws IOException { } else { downsampledResultsOffset = false; } - buckets = in.readCollectionAsList(stream -> new Bucket(stream, keyed, format)); + buckets = in.readCollectionAsList(stream -> Bucket.readFrom(stream, keyed, format)); // we changed the order format in 8.13 for partial reduce, therefore we need to order them to perform merge sort if (in.getTransportVersion().between(TransportVersions.V_8_13_0, TransportVersions.HISTOGRAM_AGGS_KEY_SORTED)) { // list is mutable by #readCollectionAsList contract @@ -327,7 +304,6 @@ public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) } private List reduceBuckets(final PriorityQueue> pq, AggregationReduceContext reduceContext) { - int consumeBucketCount = 0; List reducedBuckets = new ArrayList<>(); if (pq.size() > 0) { // list of buckets coming from different shards that have the same key @@ -340,13 +316,7 @@ private List reduceBuckets(final PriorityQueue= minDocCount || reduceContext.isFinalReduce() == false) { - if (consumeBucketCount++ >= REPORT_EMPTY_EVERY) { - reduceContext.consumeBucketsAndMaybeBreak(consumeBucketCount); - consumeBucketCount = 0; - } - reducedBuckets.add(reduced); - } + maybeAddBucket(reduceContext, reducedBuckets, reduced); currentBuckets.clear(); key = top.current().key; } @@ -364,19 +334,21 @@ private List reduceBuckets(final PriorityQueue= minDocCount || reduceContext.isFinalReduce() == false) { - reducedBuckets.add(reduced); - if (consumeBucketCount++ >= REPORT_EMPTY_EVERY) { - reduceContext.consumeBucketsAndMaybeBreak(consumeBucketCount); - consumeBucketCount = 0; - } - } + maybeAddBucket(reduceContext, reducedBuckets, reduced); } } - reduceContext.consumeBucketsAndMaybeBreak(consumeBucketCount); return reducedBuckets; } + private void maybeAddBucket(AggregationReduceContext reduceContext, List reducedBuckets, Bucket reduced) { + if (reduced.getDocCount() >= minDocCount || reduceContext.isFinalReduce() == false) { + reduceContext.consumeBucketsAndMaybeBreak(1); + reducedBuckets.add(reduced); + } else { + reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(reduced)); + } + } + /** * Reduce a list of same-keyed buckets (from multiple shards) to a single bucket. This * requires all buckets to have the same key. @@ -525,9 +497,7 @@ public InternalAggregation get() { } if (InternalOrder.isKeyDesc(order)) { // we just need to reverse here... 
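The new maybeAddBucket helper above replaces the old batched REPORT_EMPTY_EVERY accounting: a kept bucket is charged to the multi-bucket consumer immediately with consumeBucketsAndMaybeBreak(1), while a bucket dropped by min_doc_count passes a negative count to hand back the budget its inner buckets already consumed. A toy model of that bookkeeping, with made-up types and a plain counter in place of the real consumer, is:

import java.util.ArrayList;
import java.util.List;

// Toy model of the +1 / -innerBuckets accounting done by maybeAddBucket.
class BucketAccountingSketch {

    static final class Bucket {
        final long docCount;
        final List<Bucket> subBuckets;

        Bucket(long docCount, List<Bucket> subBuckets) {
            this.docCount = docCount;
            this.subBuckets = subBuckets;
        }
    }

    static int countInnerBucket(Bucket bucket) {
        int count = 0;
        for (Bucket sub : bucket.subBuckets) {
            count += 1 + countInnerBucket(sub);
        }
        return count;
    }

    static int budgetUsed = 0;               // stands in for the multi-bucket consumer

    static void consumeBucketsAndMaybeBreak(int size) {
        budgetUsed += size;                  // a real consumer would also trip a breaker
    }

    static void maybeAddBucket(Bucket reduced, List<Bucket> out, long minDocCount, boolean finalReduce) {
        if (reduced.docCount >= minDocCount || finalReduce == false) {
            consumeBucketsAndMaybeBreak(1);  // charge the kept top-level bucket
            out.add(reduced);
        } else {
            // dropped bucket: release what its inner buckets were charged earlier
            consumeBucketsAndMaybeBreak(-countInnerBucket(reduced));
        }
    }

    public static void main(String[] args) {
        Bucket kept = new Bucket(10, List.of());
        Bucket dropped = new Bucket(0, List.of(new Bucket(0, List.of()), new Bucket(0, List.of())));
        consumeBucketsAndMaybeBreak(countInnerBucket(dropped));   // inner buckets charged earlier
        List<Bucket> out = new ArrayList<>();
        maybeAddBucket(kept, out, 1, true);
        maybeAddBucket(dropped, out, 1, true);
        System.out.println(out.size() + " kept, budget=" + budgetUsed);   // 1 kept, budget=1
    }
}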
- List reverse = new ArrayList<>(reducedBuckets); - Collections.reverse(reverse); - reducedBuckets = reverse; + Collections.reverse(reducedBuckets); } else if (InternalOrder.isKeyAsc(order) == false) { // nothing to do when sorting by key ascending, as data is already sorted since shards return // sorted buckets and the merge-sort performed by reduceBuckets maintains order. diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index 7b264ccb022e5..2404de76fdd35 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -44,31 +44,22 @@ public class InternalHistogram extends InternalMultiBucketAggregation { + public static class Bucket extends AbstractHistogramBucket implements KeyComparable { final double key; - final long docCount; - final InternalAggregations aggregations; private final transient boolean keyed; - protected final transient DocValueFormat format; public Bucket(double key, long docCount, boolean keyed, DocValueFormat format, InternalAggregations aggregations) { - this.format = format; + super(docCount, aggregations, format); this.keyed = keyed; this.key = key; - this.docCount = docCount; - this.aggregations = aggregations; } /** * Read from a stream. */ - public Bucket(StreamInput in, boolean keyed, DocValueFormat format) throws IOException { - this.format = format; - this.keyed = keyed; - key = in.readDouble(); - docCount = in.readVLong(); - aggregations = InternalAggregations.readFrom(in); + public static Bucket readFrom(StreamInput in, boolean keyed, DocValueFormat format) throws IOException { + return new Bucket(in.readDouble(), in.readVLong(), keyed, format, InternalAggregations.readFrom(in)); } @Override @@ -104,16 +95,6 @@ public Object getKey() { return key; } - @Override - public long getDocCount() { - return docCount; - } - - @Override - public InternalAggregations getAggregations() { - return aggregations; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { String keyAsString = format.format(key).toString(); @@ -137,10 +118,6 @@ public int compareKey(Bucket other) { return Double.compare(key, other.key); } - public DocValueFormat getFormatter() { - return format; - } - public boolean getKeyed() { return keyed; } @@ -242,7 +219,7 @@ public InternalHistogram(StreamInput in) throws IOException { } format = in.readNamedWriteable(DocValueFormat.class); keyed = in.readBoolean(); - buckets = in.readCollectionAsList(stream -> new Bucket(stream, keyed, format)); + buckets = in.readCollectionAsList(stream -> Bucket.readFrom(stream, keyed, format)); // we changed the order format in 8.13 for partial reduce, therefore we need to order them to perform merge sort if (in.getTransportVersion().between(TransportVersions.V_8_13_0, TransportVersions.HISTOGRAM_AGGS_KEY_SORTED)) { // list is mutable by #readCollectionAsList contract @@ -282,16 +259,21 @@ BucketOrder getOrder() { @Override public InternalHistogram create(List buckets) { + if (this.buckets.equals(buckets)) { + return this; + } return new InternalHistogram(name, buckets, order, minDocCount, emptyBucketInfo, format, keyed, metadata); } @Override public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) { + if 
(prototype.aggregations.equals(aggregations)) { + return prototype; + } return new Bucket(prototype.key, prototype.docCount, prototype.keyed, prototype.format, aggregations); } private List reduceBuckets(PriorityQueue> pq, AggregationReduceContext reduceContext) { - int consumeBucketCount = 0; List reducedBuckets = new ArrayList<>(); if (pq.size() > 0) { // list of buckets coming from different shards that have the same key @@ -305,13 +287,7 @@ private List reduceBuckets(PriorityQueue> pq, // The key changes, reduce what we already buffered and reset the buffer for current buckets. // Using Double.compare instead of != to handle NaN correctly. final Bucket reduced = reduceBucket(currentBuckets, reduceContext); - if (reduced.getDocCount() >= minDocCount || reduceContext.isFinalReduce() == false) { - reducedBuckets.add(reduced); - if (consumeBucketCount++ >= REPORT_EMPTY_EVERY) { - reduceContext.consumeBucketsAndMaybeBreak(consumeBucketCount); - consumeBucketCount = 0; - } - } + maybeAddBucket(reduceContext, reducedBuckets, reduced); currentBuckets.clear(); key = top.current().key; } @@ -329,20 +305,21 @@ private List reduceBuckets(PriorityQueue> pq, if (currentBuckets.isEmpty() == false) { final Bucket reduced = reduceBucket(currentBuckets, reduceContext); - if (reduced.getDocCount() >= minDocCount || reduceContext.isFinalReduce() == false) { - reducedBuckets.add(reduced); - if (consumeBucketCount++ >= REPORT_EMPTY_EVERY) { - reduceContext.consumeBucketsAndMaybeBreak(consumeBucketCount); - consumeBucketCount = 0; - } - } + maybeAddBucket(reduceContext, reducedBuckets, reduced); } } - - reduceContext.consumeBucketsAndMaybeBreak(consumeBucketCount); return reducedBuckets; } + private void maybeAddBucket(AggregationReduceContext reduceContext, List reducedBuckets, Bucket reduced) { + if (reduced.getDocCount() >= minDocCount || reduceContext.isFinalReduce() == false) { + reduceContext.consumeBucketsAndMaybeBreak(1); + reducedBuckets.add(reduced); + } else { + reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(reduced)); + } + } + private Bucket reduceBucket(List buckets, AggregationReduceContext context) { assert buckets.isEmpty() == false; try (BucketReducer reducer = new BucketReducer<>(buckets.get(0), context, buckets.size())) { @@ -471,9 +448,7 @@ public InternalAggregation get() { } if (InternalOrder.isKeyDesc(order)) { // we just need to reverse here... - List reverse = new ArrayList<>(reducedBuckets); - Collections.reverse(reverse); - reducedBuckets = reverse; + Collections.reverse(reducedBuckets); } else if (InternalOrder.isKeyAsc(order) == false) { // nothing to do when sorting by key ascending, as data is already sorted since shards return // sorted buckets and the merge-sort performed by reduceBuckets maintains order. 
@@ -481,6 +456,9 @@ public InternalAggregation get() { CollectionUtil.introSort(reducedBuckets, order.comparator()); } } + if (reducedBuckets.equals(buckets)) { + return InternalHistogram.this; + } return new InternalHistogram(getName(), reducedBuckets, order, minDocCount, emptyBucketInfo, format, keyed, getMetadata()); } }; @@ -526,14 +504,9 @@ public Number getKey(MultiBucketsAggregation.Bucket bucket) { } @Override + @SuppressWarnings({ "rawtypes", "unchecked" }) public InternalAggregation createAggregation(List buckets) { - // convert buckets to the right type - List buckets2 = new ArrayList<>(buckets.size()); - for (Object b : buckets) { - buckets2.add((Bucket) b); - } - buckets2 = Collections.unmodifiableList(buckets2); - return new InternalHistogram(name, buckets2, order, minDocCount, emptyBucketInfo, format, keyed, getMetadata()); + return new InternalHistogram(name, (List) buckets, order, minDocCount, emptyBucketInfo, format, keyed, getMetadata()); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java index 46b5a1b7629d8..675b5d218c882 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java @@ -37,7 +37,7 @@ public class InternalVariableWidthHistogram extends InternalMultiBucketAggregati InternalVariableWidthHistogram, InternalVariableWidthHistogram.Bucket> implements Histogram, HistogramFactory { - public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket, KeyComparable { + public static class Bucket extends AbstractHistogramBucket implements KeyComparable { public static class BucketBounds { public double min; @@ -72,28 +72,23 @@ public int hashCode() { } private final BucketBounds bounds; - private long docCount; - private InternalAggregations aggregations; - protected final transient DocValueFormat format; - private double centroid; + private final double centroid; public Bucket(double centroid, BucketBounds bounds, long docCount, DocValueFormat format, InternalAggregations aggregations) { - this.format = format; + super(docCount, aggregations, format); this.centroid = centroid; this.bounds = bounds; - this.docCount = docCount; - this.aggregations = aggregations; } /** * Read from a stream. 
*/ - public Bucket(StreamInput in, DocValueFormat format) throws IOException { - this.format = format; - centroid = in.readDouble(); - docCount = in.readVLong(); - bounds = new BucketBounds(in); - aggregations = InternalAggregations.readFrom(in); + public static Bucket readFrom(StreamInput in, DocValueFormat format) throws IOException { + final double centroid = in.readDouble(); + final long docCount = in.readVLong(); + final BucketBounds bounds = new BucketBounds(in); + final InternalAggregations aggregations = InternalAggregations.readFrom(in); + return new Bucket(centroid, bounds, docCount, format, aggregations); } @Override @@ -123,7 +118,7 @@ public int hashCode() { @Override public String getKeyAsString() { - return format.format((double) getKey()).toString(); + return format.format(centroid).toString(); } /** @@ -148,16 +143,6 @@ public double centroid() { return centroid; } - @Override - public long getDocCount() { - return docCount; - } - - @Override - public InternalAggregations getAggregations() { - return aggregations; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { String keyAsString = format.format((double) getKey()).toString(); @@ -231,7 +216,7 @@ public int hashCode() { } } - private List buckets; + private final List buckets; private final DocValueFormat format; private final int targetNumBuckets; final EmptyBucketInfo emptyBucketInfo; @@ -258,7 +243,7 @@ public InternalVariableWidthHistogram(StreamInput in) throws IOException { super(in); emptyBucketInfo = new EmptyBucketInfo(in); format = in.readNamedWriteable(DocValueFormat.class); - buckets = in.readCollectionAsList(stream -> new Bucket(stream, format)); + buckets = in.readCollectionAsList(stream -> Bucket.readFrom(stream, format)); targetNumBuckets = in.readVInt(); // we changed the order format in 8.13 for partial reduce, therefore we need to order them to perform merge sort if (in.getTransportVersion().between(TransportVersions.V_8_13_0, TransportVersions.HISTOGRAM_AGGS_KEY_SORTED)) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java index 7190589de38c4..bf923339c73f5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java @@ -172,7 +172,8 @@ public static boolean supportsParallelCollection(long cardinality, BucketOrder o return cardinality <= KEY_ORDER_CONCURRENCY_THRESHOLD; } BucketCountThresholds adjusted = TermsAggregatorFactory.adjustBucketCountThresholds(bucketCountThresholds, order); - return cardinality <= adjusted.getShardSize(); + // for cardinality equal to shard size, we don't know if there were more terms when merging. 
+ return cardinality < adjusted.getShardSize(); } return false; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java index 7e749b06442f6..38bcc912c29d4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java @@ -28,6 +28,7 @@ import java.io.IOException; import java.util.HashMap; +import java.util.List; import java.util.Map; import static java.util.Collections.singletonList; @@ -146,9 +147,11 @@ private State aggStateForResult(long owningBucketOrdinal) { return state; } + private static final List NULL_ITEM_LIST = singletonList(null); + @Override public InternalAggregation buildEmptyAggregation() { - return new InternalScriptedMetric(name, singletonList(null), reduceScript, metadata()); + return new InternalScriptedMetric(name, NULL_ITEM_LIST, reduceScript, metadata()); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index cb2c53a97fbc3..a80378db99348 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -10,12 +10,16 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Booleans; import org.elasticsearch.core.Nullable; @@ -28,6 +32,7 @@ import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.script.Script; import org.elasticsearch.search.SearchExtBuilder; +import org.elasticsearch.search.SearchService; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; @@ -43,7 +48,9 @@ import org.elasticsearch.search.retriever.RetrieverParserContext; import org.elasticsearch.search.searchafter.SearchAfterBuilder; import org.elasticsearch.search.slice.SliceBuilder; +import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.ScoreSortBuilder; +import org.elasticsearch.search.sort.ShardDocSortField; import org.elasticsearch.search.sort.SortBuilder; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortOrder; @@ -71,6 +78,7 @@ import java.util.stream.Collectors; import static java.util.Collections.emptyMap; +import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.index.query.AbstractQueryBuilder.parseTopLevelQuery; import static 
org.elasticsearch.search.internal.SearchContext.DEFAULT_TERMINATE_AFTER; import static org.elasticsearch.search.internal.SearchContext.TRACK_TOTAL_HITS_ACCURATE; @@ -78,10 +86,9 @@ /** * A search source builder allowing to easily build search source. Simple - * construction using - * {@link org.elasticsearch.search.builder.SearchSourceBuilder#searchSource()}. + * construction using {@link SearchSourceBuilder#searchSource()}. * - * @see org.elasticsearch.action.search.SearchRequest#source(SearchSourceBuilder) + * @see SearchRequest#source(SearchSourceBuilder) */ public final class SearchSourceBuilder implements Writeable, ToXContentObject, Rewriteable { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(SearchSourceBuilder.class); @@ -141,6 +148,8 @@ public static HighlightBuilder highlight() { return new HighlightBuilder(); } + private transient RetrieverBuilder retrieverBuilder; + private List subSearchSourceBuilders = new ArrayList<>(); private QueryBuilder postQueryBuilder; @@ -283,6 +292,9 @@ public SearchSourceBuilder(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { + if (retrieverBuilder != null) { + throw new IllegalStateException("SearchSourceBuilder should be rewritten first"); + } out.writeOptionalWriteable(aggregations); out.writeOptionalBoolean(explain); out.writeOptionalWriteable(fetchSourceContext); @@ -367,6 +379,18 @@ public void writeTo(StreamOutput out) throws IOException { } } + /** + * Sets the retriever for this request. + */ + public SearchSourceBuilder retriever(RetrieverBuilder retrieverBuilder) { + this.retrieverBuilder = retrieverBuilder; + return this; + } + + public RetrieverBuilder retriever() { + return retrieverBuilder; + } + /** * Sets the query for this request. */ @@ -1134,6 +1158,21 @@ public SearchSourceBuilder rewrite(QueryRewriteContext context) throws IOExcepti highlightBuilder ) )); + if (retrieverBuilder != null) { + var newRetriever = retrieverBuilder.rewrite(context); + if (newRetriever != retrieverBuilder) { + var rewritten = shallowCopy(); + rewritten.retrieverBuilder = newRetriever; + return rewritten; + } else { + // retriever is transient, the rewritten version is extracted in this source. 
+ var retriever = retrieverBuilder; + retrieverBuilder = null; + retriever.extractToSearchSourceBuilder(this, false); + validate(); + } + } + List subSearchSourceBuilders = Rewriteable.rewrite(this.subSearchSourceBuilders, context); QueryBuilder postQueryBuilder = null; if (this.postQueryBuilder != null) { @@ -1293,7 +1332,6 @@ private SearchSourceBuilder parseXContent( } List knnBuilders = new ArrayList<>(); - RetrieverBuilder retrieverBuilder = null; SearchUsage searchUsage = new SearchUsage(); while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -1627,39 +1665,6 @@ private SearchSourceBuilder parseXContent( } knnSearch = knnBuilders.stream().map(knnBuilder -> knnBuilder.build(size())).collect(Collectors.toList()); - - if (retrieverBuilder != null) { - List specified = new ArrayList<>(); - if (subSearchSourceBuilders.isEmpty() == false) { - specified.add(QUERY_FIELD.getPreferredName()); - } - if (knnSearch.isEmpty() == false) { - specified.add(KNN_FIELD.getPreferredName()); - } - if (searchAfterBuilder != null) { - specified.add(SEARCH_AFTER.getPreferredName()); - } - if (terminateAfter != DEFAULT_TERMINATE_AFTER) { - specified.add(TERMINATE_AFTER_FIELD.getPreferredName()); - } - if (sorts != null) { - specified.add(SORT_FIELD.getPreferredName()); - } - if (rescoreBuilders != null) { - specified.add(RESCORE_FIELD.getPreferredName()); - } - if (minScore != null) { - specified.add(MIN_SCORE_FIELD.getPreferredName()); - } - if (rankBuilder != null) { - specified.add(RANK_FIELD.getPreferredName()); - } - if (specified.isEmpty() == false) { - throw new IllegalArgumentException("cannot specify [" + RETRIEVER.getPreferredName() + "] and " + specified); - } - retrieverBuilder.extractToSearchSourceBuilder(this, false); - } - searchUsageConsumer.accept(searchUsage); return this; } @@ -1689,6 +1694,10 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t builder.field(TERMINATE_AFTER_FIELD.getPreferredName(), terminateAfter); } + if (retrieverBuilder != null) { + builder.field(RETRIEVER.getPreferredName(), retrieverBuilder); + } + if (subSearchSourceBuilders.isEmpty() == false) { if (subSearchSourceBuilders.size() == 1) { builder.field(QUERY_FIELD.getPreferredName(), subSearchSourceBuilders.get(0).getQueryBuilder()); @@ -2183,4 +2192,169 @@ public boolean supportsParallelCollection(ToLongFunction fieldCardinalit return collapse == null && (aggregations == null || aggregations.supportsParallelCollection(fieldCardinality)); } + + private void validate() throws ValidationException { + var exceptions = validate(null, false); + if (exceptions != null) { + throw exceptions; + } + } + + public ActionRequestValidationException validate(ActionRequestValidationException validationException, boolean isScroll) { + if (retriever() != null) { + List specified = new ArrayList<>(); + if (subSearches().isEmpty() == false) { + specified.add(QUERY_FIELD.getPreferredName()); + } + if (knnSearch().isEmpty() == false) { + specified.add(KNN_FIELD.getPreferredName()); + } + if (searchAfter() != null) { + specified.add(SEARCH_AFTER.getPreferredName()); + } + if (terminateAfter() != DEFAULT_TERMINATE_AFTER) { + specified.add(TERMINATE_AFTER_FIELD.getPreferredName()); + } + if (sorts() != null) { + specified.add(SORT_FIELD.getPreferredName()); + } + if (rescores() != null) { + specified.add(RESCORE_FIELD.getPreferredName()); + } + if (minScore() != null) { + specified.add(MIN_SCORE_FIELD.getPreferredName()); + } + if 
(rankBuilder() != null) { + specified.add(RANK_FIELD.getPreferredName()); + } + if (specified.isEmpty() == false) { + validationException = addValidationError( + "cannot specify [" + RETRIEVER.getPreferredName() + "] and " + specified, + validationException + ); + } + } + if (isScroll) { + if (trackTotalHitsUpTo() != null && trackTotalHitsUpTo() != SearchContext.TRACK_TOTAL_HITS_ACCURATE) { + validationException = addValidationError( + "disabling [track_total_hits] is not allowed in a scroll context", + validationException + ); + } + if (from() > 0) { + validationException = addValidationError("using [from] is not allowed in a scroll context", validationException); + } + if (size() == 0) { + validationException = addValidationError("[size] cannot be [0] in a scroll context", validationException); + } + if (rescores() != null && rescores().isEmpty() == false) { + validationException = addValidationError("using [rescore] is not allowed in a scroll context", validationException); + } + if (CollectionUtils.isEmpty(searchAfter()) == false) { + validationException = addValidationError("[search_after] cannot be used in a scroll context", validationException); + } + if (collapse() != null) { + validationException = addValidationError("cannot use `collapse` in a scroll context", validationException); + } + } + if (slice() != null) { + if (pointInTimeBuilder() == null && (isScroll == false)) { + validationException = addValidationError( + "[slice] can only be used with [scroll] or [point-in-time] requests", + validationException + ); + } + } + if (from() > 0 && CollectionUtils.isEmpty(searchAfter()) == false) { + validationException = addValidationError("[from] parameter must be set to 0 when [search_after] is used", validationException); + } + if (storedFields() != null) { + if (storedFields().fetchFields() == false) { + if (fetchSource() != null && fetchSource().fetchSource()) { + validationException = addValidationError( + "[stored_fields] cannot be disabled if [_source] is requested", + validationException + ); + } + if (fetchFields() != null) { + validationException = addValidationError( + "[stored_fields] cannot be disabled when using the [fields] option", + validationException + ); + } + } + } + if (subSearches().size() >= 2 && rankBuilder() == null) { + validationException = addValidationError("[sub_searches] requires [rank]", validationException); + } + if (aggregations() != null) { + validationException = aggregations().validate(validationException); + } + + if (rankBuilder() != null) { + int s = size() == -1 ? 
SearchService.DEFAULT_SIZE : size(); + if (s == 0) { + validationException = addValidationError("[rank] requires [size] greater than [0]", validationException); + } + if (s > rankBuilder().rankWindowSize()) { + validationException = addValidationError( + "[rank] requires [rank_window_size: " + + rankBuilder().rankWindowSize() + + "]" + + " be greater than or equal to [size: " + + s + + "]", + validationException + ); + } + int queryCount = subSearches().size() + knnSearch().size(); + if (rankBuilder().isCompoundBuilder() && queryCount < 2) { + validationException = addValidationError( + "[rank] requires a minimum of [2] result sets using a combination of sub searches and/or knn searches", + validationException + ); + } + if (isScroll) { + validationException = addValidationError("[rank] cannot be used in a scroll context", validationException); + } + if (rescores() != null && rescores().isEmpty() == false) { + validationException = addValidationError("[rank] cannot be used with [rescore]", validationException); + } + if (sorts() != null && sorts().isEmpty() == false) { + validationException = addValidationError("[rank] cannot be used with [sort]", validationException); + } + if (collapse() != null) { + validationException = addValidationError("[rank] cannot be used with [collapse]", validationException); + } + if (suggest() != null && suggest().getSuggestions().isEmpty() == false) { + validationException = addValidationError("[rank] cannot be used with [suggest]", validationException); + } + if (highlighter() != null) { + validationException = addValidationError("[rank] cannot be used with [highlighter]", validationException); + } + if (pointInTimeBuilder() != null) { + validationException = addValidationError("[rank] cannot be used with [point in time]", validationException); + } + } + + if (rescores() != null) { + for (@SuppressWarnings("rawtypes") + var rescorer : rescores()) { + validationException = rescorer.validate(this, validationException); + } + } + + if (pointInTimeBuilder() == null && sorts() != null) { + for (var sortBuilder : sorts()) { + if (sortBuilder instanceof FieldSortBuilder fieldSortBuilder + && ShardDocSortField.NAME.equals(fieldSortBuilder.getFieldName())) { + validationException = addValidationError( + "[" + FieldSortBuilder.SHARD_DOC_FIELD_NAME + "] sort field cannot be used without [point in time]", + validationException + ); + } + } + } + return validationException; + } } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchContext.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchContext.java index 65d49f771a045..85ce8a9fdc5d0 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchContext.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchContext.java @@ -189,7 +189,7 @@ public FetchDocValuesContext docValuesContext() { searchContext.getSearchExecutionContext(), Collections.singletonList(new FieldAndFormat(name, null)) ); - } else if (searchContext.docValuesContext().fields().stream().map(ff -> ff.field).anyMatch(name::equals) == false) { + } else if (searchContext.docValuesContext().fields().stream().map(ff -> ff.field).noneMatch(name::equals)) { dvContext.fields().add(new FieldAndFormat(name, null)); } } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchDocValuesContext.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchDocValuesContext.java index 2ae7f6d07bbb9..6a1e071c48269 100644 --- 
a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchDocValuesContext.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchDocValuesContext.java @@ -10,6 +10,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.SearchExecutionContext; +import java.util.ArrayList; import java.util.Collection; import java.util.LinkedHashMap; import java.util.List; @@ -24,7 +25,7 @@ */ public class FetchDocValuesContext { - private final Collection fields; + private final List fields; /** * Create a new FetchDocValuesContext using the provided input list. @@ -40,7 +41,7 @@ public FetchDocValuesContext(SearchExecutionContext searchExecutionContext, List fieldToFormats.put(fieldName, new FieldAndFormat(fieldName, field.format, field.includeUnmapped)); } } - this.fields = fieldToFormats.values(); + this.fields = new ArrayList<>(fieldToFormats.values()); int maxAllowedDocvalueFields = searchExecutionContext.getIndexSettings().getMaxDocvalueFields(); if (fields.size() > maxAllowedDocvalueFields) { throw new IllegalArgumentException( @@ -58,7 +59,7 @@ public FetchDocValuesContext(SearchExecutionContext searchExecutionContext, List /** * Returns the required docvalue fields. */ - public Collection fields() { + public List fields() { return this.fields; } } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java index e7fa0e67cb453..3d180dd094b18 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java @@ -218,6 +218,9 @@ private static int findGoodEndForNoHighlightExcerpt(int noMatchSize, Analyzer an // Can't split on term boundaries without offsets return -1; } + if (contents.length() <= noMatchSize) { + return contents.length(); + } int end = -1; tokenStream.reset(); while (tokenStream.incrementToken()) { diff --git a/server/src/main/java/org/elasticsearch/search/rank/context/RankFeaturePhaseRankCoordinatorContext.java b/server/src/main/java/org/elasticsearch/search/rank/context/RankFeaturePhaseRankCoordinatorContext.java index 915feaad6e339..02834f03f54ab 100644 --- a/server/src/main/java/org/elasticsearch/search/rank/context/RankFeaturePhaseRankCoordinatorContext.java +++ b/server/src/main/java/org/elasticsearch/search/rank/context/RankFeaturePhaseRankCoordinatorContext.java @@ -24,7 +24,8 @@ /** * {@code RankFeaturePhaseRankCoordinatorContext} is a base class that runs on the coordinating node and is responsible for retrieving - * {@code window_size} total results from all shards, rank them, and then produce a final paginated response of [from, from+size] results. + * {@code rank_window_size} total results from all shards, rank them, and then produce a final paginated response of [from, from+size] + * results. */ public abstract class RankFeaturePhaseRankCoordinatorContext { @@ -44,6 +45,16 @@ public RankFeaturePhaseRankCoordinatorContext(int size, int from, int rankWindow */ protected abstract void computeScores(RankFeatureDoc[] featureDocs, ActionListener scoreListener); + /** + * Preprocesses the provided documents: sorts them by score descending. 
+ * @param originalDocs documents to process + */ + protected RankFeatureDoc[] preprocess(RankFeatureDoc[] originalDocs) { + return Arrays.stream(originalDocs) + .sorted(Comparator.comparing((RankFeatureDoc doc) -> doc.score).reversed()) + .toArray(RankFeatureDoc[]::new); + } + /** * This method is responsible for ranking the global results based on the provided rank feature results from each shard. *
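
The preprocess/rankAndPaginate contract documented here boils down to: sort by score descending, then return the [from, from + size) window with 1-based ranks assigned. A rough standalone sketch of that behaviour, assuming a hypothetical ScoredDoc record rather than RankFeatureDoc:

    import java.util.Arrays;
    import java.util.Comparator;

    // Hypothetical simplified analogue of the coordinator's sort-then-paginate step.
    record ScoredDoc(int doc, float score, int rank) {}

    class RankAndPaginate {
        static ScoredDoc[] rankAndPaginate(ScoredDoc[] docs, int from, int size) {
            // preprocess(): order by score, best first
            ScoredDoc[] sorted = Arrays.stream(docs)
                .sorted(Comparator.comparing(ScoredDoc::score).reversed())
                .toArray(ScoredDoc[]::new);
            // keep at most `size` docs starting at `from`, assigning 1-based ranks
            int length = Math.max(0, Math.min(size, sorted.length - from));
            ScoredDoc[] page = new ScoredDoc[length];
            for (int rank = 0; rank < length; rank++) {
                ScoredDoc d = sorted[from + rank];
                page[rank] = new ScoredDoc(d.doc(), d.score(), from + rank + 1);
            }
            return page;
        }
    }

A subclass that filters on min_score would do so in an overridden preprocess step before the pagination above.
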

      @@ -63,22 +74,28 @@ public void computeRankScoresForGlobalResults( RankFeatureDoc[] featureDocs = extractFeatureDocs(rankSearchResults); // generate the final `topResults` results, and pass them to fetch phase through the `rankListener` - computeScores(featureDocs, rankListener.delegateFailureAndWrap((listener, scores) -> { - for (int i = 0; i < featureDocs.length; i++) { - featureDocs[i].score = scores[i]; - } - listener.onResponse(featureDocs); - })); + if (featureDocs.length == 0) { + rankListener.onResponse(new RankFeatureDoc[0]); + } else { + computeScores(featureDocs, rankListener.delegateFailureAndWrap((listener, scores) -> { + for (int i = 0; i < featureDocs.length; i++) { + featureDocs[i].score = scores[i]; + } + listener.onResponse(featureDocs); + })); + } } /** - * Ranks the provided {@link RankFeatureDoc} array and paginates the results based on the `from` and `size` parameters. + * Ranks the provided {@link RankFeatureDoc} array and paginates the results based on the `from` and `size` parameters. Filters out + * documents that have a relevance score less than min_score. + * @param rankFeatureDocs documents to process */ public RankFeatureDoc[] rankAndPaginate(RankFeatureDoc[] rankFeatureDocs) { - Arrays.sort(rankFeatureDocs, Comparator.comparing((RankFeatureDoc doc) -> doc.score).reversed()); - RankFeatureDoc[] topResults = new RankFeatureDoc[Math.max(0, Math.min(size, rankFeatureDocs.length - from))]; + RankFeatureDoc[] sortedDocs = preprocess(rankFeatureDocs); + RankFeatureDoc[] topResults = new RankFeatureDoc[Math.max(0, Math.min(size, sortedDocs.length - from))]; for (int rank = 0; rank < topResults.length; ++rank) { - topResults[rank] = rankFeatureDocs[from + rank]; + topResults[rank] = sortedDocs[from + rank]; topResults[rank].rank = from + rank + 1; } return topResults; diff --git a/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java b/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java index 5cd5a888581c8..946fd46fe6aec 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.rescore; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -17,6 +16,7 @@ import org.elasticsearch.common.io.stream.VersionedNamedWriteable; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -120,7 +120,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public ActionRequestValidationException validate(SearchRequest searchRequest, ActionRequestValidationException validationException) { + public ActionRequestValidationException validate(SearchSourceBuilder source, ActionRequestValidationException validationException) { return validationException; } diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java index 6e3d2a58dbd5d..3a9979030683a 
100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java @@ -14,6 +14,8 @@ import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.xcontent.AbstractObjectParser; import org.elasticsearch.xcontent.FilterXContentParserWrapper; @@ -33,16 +35,17 @@ /** * A retriever represents an API element that returns an ordered list of top * documents. These can be obtained from a query, from another retriever, etc. - * Internally, a {@link RetrieverBuilder} is just a wrapper for other search - * elements that are extracted into a {@link SearchSourceBuilder}. The advantage - * retrievers have is in the API they appear as a tree-like structure enabling + * Internally, a {@link RetrieverBuilder} is first rewritten into its simplest + * form and then its elements are extracted into a {@link SearchSourceBuilder}. + * + * The advantage retrievers have is in the API they appear as a tree-like structure enabling * easier reasoning about what a search does. * * This is the base class for all other retrievers. This class does not support * serialization and is expected to be fully extracted to a {@link SearchSourceBuilder} * prior to any transport calls. */ -public abstract class RetrieverBuilder implements ToXContent { +public abstract class RetrieverBuilder implements Rewriteable, ToXContent { public static final NodeFeature RETRIEVERS_SUPPORTED = new NodeFeature("retrievers_supported"); @@ -181,6 +184,13 @@ protected static RetrieverBuilder parseInnerRetrieverBuilder(XContentParser pars protected String retrieverName; + /** + * Determines if this retriever contains sub-retrievers that need to be executed prior to search. + */ + public boolean isCompound() { + return false; + } + /** * Gets the filters for this retriever. */ @@ -188,8 +198,13 @@ public List getPreFilterQueryBuilders() { return preFilterQueryBuilders; } + @Override + public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException { + return this; + } + /** - * This method is called at the end of parsing on behalf of a {@link SearchSourceBuilder}. + * This method is called at the end of rewriting on behalf of a {@link SearchSourceBuilder}. * Elements from retrievers are expected to be "extracted" into the {@link SearchSourceBuilder}. 
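
The rewrite-then-extract lifecycle described in this Javadoc can be sketched in isolation. The types below are hypothetical stand-ins, not the real RetrieverBuilder or SearchSourceBuilder, and only show the shape of the contract: leaves rewrite to themselves, compound nodes rebuild only when a child changed, and once the tree is stable its clauses are extracted into a flat source.

    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical retriever tree, rewritten to its simplest form before extraction.
    interface Retriever {
        default Retriever rewrite() {
            return this;
        }

        void extractTo(List<String> sourceClauses);
    }

    record Leaf(String query) implements Retriever {
        @Override
        public void extractTo(List<String> sourceClauses) {
            sourceClauses.add(query);
        }
    }

    record Compound(List<Retriever> children) implements Retriever {
        @Override
        public Retriever rewrite() {
            List<Retriever> rewritten = new ArrayList<>(children.size());
            boolean changed = false;
            for (Retriever child : children) {
                Retriever r = child.rewrite();
                changed |= r != child;
                rewritten.add(r);
            }
            // Keep the existing instance when nothing changed, mirroring the rewrite contract.
            return changed ? new Compound(rewritten) : this;
        }

        @Override
        public void extractTo(List<String> sourceClauses) {
            for (Retriever child : children) {
                child.extractTo(sourceClauses);
            }
        }
    }
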
*/ public abstract void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed); diff --git a/server/src/main/java/org/elasticsearch/search/vectors/ESDiversifyingChildrenByteKnnVectorQuery.java b/server/src/main/java/org/elasticsearch/search/vectors/ESDiversifyingChildrenByteKnnVectorQuery.java index f5f3ac8e8fe24..bf250a2f35184 100644 --- a/server/src/main/java/org/elasticsearch/search/vectors/ESDiversifyingChildrenByteKnnVectorQuery.java +++ b/server/src/main/java/org/elasticsearch/search/vectors/ESDiversifyingChildrenByteKnnVectorQuery.java @@ -15,15 +15,24 @@ import org.elasticsearch.search.profile.query.QueryProfiler; public class ESDiversifyingChildrenByteKnnVectorQuery extends DiversifyingChildrenByteKnnVectorQuery implements ProfilingQuery { + private final Integer kParam; private long vectorOpsCount; - public ESDiversifyingChildrenByteKnnVectorQuery(String field, byte[] query, Query childFilter, int k, BitSetProducer parentsFilter) { - super(field, query, childFilter, k, parentsFilter); + public ESDiversifyingChildrenByteKnnVectorQuery( + String field, + byte[] query, + Query childFilter, + Integer k, + int numCands, + BitSetProducer parentsFilter + ) { + super(field, query, childFilter, numCands, parentsFilter); + this.kParam = k; } @Override protected TopDocs mergeLeafResults(TopDocs[] perLeafResults) { - TopDocs topK = super.mergeLeafResults(perLeafResults); + TopDocs topK = kParam == null ? super.mergeLeafResults(perLeafResults) : TopDocs.merge(kParam, perLeafResults); vectorOpsCount = topK.totalHits.value; return topK; } diff --git a/server/src/main/java/org/elasticsearch/search/vectors/ESDiversifyingChildrenFloatKnnVectorQuery.java b/server/src/main/java/org/elasticsearch/search/vectors/ESDiversifyingChildrenFloatKnnVectorQuery.java index 1ecb7e5afd044..59b8f26902367 100644 --- a/server/src/main/java/org/elasticsearch/search/vectors/ESDiversifyingChildrenFloatKnnVectorQuery.java +++ b/server/src/main/java/org/elasticsearch/search/vectors/ESDiversifyingChildrenFloatKnnVectorQuery.java @@ -15,15 +15,24 @@ import org.elasticsearch.search.profile.query.QueryProfiler; public class ESDiversifyingChildrenFloatKnnVectorQuery extends DiversifyingChildrenFloatKnnVectorQuery implements ProfilingQuery { + private final Integer kParam; private long vectorOpsCount; - public ESDiversifyingChildrenFloatKnnVectorQuery(String field, float[] query, Query childFilter, int k, BitSetProducer parentsFilter) { - super(field, query, childFilter, k, parentsFilter); + public ESDiversifyingChildrenFloatKnnVectorQuery( + String field, + float[] query, + Query childFilter, + Integer k, + int numCands, + BitSetProducer parentsFilter + ) { + super(field, query, childFilter, numCands, parentsFilter); + this.kParam = k; } @Override protected TopDocs mergeLeafResults(TopDocs[] perLeafResults) { - TopDocs topK = super.mergeLeafResults(perLeafResults); + TopDocs topK = kParam == null ? 
super.mergeLeafResults(perLeafResults) : TopDocs.merge(kParam, perLeafResults);
         vectorOpsCount = topK.totalHits.value;
         return topK;
     }
diff --git a/server/src/main/java/org/elasticsearch/search/vectors/ESKnnByteVectorQuery.java b/server/src/main/java/org/elasticsearch/search/vectors/ESKnnByteVectorQuery.java
index 05cf52fd23f24..9808d97ec8253 100644
--- a/server/src/main/java/org/elasticsearch/search/vectors/ESKnnByteVectorQuery.java
+++ b/server/src/main/java/org/elasticsearch/search/vectors/ESKnnByteVectorQuery.java
@@ -14,16 +14,18 @@ import org.elasticsearch.search.profile.query.QueryProfiler;

 public class ESKnnByteVectorQuery extends KnnByteVectorQuery implements ProfilingQuery {
-
+    private final Integer kParam;
     private long vectorOpsCount;

-    public ESKnnByteVectorQuery(String field, byte[] target, int k, Query filter) {
-        super(field, target, k, filter);
+    public ESKnnByteVectorQuery(String field, byte[] target, Integer k, int numCands, Query filter) {
+        super(field, target, numCands, filter);
+        this.kParam = k;
     }

     @Override
     protected TopDocs mergeLeafResults(TopDocs[] perLeafResults) {
-        TopDocs topK = super.mergeLeafResults(perLeafResults);
+        // if k param is set, we get only top k results from each shard
+        TopDocs topK = kParam == null ? super.mergeLeafResults(perLeafResults) : TopDocs.merge(kParam, perLeafResults);
         vectorOpsCount = topK.totalHits.value;
         return topK;
     }
diff --git a/server/src/main/java/org/elasticsearch/search/vectors/ESKnnFloatVectorQuery.java b/server/src/main/java/org/elasticsearch/search/vectors/ESKnnFloatVectorQuery.java
index e83a90a3c4df8..aad4005eb83ed 100644
--- a/server/src/main/java/org/elasticsearch/search/vectors/ESKnnFloatVectorQuery.java
+++ b/server/src/main/java/org/elasticsearch/search/vectors/ESKnnFloatVectorQuery.java
@@ -14,15 +14,18 @@ import org.elasticsearch.search.profile.query.QueryProfiler;

 public class ESKnnFloatVectorQuery extends KnnFloatVectorQuery implements ProfilingQuery {
+    private final Integer kParam;
     private long vectorOpsCount;

-    public ESKnnFloatVectorQuery(String field, float[] target, int k, Query filter) {
-        super(field, target, k, filter);
+    public ESKnnFloatVectorQuery(String field, float[] target, Integer k, int numCands, Query filter) {
+        super(field, target, numCands, filter);
+        this.kParam = k;
     }

     @Override
     protected TopDocs mergeLeafResults(TopDocs[] perLeafResults) {
-        TopDocs topK = super.mergeLeafResults(perLeafResults);
+        // if k param is set, we get only top k results from each shard
+        TopDocs topK = kParam == null ?
super.mergeLeafResults(perLeafResults) : TopDocs.merge(kParam, perLeafResults); vectorOpsCount = topK.totalHits.value; return topK; } diff --git a/server/src/main/java/org/elasticsearch/search/vectors/KnnSearchBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/KnnSearchBuilder.java index 3c03d3258ebab..348a65d0c4960 100644 --- a/server/src/main/java/org/elasticsearch/search/vectors/KnnSearchBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/vectors/KnnSearchBuilder.java @@ -298,6 +298,10 @@ public String getField() { return field; } + public List getFilterQueries() { + return filterQueries; + } + public KnnSearchBuilder addFilterQuery(QueryBuilder filterQuery) { Objects.requireNonNull(filterQuery); this.filterQueries.add(filterQuery); @@ -398,7 +402,7 @@ public KnnVectorQueryBuilder toQueryBuilder() { if (queryVectorBuilder != null) { throw new IllegalArgumentException("missing rewrite"); } - return new KnnVectorQueryBuilder(field, queryVector, numCands, similarity).boost(boost) + return new KnnVectorQueryBuilder(field, queryVector, null, numCands, similarity).boost(boost) .queryName(queryName) .addFilterQueries(filterQueries); } diff --git a/server/src/main/java/org/elasticsearch/search/vectors/KnnSearchRequestParser.java b/server/src/main/java/org/elasticsearch/search/vectors/KnnSearchRequestParser.java index 8e5c24d0398b9..237bb7e832c3e 100644 --- a/server/src/main/java/org/elasticsearch/search/vectors/KnnSearchRequestParser.java +++ b/server/src/main/java/org/elasticsearch/search/vectors/KnnSearchRequestParser.java @@ -255,7 +255,7 @@ public KnnVectorQueryBuilder toQueryBuilder() { if (numCands > NUM_CANDS_LIMIT) { throw new IllegalArgumentException("[" + NUM_CANDS_FIELD.getPreferredName() + "] cannot exceed [" + NUM_CANDS_LIMIT + "]"); } - return new KnnVectorQueryBuilder(field, queryVector, numCands, null); + return new KnnVectorQueryBuilder(field, queryVector, null, numCands, null); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java index 0c8dfc9a98330..f1b1c24c50788 100644 --- a/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.search.join.ToChildBlockJoinQuery; @@ -20,6 +21,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.core.Nullable; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NestedObjectMapper; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; @@ -52,11 +54,14 @@ * {@link org.apache.lucene.search.KnnByteVectorQuery}. 
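
Both overrides above fall back to Lucene's TopDocs.merge when k is set, so only the global top-k hits survive the per-leaf merge. A toy demonstration of that merge with made-up doc ids and scores, assuming lucene-core on the classpath:

    import org.apache.lucene.search.ScoreDoc;
    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.search.TotalHits;

    class MergeTopKDemo {
        public static void main(String[] args) {
            // Two per-leaf result sets, each already sorted by score descending.
            TopDocs leaf0 = new TopDocs(
                new TotalHits(2, TotalHits.Relation.EQUAL_TO),
                new ScoreDoc[] { new ScoreDoc(3, 0.9f), new ScoreDoc(7, 0.4f) }
            );
            TopDocs leaf1 = new TopDocs(
                new TotalHits(2, TotalHits.Relation.EQUAL_TO),
                new ScoreDoc[] { new ScoreDoc(11, 0.8f), new ScoreDoc(2, 0.1f) }
            );
            // k = 2: keep only the two best hits across both leaves (docs 3 and 11 here).
            TopDocs topK = TopDocs.merge(2, new TopDocs[] { leaf0, leaf1 });
            for (ScoreDoc d : topK.scoreDocs) {
                System.out.println("doc=" + d.doc + " score=" + d.score);
            }
        }
    }

With k unset the behaviour is unchanged: the queries defer to super.mergeLeafResults as before.
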
*/ public class KnnVectorQueryBuilder extends AbstractQueryBuilder { + public static final NodeFeature K_PARAM_SUPPORTED = new NodeFeature("search.vectors.k_param_supported"); + public static final String NAME = "knn"; private static final int NUM_CANDS_LIMIT = 10_000; private static final float NUM_CANDS_MULTIPLICATIVE_FACTOR = 1.5f; public static final ParseField FIELD_FIELD = new ParseField("field"); + public static final ParseField K_FIELD = new ParseField("k"); public static final ParseField NUM_CANDS_FIELD = new ParseField("num_candidates"); public static final ParseField QUERY_VECTOR_FIELD = new ParseField("query_vector"); public static final ParseField VECTOR_SIMILARITY_FIELD = new ParseField("similarity"); @@ -69,10 +74,11 @@ public class KnnVectorQueryBuilder extends AbstractQueryBuilder new KnnVectorQueryBuilder( (String) args[0], (VectorData) args[1], - (QueryVectorBuilder) args[4], + (QueryVectorBuilder) args[5], null, (Integer) args[2], - (Float) args[3] + (Integer) args[3], + (Float) args[4] ) ); @@ -84,6 +90,7 @@ public class KnnVectorQueryBuilder extends AbstractQueryBuilder filterQueries = new ArrayList<>(); private final Float vectorSimilarity; private final QueryVectorBuilder queryVectorBuilder; private final Supplier queryVectorSupplier; - public KnnVectorQueryBuilder(String fieldName, float[] queryVector, Integer numCands, Float vectorSimilarity) { - this(fieldName, VectorData.fromFloats(queryVector), null, null, numCands, vectorSimilarity); + public KnnVectorQueryBuilder(String fieldName, float[] queryVector, Integer k, Integer numCands, Float vectorSimilarity) { + this(fieldName, VectorData.fromFloats(queryVector), null, null, k, numCands, vectorSimilarity); } - protected KnnVectorQueryBuilder(String fieldName, QueryVectorBuilder queryVectorBuilder, Integer numCands, Float vectorSimilarity) { - this(fieldName, null, queryVectorBuilder, null, numCands, vectorSimilarity); + protected KnnVectorQueryBuilder( + String fieldName, + QueryVectorBuilder queryVectorBuilder, + Integer k, + Integer numCands, + Float vectorSimilarity + ) { + this(fieldName, null, queryVectorBuilder, null, k, numCands, vectorSimilarity); } - public KnnVectorQueryBuilder(String fieldName, byte[] queryVector, Integer numCands, Float vectorSimilarity) { - this(fieldName, VectorData.fromBytes(queryVector), null, null, numCands, vectorSimilarity); + public KnnVectorQueryBuilder(String fieldName, byte[] queryVector, Integer k, Integer numCands, Float vectorSimilarity) { + this(fieldName, VectorData.fromBytes(queryVector), null, null, k, numCands, vectorSimilarity); } - public KnnVectorQueryBuilder(String fieldName, VectorData queryVector, Integer numCands, Float vectorSimilarity) { - this(fieldName, queryVector, null, null, numCands, vectorSimilarity); + public KnnVectorQueryBuilder(String fieldName, VectorData queryVector, Integer k, Integer numCands, Float vectorSimilarity) { + this(fieldName, queryVector, null, null, k, numCands, vectorSimilarity); } private KnnVectorQueryBuilder( @@ -133,12 +147,21 @@ private KnnVectorQueryBuilder( VectorData queryVector, QueryVectorBuilder queryVectorBuilder, Supplier queryVectorSupplier, + Integer k, Integer numCands, Float vectorSimilarity ) { + if (k != null && k < 1) { + throw new IllegalArgumentException("[" + K_FIELD.getPreferredName() + "] must be greater than 0"); + } if (numCands != null && numCands > NUM_CANDS_LIMIT) { throw new IllegalArgumentException("[" + NUM_CANDS_FIELD.getPreferredName() + "] cannot exceed [" + NUM_CANDS_LIMIT + "]"); } + if (k != 
null && numCands != null && numCands < k) { + throw new IllegalArgumentException( + "[" + NUM_CANDS_FIELD.getPreferredName() + "] cannot be less than [" + K_FIELD.getPreferredName() + "]" + ); + } if (queryVector == null && queryVectorBuilder == null) { throw new IllegalArgumentException( format( @@ -158,6 +181,7 @@ private KnnVectorQueryBuilder( } this.fieldName = fieldName; this.queryVector = queryVector; + this.k = k; this.numCands = numCands; this.vectorSimilarity = vectorSimilarity; this.queryVectorBuilder = queryVectorBuilder; @@ -167,6 +191,11 @@ private KnnVectorQueryBuilder( public KnnVectorQueryBuilder(StreamInput in) throws IOException { super(in); this.fieldName = in.readString(); + if (in.getTransportVersion().onOrAfter(TransportVersions.K_FOR_KNN_QUERY_ADDED)) { + this.k = in.readOptionalVInt(); + } else { + this.k = null; + } if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { this.numCands = in.readOptionalVInt(); } else { @@ -214,6 +243,10 @@ public Float getVectorSimilarity() { return vectorSimilarity; } + public Integer k() { + return k; + } + public Integer numCands() { return numCands; } @@ -245,6 +278,9 @@ protected void doWriteTo(StreamOutput out) throws IOException { throw new IllegalStateException("missing a rewriteAndFetch?"); } out.writeString(fieldName); + if (out.getTransportVersion().onOrAfter(TransportVersions.K_FOR_KNN_QUERY_ADDED)) { + out.writeOptionalVInt(k); + } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { out.writeOptionalVInt(numCands); } else { @@ -302,6 +338,9 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep if (queryVector != null) { builder.field(QUERY_VECTOR_FIELD.getPreferredName(), queryVector); } + if (k != null) { + builder.field(K_FIELD.getPreferredName(), k); + } if (numCands != null) { builder.field(NUM_CANDS_FIELD.getPreferredName(), numCands); } @@ -335,7 +374,7 @@ protected QueryBuilder doRewrite(QueryRewriteContext ctx) throws IOException { if (queryVectorSupplier.get() == null) { return this; } - return new KnnVectorQueryBuilder(fieldName, queryVectorSupplier.get(), numCands, vectorSimilarity).boost(boost) + return new KnnVectorQueryBuilder(fieldName, queryVectorSupplier.get(), k, numCands, vectorSimilarity).boost(boost) .queryName(queryName) .addFilterQueries(filterQueries); } @@ -357,7 +396,7 @@ protected QueryBuilder doRewrite(QueryRewriteContext ctx) throws IOException { } ll.onResponse(null); }))); - return new KnnVectorQueryBuilder(fieldName, queryVector, queryVectorBuilder, toSet::get, numCands, vectorSimilarity).boost( + return new KnnVectorQueryBuilder(fieldName, queryVector, queryVectorBuilder, toSet::get, k, numCands, vectorSimilarity).boost( boost ).queryName(queryName).addFilterQueries(filterQueries); } @@ -377,7 +416,7 @@ protected QueryBuilder doRewrite(QueryRewriteContext ctx) throws IOException { rewrittenQueries.add(rewrittenQuery); } if (changed) { - return new KnnVectorQueryBuilder(fieldName, queryVector, queryVectorBuilder, queryVectorSupplier, numCands, vectorSimilarity) + return new KnnVectorQueryBuilder(fieldName, queryVector, queryVectorBuilder, queryVectorSupplier, k, numCands, vectorSimilarity) .boost(boost) .queryName(queryName) .addFilterQueries(rewrittenQueries); @@ -388,12 +427,17 @@ protected QueryBuilder doRewrite(QueryRewriteContext ctx) throws IOException { @Override protected Query doToQuery(SearchExecutionContext context) throws IOException { MappedFieldType fieldType = context.getFieldType(fieldName); - int requestSize = 
context.requestSize() == null || context.requestSize() < 0 ? DEFAULT_SIZE : context.requestSize(); + int requestSize; + if (k != null) { + requestSize = k; + } else { + requestSize = context.requestSize() == null || context.requestSize() < 0 ? DEFAULT_SIZE : context.requestSize(); + } int adjustedNumCands = numCands == null ? Math.round(Math.min(NUM_CANDS_MULTIPLICATIVE_FACTOR * requestSize, NUM_CANDS_LIMIT)) : numCands; if (fieldType == null) { - throw new IllegalArgumentException("field [" + fieldName + "] does not exist in the mapping"); + return new MatchNoDocsQuery(); } if (fieldType instanceof DenseVectorFieldType == false) { @@ -446,20 +490,21 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException { // Now join the filterQuery & parentFilter to provide the matching blocks of children filterQuery = new ToChildBlockJoinQuery(filterQuery, parentBitSet); } - return vectorFieldType.createKnnQuery(queryVector, adjustedNumCands, filterQuery, vectorSimilarity, parentBitSet); + return vectorFieldType.createKnnQuery(queryVector, k, adjustedNumCands, filterQuery, vectorSimilarity, parentBitSet); } - return vectorFieldType.createKnnQuery(queryVector, adjustedNumCands, filterQuery, vectorSimilarity, null); + return vectorFieldType.createKnnQuery(queryVector, k, adjustedNumCands, filterQuery, vectorSimilarity, null); } @Override protected int doHashCode() { - return Objects.hash(fieldName, Objects.hashCode(queryVector), numCands, filterQueries, vectorSimilarity, queryVectorBuilder); + return Objects.hash(fieldName, Objects.hashCode(queryVector), k, numCands, filterQueries, vectorSimilarity, queryVectorBuilder); } @Override protected boolean doEquals(KnnVectorQueryBuilder other) { return Objects.equals(fieldName, other.fieldName) && Objects.equals(queryVector, other.queryVector) + && Objects.equals(k, other.k) && Objects.equals(numCands, other.numCands) && Objects.equals(filterQueries, other.filterQueries) && Objects.equals(vectorSimilarity, other.vectorSimilarity) diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 453d0b3201560..25796606f2b1b 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; import org.elasticsearch.action.support.IndicesOptions; @@ -1331,7 +1332,11 @@ public ClusterState execute(ClusterState currentState) { ensureValidIndexName(currentState, snapshotIndexMetadata, renamedIndexName); shardLimitValidator.validateShardLimit(snapshotIndexMetadata.getSettings(), currentState); - final IndexMetadata.Builder indexMdBuilder = restoreToCreateNewIndex(snapshotIndexMetadata, renamedIndexName); + final IndexMetadata.Builder indexMdBuilder = restoreToCreateNewIndex( + snapshotIndexMetadata, + renamedIndexName, + currentState.getMinTransportVersion() + ); if (request.includeAliases() == false && snapshotIndexMetadata.getAliases().isEmpty() == false && isSystemIndex(snapshotIndexMetadata) == false) { @@ -1349,7 +1354,11 @@ && isSystemIndex(snapshotIndexMetadata) == false) { } else { // Index exists and it's closed - open it in metadata and 
start recovery validateExistingClosedIndex(currentIndexMetadata, snapshotIndexMetadata, renamedIndexName, partial); - final IndexMetadata.Builder indexMdBuilder = restoreOverClosedIndex(snapshotIndexMetadata, currentIndexMetadata); + final IndexMetadata.Builder indexMdBuilder = restoreOverClosedIndex( + snapshotIndexMetadata, + currentIndexMetadata, + currentState.getMinTransportVersion() + ); if (request.includeAliases() == false && isSystemIndex(snapshotIndexMetadata) == false) { // Remove all snapshot aliases @@ -1724,24 +1733,35 @@ private static IndexMetadata convertLegacyIndex( return convertedIndexMetadataBuilder.build(); } - private static IndexMetadata.Builder restoreToCreateNewIndex(IndexMetadata snapshotIndexMetadata, String renamedIndexName) { + private static IndexMetadata.Builder restoreToCreateNewIndex( + IndexMetadata snapshotIndexMetadata, + String renamedIndexName, + TransportVersion minClusterTransportVersion + ) { return IndexMetadata.builder(snapshotIndexMetadata) .state(IndexMetadata.State.OPEN) .index(renamedIndexName) .settings( Settings.builder().put(snapshotIndexMetadata.getSettings()).put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) ) - .timestampRange(IndexLongFieldRange.NO_SHARDS); + .timestampRange(IndexLongFieldRange.NO_SHARDS) + .eventIngestedRange(IndexLongFieldRange.NO_SHARDS, minClusterTransportVersion); } - private static IndexMetadata.Builder restoreOverClosedIndex(IndexMetadata snapshotIndexMetadata, IndexMetadata currentIndexMetadata) { + private static IndexMetadata.Builder restoreOverClosedIndex( + IndexMetadata snapshotIndexMetadata, + IndexMetadata currentIndexMetadata, + TransportVersion minTransportVersion + ) { final IndexMetadata.Builder indexMdBuilder = IndexMetadata.builder(snapshotIndexMetadata) .state(IndexMetadata.State.OPEN) .version(Math.max(snapshotIndexMetadata.getVersion(), 1 + currentIndexMetadata.getVersion())) .mappingVersion(Math.max(snapshotIndexMetadata.getMappingVersion(), 1 + currentIndexMetadata.getMappingVersion())) + .mappingsUpdatedVersion(snapshotIndexMetadata.getMappingsUpdatedVersion()) .settingsVersion(Math.max(snapshotIndexMetadata.getSettingsVersion(), 1 + currentIndexMetadata.getSettingsVersion())) .aliasesVersion(Math.max(snapshotIndexMetadata.getAliasesVersion(), 1 + currentIndexMetadata.getAliasesVersion())) .timestampRange(IndexLongFieldRange.NO_SHARDS) + .eventIngestedRange(IndexLongFieldRange.NO_SHARDS, minTransportVersion) .index(currentIndexMetadata.getIndex().getName()) .settings( Settings.builder() diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 7b3a83dfc9bb3..1529ef556037a 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -241,7 +241,7 @@ private void handleUpdatedSnapshotsInProgressEntry(String localNodeId, boolean r } if (removingLocalNode) { - pauseShardSnapshots(localNodeId, entry); + pauseShardSnapshotsForNodeRemoval(localNodeId, entry); } else { startNewShardSnapshots(localNodeId, entry); } @@ -318,7 +318,7 @@ private void startNewShardSnapshots(String localNodeId, SnapshotsInProgress.Entr threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> shardSnapshotTasks.forEach(Runnable::run)); } - private void pauseShardSnapshots(String localNodeId, SnapshotsInProgress.Entry entry) { + private void pauseShardSnapshotsForNodeRemoval(String 
localNodeId, SnapshotsInProgress.Entry entry) { final var localShardSnapshots = shardSnapshots.getOrDefault(entry.snapshot(), Map.of()); for (final Map.Entry shardEntry : entry.shards().entrySet()) { @@ -545,8 +545,8 @@ private String description() { public static String getShardStateId(IndexShard indexShard, IndexCommit snapshotIndexCommit) throws IOException { final Map userCommitData = snapshotIndexCommit.getUserData(); final SequenceNumbers.CommitInfo seqNumInfo = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(userCommitData.entrySet()); - final long maxSeqNo = seqNumInfo.maxSeqNo; - if (maxSeqNo != seqNumInfo.localCheckpoint || maxSeqNo != indexShard.getLastSyncedGlobalCheckpoint()) { + final long maxSeqNo = seqNumInfo.maxSeqNo(); + if (maxSeqNo != seqNumInfo.localCheckpoint() || maxSeqNo != indexShard.getLastSyncedGlobalCheckpoint()) { return null; } return userCommitData.get(Engine.HISTORY_UUID_KEY) @@ -606,8 +606,9 @@ private void syncShardStatsOnNewMaster(List entries) } else if (stage == Stage.PAUSED) { // but we think the shard has paused - we need to make new master know that logger.debug(""" - [{}] new master thinks the shard [{}] is still running but the shard paused locally, updating status on \ - master""", snapshot.snapshot(), shardId); + new master thinks that shard [{}] snapshot [{}], with shard generation [{}], is still running, but the \ + shard snapshot is paused locally, updating status on master + """, shardId, snapshot.snapshot(), localShard.getValue().generation()); notifyUnsuccessfulSnapshotShard( snapshot.snapshot(), shardId, @@ -648,6 +649,14 @@ private void notifyUnsuccessfulSnapshotShard( shardId, new ShardSnapshotStatus(clusterService.localNode().getId(), shardState, generation, failure) ); + if (shardState == ShardState.PAUSED_FOR_NODE_REMOVAL) { + logger.debug( + "Pausing shard [{}] snapshot [{}], with shard generation [{}], because this node is marked for removal", + shardId, + snapshot, + generation + ); + } } /** Updates the shard snapshot status by sending a {@link UpdateIndexShardSnapshotStatusRequest} to the master node */ diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index cd7516a8f1232..9178050ff2a0b 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -999,39 +999,42 @@ public ClusterState execute(ClusterState currentState) { // We keep a cache of shards that failed in this map. If we fail a shardId for a given repository because of // a node leaving or shard becoming unassigned for one snapshot, we will also fail it for all subsequent enqueued // snapshots for the same repository + // // TODO: the code in this state update duplicates large chunks of the logic in #SHARD_STATE_EXECUTOR. // We should refactor it to ideally also go through #SHARD_STATE_EXECUTOR by hand-crafting shard state updates // that encapsulate nodes leaving or indices having been deleted and passing them to the executor instead. 
- SnapshotsInProgress updated = snapshots; + SnapshotsInProgress updatedSnapshots = snapshots; + for (final List snapshotsInRepo : snapshots.entriesByRepo()) { boolean changed = false; final List updatedEntriesForRepo = new ArrayList<>(); final Map knownFailures = new HashMap<>(); - final String repository = snapshotsInRepo.get(0).repository(); - for (SnapshotsInProgress.Entry snapshot : snapshotsInRepo) { - if (statesToUpdate.contains(snapshot.state())) { - if (snapshot.isClone()) { - if (snapshot.shardsByRepoShardId().isEmpty()) { + final String repositoryName = snapshotsInRepo.get(0).repository(); + for (SnapshotsInProgress.Entry snapshotEntry : snapshotsInRepo) { + if (statesToUpdate.contains(snapshotEntry.state())) { + if (snapshotEntry.isClone()) { + if (snapshotEntry.shardsByRepoShardId().isEmpty()) { // Currently initializing clone - if (initializingClones.contains(snapshot.snapshot())) { - updatedEntriesForRepo.add(snapshot); + if (initializingClones.contains(snapshotEntry.snapshot())) { + updatedEntriesForRepo.add(snapshotEntry); } else { - logger.debug("removing not yet start clone operation [{}]", snapshot); + logger.debug("removing not yet start clone operation [{}]", snapshotEntry); changed = true; } } else { // see if any clones may have had a shard become available for execution because of failures - if (deletes.hasExecutingDeletion(repository)) { + if (deletes.hasExecutingDeletion(repositoryName)) { // Currently executing a delete for this repo, no need to try and update any clone operations. // The logic for finishing the delete will update running clones with the latest changes. - updatedEntriesForRepo.add(snapshot); + updatedEntriesForRepo.add(snapshotEntry); continue; } ImmutableOpenMap.Builder clones = null; InFlightShardSnapshotStates inFlightShardSnapshotStates = null; for (Map.Entry failureEntry : knownFailures.entrySet()) { final RepositoryShardId repositoryShardId = failureEntry.getKey(); - final ShardSnapshotStatus existingStatus = snapshot.shardsByRepoShardId().get(repositoryShardId); + final ShardSnapshotStatus existingStatus = snapshotEntry.shardsByRepoShardId() + .get(repositoryShardId); if (ShardSnapshotStatus.UNASSIGNED_QUEUED.equals(existingStatus)) { if (inFlightShardSnapshotStates == null) { inFlightShardSnapshotStates = InFlightShardSnapshotStates.forEntries(updatedEntriesForRepo); @@ -1044,7 +1047,7 @@ public ClusterState execute(ClusterState currentState) { continue; } if (clones == null) { - clones = ImmutableOpenMap.builder(snapshot.shardsByRepoShardId()); + clones = ImmutableOpenMap.builder(snapshotEntry.shardsByRepoShardId()); } // We can use the generation from the shard failure to start the clone operation here // because #processWaitingShardsAndRemovedNodes adds generations to failure statuses that @@ -1060,50 +1063,54 @@ public ClusterState execute(ClusterState currentState) { } if (clones != null) { changed = true; - updatedEntriesForRepo.add(snapshot.withClones(clones.build())); + updatedEntriesForRepo.add(snapshotEntry.withClones(clones.build())); } else { - updatedEntriesForRepo.add(snapshot); + updatedEntriesForRepo.add(snapshotEntry); } } } else { + // Not a clone, and the snapshot is in STARTED or ABORTED state. 
+ ImmutableOpenMap shards = processWaitingShardsAndRemovedNodes( - snapshot, + snapshotEntry, routingTable, nodes, snapshots::isNodeIdForRemoval, knownFailures ); if (shards != null) { - final SnapshotsInProgress.Entry updatedSnapshot = snapshot.withShardStates(shards); + final SnapshotsInProgress.Entry updatedSnapshot = snapshotEntry.withShardStates(shards); changed = true; if (updatedSnapshot.state().completed()) { finishedSnapshots.add(updatedSnapshot); } updatedEntriesForRepo.add(updatedSnapshot); } else { - updatedEntriesForRepo.add(snapshot); + updatedEntriesForRepo.add(snapshotEntry); } } - } else if (snapshot.repositoryStateId() == RepositoryData.UNKNOWN_REPO_GEN) { + } else if (snapshotEntry.repositoryStateId() == RepositoryData.UNKNOWN_REPO_GEN) { // BwC path, older versions could create entries with unknown repo GEN in INIT or ABORTED state that did not // yet write anything to the repository physically. This means we can simply remove these from the cluster // state without having to do any additional cleanup. changed = true; - logger.debug("[{}] was found in dangling INIT or ABORTED state", snapshot); + logger.debug("[{}] was found in dangling INIT or ABORTED state", snapshotEntry); } else { - if (snapshot.state().completed() || completed(snapshot.shardsByRepoShardId().values())) { - finishedSnapshots.add(snapshot); + // Now we're down to completed or un-modified snapshots + + if (snapshotEntry.state().completed() || completed(snapshotEntry.shardsByRepoShardId().values())) { + finishedSnapshots.add(snapshotEntry); } - updatedEntriesForRepo.add(snapshot); + updatedEntriesForRepo.add(snapshotEntry); } } if (changed) { - updated = updated.withUpdatedEntriesForRepo(repository, updatedEntriesForRepo); + updatedSnapshots = updatedSnapshots.withUpdatedEntriesForRepo(repositoryName, updatedEntriesForRepo); } } final ClusterState res = readyDeletions( - updated != snapshots - ? ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, updated).build() + updatedSnapshots != snapshots + ? ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, updatedSnapshots).build() : currentState ).v1(); for (SnapshotDeletionsInProgress.Entry delete : SnapshotDeletionsInProgress.get(res).getEntries()) { @@ -1151,31 +1158,39 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) }); } + /** + * Walks through the snapshot entries' shard snapshots and creates applies updates from looking at removed nodes or indexes and known + * failed shard snapshots on the same shard IDs. 
+ * + * @param nodeIdRemovalPredicate identify any nodes that are marked for removal / in shutdown mode + * @param knownFailures already known failed shard snapshots, but more may be found in this method + * @return an updated map of shard statuses + */ private static ImmutableOpenMap processWaitingShardsAndRemovedNodes( - SnapshotsInProgress.Entry entry, + SnapshotsInProgress.Entry snapshotEntry, RoutingTable routingTable, DiscoveryNodes nodes, Predicate nodeIdRemovalPredicate, Map knownFailures ) { - assert entry.isClone() == false : "clones take a different path"; + assert snapshotEntry.isClone() == false : "clones take a different path"; boolean snapshotChanged = false; ImmutableOpenMap.Builder shards = ImmutableOpenMap.builder(); - for (Map.Entry shardEntry : entry.shardsByRepoShardId().entrySet()) { - ShardSnapshotStatus shardStatus = shardEntry.getValue(); - ShardId shardId = entry.shardId(shardEntry.getKey()); + for (Map.Entry shardSnapshotEntry : snapshotEntry.shardsByRepoShardId().entrySet()) { + ShardSnapshotStatus shardStatus = shardSnapshotEntry.getValue(); + ShardId shardId = snapshotEntry.shardId(shardSnapshotEntry.getKey()); if (shardStatus.equals(ShardSnapshotStatus.UNASSIGNED_QUEUED)) { // this shard snapshot is waiting for a previous snapshot to finish execution for this shard - final ShardSnapshotStatus knownFailure = knownFailures.get(shardEntry.getKey()); + final ShardSnapshotStatus knownFailure = knownFailures.get(shardSnapshotEntry.getKey()); if (knownFailure == null) { final IndexRoutingTable indexShardRoutingTable = routingTable.index(shardId.getIndex()); if (indexShardRoutingTable == null) { // shard became unassigned while queued after a delete or clone operation so we can fail as missing here - assert entry.partial(); + assert snapshotEntry.partial(); snapshotChanged = true; logger.debug("failing snapshot of shard [{}] because index got deleted", shardId); shards.put(shardId, ShardSnapshotStatus.MISSING); - knownFailures.put(shardEntry.getKey(), ShardSnapshotStatus.MISSING); + knownFailures.put(shardSnapshotEntry.getKey(), ShardSnapshotStatus.MISSING); } else { // if no failure is known for the shard we keep waiting shards.put(shardId, shardStatus); @@ -1187,6 +1202,7 @@ private static ImmutableOpenMap processWaitingShar shards.put(shardId, knownFailure); } } else if (shardStatus.state() == ShardState.WAITING || shardStatus.state() == ShardState.PAUSED_FOR_NODE_REMOVAL) { + // The shard primary wasn't assigned, or the shard snapshot was paused because the node was shutting down. IndexRoutingTable indexShardRoutingTable = routingTable.index(shardId.getIndex()); if (indexShardRoutingTable != null) { IndexShardRoutingTable shardRouting = indexShardRoutingTable.shard(shardId.id()); @@ -1208,7 +1224,10 @@ private static ImmutableOpenMap processWaitingShar } else if (shardRouting.primaryShard().started()) { // Shard that we were waiting for has started on a node, let's process it snapshotChanged = true; - logger.trace("starting shard that we were waiting for [{}] on node [{}]", shardId, shardStatus.nodeId()); + logger.debug(""" + Starting shard [{}] with shard generation [{}] that we were waiting to start on node [{}]. 
Previous \ + shard state [{}] + """, shardId, shardStatus.generation(), shardStatus.nodeId(), shardStatus.state()); shards.put(shardId, new ShardSnapshotStatus(primaryNodeId, shardStatus.generation())); continue; } else if (shardRouting.primaryShard().initializing() || shardRouting.primaryShard().relocating()) { @@ -1218,7 +1237,7 @@ private static ImmutableOpenMap processWaitingShar } } } - // Shard that we were waiting for went into unassigned state or disappeared - giving up + // Shard that we were waiting for went into unassigned state or disappeared (index or shard is gone) - giving up snapshotChanged = true; logger.warn("failing snapshot of shard [{}] on unassigned shard [{}]", shardId, shardStatus.nodeId()); final ShardSnapshotStatus failedState = new ShardSnapshotStatus( @@ -1228,7 +1247,7 @@ private static ImmutableOpenMap processWaitingShar "shard is unassigned" ); shards.put(shardId, failedState); - knownFailures.put(shardEntry.getKey(), failedState); + knownFailures.put(shardSnapshotEntry.getKey(), failedState); } else if (shardStatus.state().completed() == false && shardStatus.nodeId() != null) { if (nodes.nodeExists(shardStatus.nodeId())) { shards.put(shardId, shardStatus); @@ -1243,7 +1262,7 @@ private static ImmutableOpenMap processWaitingShar "node left the cluster during snapshot" ); shards.put(shardId, failedState); - knownFailures.put(shardEntry.getKey(), failedState); + knownFailures.put(shardSnapshotEntry.getKey(), failedState); } } else { shards.put(shardId, shardStatus); diff --git a/server/src/main/java/org/elasticsearch/synonyms/SynonymsManagementAPIService.java b/server/src/main/java/org/elasticsearch/synonyms/SynonymsManagementAPIService.java index 2cd35fd6889bd..6b472336f08e9 100644 --- a/server/src/main/java/org/elasticsearch/synonyms/SynonymsManagementAPIService.java +++ b/server/src/main/java/org/elasticsearch/synonyms/SynonymsManagementAPIService.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.admin.indices.analyze.TransportReloadAnalyzersAction; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; @@ -34,6 +35,7 @@ import org.elasticsearch.cluster.routing.Preference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.index.reindex.DeleteByQueryAction; @@ -43,6 +45,7 @@ import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -65,7 +68,6 @@ */ public class SynonymsManagementAPIService { - private static final Logger logger = LogManager.getLogger(SynonymsManagementAPIService.class); private static final String SYNONYMS_INDEX_NAME_PATTERN = ".synonyms-*"; private static final int SYNONYMS_INDEX_FORMAT = 2; private static final String SYNONYMS_INDEX_CONCRETE_NAME = ".synonyms-" + 
SYNONYMS_INDEX_FORMAT; @@ -82,10 +84,14 @@ public class SynonymsManagementAPIService { // Identifies synonym set objects stored in the index private static final String SYNONYM_SET_OBJECT_TYPE = "synonym_set"; private static final String SYNONYM_RULE_ID_SEPARATOR = "|"; - public static final int MAX_SYNONYMS_SETS = 10_000; + private static final int MAX_SYNONYMS_SETS = 10_000; private static final String SYNONYM_RULE_ID_FIELD = SynonymRule.ID_FIELD.getPreferredName(); private static final String SYNONYM_SETS_AGG_NAME = "synonym_sets_aggr"; private static final int SYNONYMS_INDEX_MAPPINGS_VERSION = 1; + private final int maxSynonymsSets; + + // Package private for testing + static Logger logger = LogManager.getLogger(SynonymsManagementAPIService.class); private final Client client; @@ -103,7 +109,13 @@ public class SynonymsManagementAPIService { .build(); public SynonymsManagementAPIService(Client client) { + this(client, MAX_SYNONYMS_SETS); + } + + // Used for testing, so we don't need to test for MAX_SYNONYMS_SETS and put unnecessary memory pressure on the test cluster + SynonymsManagementAPIService(Client client, int maxSynonymsSets) { this.client = new OriginSettingClient(client, SYNONYMS_ORIGIN); + this.maxSynonymsSets = maxSynonymsSets; } /* The synonym index stores two object types: @@ -174,7 +186,7 @@ public void getSynonymsSets(int from, int size, ActionListener() { @@ -205,6 +217,38 @@ public void onFailure(Exception e) { }); } + /** + * Retrieves all synonym rules for a synonym set. + * + * @param synonymSetId + * @param listener + */ + public void getSynonymSetRules(String synonymSetId, ActionListener> listener) { + // Check the number of synonym sets, and issue a warning in case there are more than the maximum allowed + client.prepareSearch(SYNONYMS_ALIAS_NAME) + .setSource(new SearchSourceBuilder().size(0).trackTotalHits(true)) + .execute(listener.delegateFailureAndWrap((searchListener, countResponse) -> { + long totalSynonymRules = countResponse.getHits().getTotalHits().value; + if (totalSynonymRules > maxSynonymsSets) { + logger.warn( + "The number of synonym rules in the synonym set [{}] exceeds the maximum allowed." + + " Inconsistent synonyms results may occur", + synonymSetId + ); + } + getSynonymSetRules(synonymSetId, 0, MAX_SYNONYMS_SETS, listener); + })); + } + + /** + * Retrieves synonym rules for a synonym set, with pagination support. This method does not check that pagination is + * correct in terms of the max_result_window setting. 
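+     * <p>A hypothetical caller-side sketch (assuming the {@code PagedResult<SynonymRule>} listener type this
+     * class uses elsewhere; the {@code service} variable and the set name are illustrative, not part of this change):
+     * <pre>{@code
+     * service.getSynonymSetRules("my-synonyms-set", 0, 100, ActionListener.wrap(
+     *     page -> logger.info("fetched [{}] of [{}] rules", page.pageResults().length, page.totalResults()),
+     *     e -> logger.warn("failed to load synonym rules", e)));
+     * }</pre>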
+ * + * @param synonymSetId + * @param from + * @param size + * @param listener + */ public void getSynonymSetRules(String synonymSetId, int from, int size, ActionListener> listener) { // Retrieves synonym rules, excluding the synonym set object type client.prepareSearch(SYNONYMS_ALIAS_NAME) @@ -257,6 +301,12 @@ private static void logUniqueFailureMessagesWithIndices(List listener) { + if (synonymsSet.length > maxSynonymsSets) { + listener.onFailure( + new IllegalArgumentException("The number of synonyms rules in a synonym set cannot exceed " + maxSynonymsSets) + ); + return; + } deleteSynonymsSetObjects(synonymSetId, listener.delegateFailure((deleteByQueryResponseListener, bulkDeleteResponse) -> { boolean created = bulkDeleteResponse.getDeleted() == 0; final List bulkDeleteFailures = bulkDeleteResponse.getBulkFailures(); @@ -272,20 +322,10 @@ public void putSynonymsSet(String synonymSetId, SynonymRule[] synonymsSet, Actio } // Insert as bulk requests - BulkRequestBuilder bulkRequestBuilder = client.prepareBulk(); - try { - // Insert synonym set object - bulkRequestBuilder.add(createSynonymSetIndexRequest(synonymSetId)); - // Insert synonym rules - for (SynonymRule synonymRule : synonymsSet) { - bulkRequestBuilder.add(createSynonymRuleIndexRequest(synonymSetId, synonymRule)); - } - } catch (IOException ex) { - listener.onFailure(ex); - } - - bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .execute(deleteByQueryResponseListener.delegateFailure((bulkInsertResponseListener, bulkInsertResponse) -> { + bulkUpdateSynonymsSet( + synonymSetId, + synonymsSet, + deleteByQueryResponseListener.delegateFailure((bulkInsertResponseListener, bulkInsertResponse) -> { if (bulkInsertResponse.hasFailures()) { logUniqueFailureMessagesWithIndices( Arrays.stream(bulkInsertResponse.getItems()) @@ -303,26 +343,67 @@ public void putSynonymsSet(String synonymSetId, SynonymRule[] synonymsSet, Actio : UpdateSynonymsResultStatus.UPDATED; reloadAnalyzers(synonymSetId, false, bulkInsertResponseListener, updateSynonymsResultStatus); - })); + }) + ); })); } - public void putSynonymRule(String synonymsSetId, SynonymRule synonymRule, ActionListener listener) { - checkSynonymSetExists(synonymsSetId, listener.delegateFailure((l1, obj) -> { - try { - IndexRequest indexRequest = createSynonymRuleIndexRequest(synonymsSetId, synonymRule).setRefreshPolicy( - WriteRequest.RefreshPolicy.IMMEDIATE - ); - client.index(indexRequest, l1.delegateFailure((l2, indexResponse) -> { - UpdateSynonymsResultStatus updateStatus = indexResponse.status() == RestStatus.CREATED - ? 
UpdateSynonymsResultStatus.CREATED - : UpdateSynonymsResultStatus.UPDATED; + // Open for testing adding more synonyms set than the limit allows for + void bulkUpdateSynonymsSet(String synonymSetId, SynonymRule[] synonymsSet, ActionListener listener) { + BulkRequestBuilder bulkRequestBuilder = client.prepareBulk(); + try { + // Insert synonym set object + bulkRequestBuilder.add(createSynonymSetIndexRequest(synonymSetId)); + // Insert synonym rules + for (SynonymRule synonymRule : synonymsSet) { + bulkRequestBuilder.add(createSynonymRuleIndexRequest(synonymSetId, synonymRule)); + } + } catch (IOException ex) { + listener.onFailure(ex); + } - reloadAnalyzers(synonymsSetId, false, l2, updateStatus); - })); - } catch (IOException e) { - l1.onFailure(e); + bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).execute(listener); + } + + public void putSynonymRule(String synonymsSetId, SynonymRule synonymRule, ActionListener listener) { + checkSynonymSetExists(synonymsSetId, listener.delegateFailureAndWrap((l1, obj) -> { + // Count synonym rules to check if we're at maximum + BoolQueryBuilder queryFilter = QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery(SYNONYMS_SET_FIELD, synonymsSetId)) + .filter(QueryBuilders.termQuery(OBJECT_TYPE_FIELD, SYNONYM_RULE_OBJECT_TYPE)); + if (synonymRule.id() != null) { + // Remove the current synonym rule from the count, so we allow updating a rule at max capacity + queryFilter.mustNot(QueryBuilders.termQuery(SYNONYM_RULE_ID_FIELD, synonymRule.id())); } + client.prepareSearch(SYNONYMS_ALIAS_NAME) + .setQuery(queryFilter) + .setSize(0) + .setPreference(Preference.LOCAL.type()) + .setTrackTotalHits(true) + .execute(l1.delegateFailureAndWrap((searchListener, searchResponse) -> { + long synonymsSetSize = searchResponse.getHits().getTotalHits().value; + if (synonymsSetSize >= maxSynonymsSets) { + listener.onFailure( + new IllegalArgumentException("The number of synonym rules in a synonyms set cannot exceed " + maxSynonymsSets) + ); + } else { + indexSynonymRule(synonymsSetId, synonymRule, searchListener); + } + })); + })); + } + + private void indexSynonymRule(String synonymsSetId, SynonymRule synonymRule, ActionListener listener) + throws IOException { + IndexRequest indexRequest = createSynonymRuleIndexRequest(synonymsSetId, synonymRule).setRefreshPolicy( + WriteRequest.RefreshPolicy.IMMEDIATE + ); + client.index(indexRequest, listener.delegateFailure((l2, indexResponse) -> { + UpdateSynonymsResultStatus updateStatus = indexResponse.status() == RestStatus.CREATED + ? 
UpdateSynonymsResultStatus.CREATED + : UpdateSynonymsResultStatus.UPDATED; + + reloadAnalyzers(synonymsSetId, false, l2, updateStatus); })); } diff --git a/server/src/main/java/org/elasticsearch/tasks/CancellableTask.java b/server/src/main/java/org/elasticsearch/tasks/CancellableTask.java index 8a0aa2033a30e..8a385617bee89 100644 --- a/server/src/main/java/org/elasticsearch/tasks/CancellableTask.java +++ b/server/src/main/java/org/elasticsearch/tasks/CancellableTask.java @@ -109,6 +109,11 @@ public final boolean notifyIfCancelled(ActionListener listener) { return true; } + @Override + public String toString() { + return "CancellableTask{" + super.toString() + ", reason='" + reason + '\'' + ", isCancelled=" + isCancelled + '}'; + } + private TaskCancelledException getTaskCancelledException() { assert Thread.holdsLock(this); assert isCancelled; diff --git a/server/src/main/java/org/elasticsearch/tasks/Task.java b/server/src/main/java/org/elasticsearch/tasks/Task.java index 83ee08574df4e..46eb59c3a8cd8 100644 --- a/server/src/main/java/org/elasticsearch/tasks/Task.java +++ b/server/src/main/java/org/elasticsearch/tasks/Task.java @@ -225,6 +225,8 @@ public String toString() { + parentTask + ", startTime=" + startTime + + ", headers=" + + headers + ", startTimeNanos=" + startTimeNanos + '}'; diff --git a/server/src/main/java/org/elasticsearch/transport/NodeNotConnectedException.java b/server/src/main/java/org/elasticsearch/transport/NodeNotConnectedException.java index 6e1f29353f78a..b54d6bdfe3d75 100644 --- a/server/src/main/java/org/elasticsearch/transport/NodeNotConnectedException.java +++ b/server/src/main/java/org/elasticsearch/transport/NodeNotConnectedException.java @@ -27,4 +27,9 @@ public NodeNotConnectedException(DiscoveryNode node, String msg) { public NodeNotConnectedException(StreamInput in) throws IOException { super(in); } + + @Override + public Throwable fillInStackTrace() { + return this; // this exception doesn't imply a bug, no need for a stack trace + } } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index c3d53855a9c75..33ea35ecffd94 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -17,6 +17,7 @@ import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.io.stream.RecyclerBytesStreamOutput; @@ -518,7 +519,19 @@ public ConnectionManager.ConnectionValidator connectionValidator(DiscoveryNode n handshake(newConnection, actualProfile.getHandshakeTimeout(), Predicates.always(), listener.map(resp -> { final DiscoveryNode remote = resp.discoveryNode; if (node.equals(remote) == false) { - throw new ConnectTransportException(node, "handshake failed. unexpected remote node " + remote); + throw new ConnectTransportException( + node, + Strings.format( + """ + Connecting to [%s] failed: expected to connect to [%s] but found [%s] instead. Ensure that each node has \ + its own distinct publish address, and that your network is configured so that every connection to a node's \ + publish address is routed to the correct node. 
See %s for more information.""", + node.getAddress(), + node.descriptionWithoutAttributes(), + remote.descriptionWithoutAttributes(), + ReferenceDocs.NETWORK_BINDING_AND_PUBLISHING + ) + ); } return null; })); diff --git a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.KnnVectorsFormat b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.KnnVectorsFormat index a9d00d1c441fa..da2a0c4b90f30 100644 --- a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.KnnVectorsFormat +++ b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.KnnVectorsFormat @@ -1,3 +1,5 @@ org.elasticsearch.index.codec.vectors.ES813FlatVectorFormat org.elasticsearch.index.codec.vectors.ES813Int8FlatVectorFormat org.elasticsearch.index.codec.vectors.ES814HnswScalarQuantizedVectorsFormat +org.elasticsearch.index.codec.vectors.ES815HnswBitVectorsFormat +org.elasticsearch.index.codec.vectors.ES815BitFlatVectorFormat diff --git a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification index 5192ea2b4b108..a9d9c6a5a1938 100644 --- a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification +++ b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification @@ -14,6 +14,7 @@ org.elasticsearch.rest.RestFeatures org.elasticsearch.indices.IndicesFeatures org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures org.elasticsearch.index.mapper.MapperFeatures +org.elasticsearch.search.SearchFeatures org.elasticsearch.search.retriever.RetrieversFeatures org.elasticsearch.script.ScriptFeatures org.elasticsearch.reservedstate.service.FileSettingsFeatures diff --git a/server/src/main/resources/org/elasticsearch/TransportVersions.csv b/server/src/main/resources/org/elasticsearch/TransportVersions.csv index ba1dab5589ee2..7d2697539fa13 100644 --- a/server/src/main/resources/org/elasticsearch/TransportVersions.csv +++ b/server/src/main/resources/org/elasticsearch/TransportVersions.csv @@ -123,3 +123,5 @@ 8.13.4,8595001 8.14.0,8636001 8.14.1,8636001 +8.14.2,8636001 +8.14.3,8636001 diff --git a/server/src/main/resources/org/elasticsearch/bootstrap/security.policy b/server/src/main/resources/org/elasticsearch/bootstrap/security.policy index 681a52eb84b8a..812dc30081d84 100644 --- a/server/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/server/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -70,19 +70,12 @@ grant codeBase "${codebase.elasticsearch-cli}" { grant codeBase "${codebase.jna}" { // for registering native methods permission java.lang.RuntimePermission "accessDeclaredMembers"; - permission java.lang.reflect.ReflectPermission "newProxyInPackage.org.elasticsearch.preallocate"; }; grant codeBase "${codebase.log4j-api}" { permission java.lang.RuntimePermission "getClassLoader"; }; -grant codeBase "${codebase.elasticsearch-preallocate}" { - // for registering native methods - permission java.lang.RuntimePermission "accessDeclaredMembers"; - permission java.lang.reflect.ReflectPermission "newProxyInPackage.org.elasticsearch.preallocate"; -}; - grant codeBase "${codebase.elasticsearch-simdvec}" { // for access MemorySegmentIndexInput internals permission java.lang.RuntimePermission "accessDeclaredMembers"; diff --git a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json 
b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json index f3e5bd7a375f1..febcaec1ba057 100644 --- a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json +++ b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json @@ -33,5 +33,9 @@ "CONTACT_SUPPORT": "troubleshooting.html#troubleshooting-contact-support", "UNASSIGNED_SHARDS": "red-yellow-cluster-status.html", "EXECUTABLE_JNA_TMPDIR": "executable-jna-tmpdir.html", - "NETWORK_THREADING_MODEL": "modules-network.html#modules-network-threading-model" + "NETWORK_THREADING_MODEL": "modules-network.html#modules-network-threading-model", + "ALLOCATION_EXPLAIN_API": "cluster-allocation-explain.html", + "NETWORK_BINDING_AND_PUBLISHING": "modules-network.html#modules-network-binding-publishing", + "SNAPSHOT_REPOSITORY_ANALYSIS": "repo-analysis-api.html", + "S3_COMPATIBLE_REPOSITORIES": "repository-s3.html#repository-s3-compatible-services" } diff --git a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv index b7ca55a2b2b0d..f177ab1468cb2 100644 --- a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv +++ b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv @@ -123,3 +123,5 @@ 8.13.4,8503000 8.14.0,8505000 8.14.1,8505000 +8.14.2,8505000 +8.14.3,8505000 diff --git a/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java b/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java index 0543bce08a4f0..463203c1357b9 100644 --- a/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java +++ b/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java @@ -23,7 +23,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -308,25 +307,13 @@ public String toString() { }); assertThat(listener.toString(), equalTo("notifyOnce[inner-listener]")); - final var threads = new Thread[between(1, 10)]; - final var startBarrier = new CyclicBarrier(threads.length); - for (int i = 0; i < threads.length; i++) { - threads[i] = new Thread(() -> { - safeAwait(startBarrier); - if (randomBoolean()) { - listener.onResponse(null); - } else { - listener.onFailure(new RuntimeException("test")); - } - }); - } - - for (Thread thread : threads) { - thread.start(); - } - for (Thread thread : threads) { - thread.join(); - } + startInParallel(between(1, 10), i -> { + if (randomBoolean()) { + listener.onResponse(null); + } else { + listener.onFailure(new RuntimeException("test")); + } + }); assertTrue(completed.get()); } diff --git a/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java b/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java index 289ab715e3e78..7afa7adedc7bf 100644 --- a/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java +++ b/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java @@ -37,7 +37,7 @@ import org.elasticsearch.rest.action.admin.cluster.RestNodesInfoAction; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; -import org.elasticsearch.telemetry.tracing.Tracer; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; 
import org.elasticsearch.threadpool.ThreadPool; @@ -123,7 +123,7 @@ public void testSetupRestHandlerContainsKnownBuiltin() { null, usageService, null, - null, + TelemetryProvider.NOOP, mock(ClusterService.class), null, List.of(), @@ -187,7 +187,7 @@ public String getName() { null, usageService, null, - null, + TelemetryProvider.NOOP, mock(ClusterService.class), null, List.of(), @@ -244,7 +244,7 @@ public List getRestHandlers( null, usageService, null, - null, + TelemetryProvider.NOOP, mock(ClusterService.class), null, List.of(), @@ -335,7 +335,7 @@ public void test3rdPartyRestControllerIsNotInstalled() { null, usageService, null, - null, + TelemetryProvider.NOOP, mock(ClusterService.class), null, List.of(), @@ -388,10 +388,10 @@ public RestController getRestController( NodeClient client, CircuitBreakerService circuitBreakerService, UsageService usageService, - Tracer tracer + TelemetryProvider telemetryProvider ) { if (installController) { - return new RestController(interceptor, client, circuitBreakerService, usageService, tracer); + return new RestController(interceptor, client, circuitBreakerService, usageService, telemetryProvider); } else { return null; } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java index eb1a64ef66bbd..d78dbae509b63 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java @@ -188,7 +188,9 @@ public void testFindAnyUnassignedShardToExplain() { allOf( // no point in asserting the precise wording of the message into this test, but we care that it contains these bits: containsString("No shard was specified in the request"), - containsString("specify the target shard in the request") + containsString("specify the target shard in the request"), + containsString("https://www.elastic.co/guide/en/elasticsearch/reference"), + containsString("cluster-allocation-explain.html") ) ); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java index ed81f6750aa27..463446f8b36ed 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java @@ -149,7 +149,9 @@ public void testRandomShardExplanationToXContent() throws Exception { allOf( // no point in asserting the precise wording of the message into this test, but we care that the note contains these bits: containsString("No shard was specified in the explain API request"), - containsString("specify the target shard in the request") + containsString("specify the target shard in the request"), + containsString("https://www.elastic.co/guide/en/elasticsearch/reference"), + containsString("cluster-allocation-explain.html") ) ); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java index 859ee68a7846d..6f2af8414187e 100644 --- 
a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java @@ -17,6 +17,7 @@ import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.MasterNodeRequestHelper; import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.action.support.nodes.BaseNodesRequest; import org.elasticsearch.action.support.nodes.BaseNodesResponse; @@ -470,15 +471,16 @@ private boolean shouldHaveOrigin(String action, TransportRequest request) { */ return false; } - if (false == (request instanceof IndicesRequest)) { + + if (MasterNodeRequestHelper.unwrapTermOverride(request) instanceof IndicesRequest indicesRequest) { + /* + * When the API Tasks API makes an indices request it only every + * targets the .tasks index. Other requests come from the tests. + */ + return Arrays.equals(new String[] { ".tasks" }, indicesRequest.indices()); + } else { return false; } - IndicesRequest ir = (IndicesRequest) request; - /* - * When the API Tasks API makes an indices request it only every - * targets the .tasks index. Other requests come from the tests. - */ - return Arrays.equals(new String[] { ".tasks" }, ir.indices()); } } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java index 70c33283c7475..b71d16ee530f8 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.cluster.reroute; +import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; @@ -28,6 +29,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.test.AbstractChunkedSerializingTestCase; import org.elasticsearch.test.ESTestCase; @@ -186,9 +188,13 @@ public void testToXContentWithDeprecatedClusterState() { "0": [] }, "rollover_info": {}, + "mappings_updated_version" : %s, "system": false, "timestamp_range": { "shards": [] + }, + "event_ingested_range": { + "unknown":true } } }, @@ -213,6 +219,7 @@ public void testToXContentWithDeprecatedClusterState() { Version.CURRENT, IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current(), + IndexVersion.current(), IndexVersion.current() ), """ @@ -225,7 +232,7 @@ public void testToXContentWithDeprecatedClusterStateAndMetadata() { assertXContent( createClusterRerouteResponse(createClusterState()), new ToXContent.MapParams(Map.of("metric", "metadata", "settings_filter", "index.number*,index.version.created")), - """ + Strings.format(""" { "acknowledged" : true, "state" : { @@ -265,9 +272,13 @@ public void testToXContentWithDeprecatedClusterStateAndMetadata() { "0" : [ ] }, "rollover_info" : { }, + "mappings_updated_version" : %s, "system" : false, "timestamp_range" : { "shards" : [ ] + }, + "event_ingested_range" : { + 
"unknown" : true } } }, @@ -277,7 +288,7 @@ public void testToXContentWithDeprecatedClusterStateAndMetadata() { "reserved_state":{} } } - }""", + }""", IndexVersion.current()), """ The [state] field in the response to the reroute API is deprecated and will be removed in a future version. \ Specify ?metric=none to adopt the future behaviour.""" @@ -351,6 +362,7 @@ private static ClusterState createClusterState() { .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .build() ) + .eventIngestedRange(IndexLongFieldRange.UNKNOWN, TransportVersion.current()) .build(), false ) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java index b3caf93fbcddf..24c0f9d97800b 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java @@ -49,6 +49,9 @@ public void testBwcSerialization() throws Exception { in.setTransportVersion(out.getTransportVersion()); assertEquals(request.getParentTask(), TaskId.readFromStream(in)); assertEquals(request.masterNodeTimeout(), in.readTimeValue()); + if (in.getTransportVersion().onOrAfter(TransportVersions.VERSIONED_MASTER_NODE_REQUESTS)) { + assertEquals(request.masterTerm(), in.readVLong()); + } assertEquals(request.ackTimeout(), in.readTimeValue()); assertArrayEquals(request.indices(), in.readStringArray()); final IndicesOptions indicesOptions = IndicesOptions.readIndicesOptions(in); @@ -75,6 +78,9 @@ public void testBwcSerialization() throws Exception { out.setTransportVersion(version); sample.getParentTask().writeTo(out); out.writeTimeValue(sample.masterNodeTimeout()); + if (out.getTransportVersion().onOrAfter(TransportVersions.VERSIONED_MASTER_NODE_REQUESTS)) { + out.writeVLong(sample.masterTerm()); + } out.writeTimeValue(sample.ackTimeout()); out.writeStringArray(sample.indices()); sample.indicesOptions().writeIndicesOptions(out); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/refresh/TransportUnpromotableShardRefreshActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/refresh/TransportUnpromotableShardRefreshActionTests.java new file mode 100644 index 0000000000000..4fcb72a02b83a --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/refresh/TransportUnpromotableShardRefreshActionTests.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.admin.indices.refresh; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.test.ClusterServiceUtils; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TransportUnpromotableShardRefreshActionTests extends ESTestCase { + private ThreadPool threadPool; + private ClusterService clusterService; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("TransportUnpromotableShardRefreshActionTests"); + clusterService = ClusterServiceUtils.createClusterService(threadPool); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + clusterService.close(); + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + } + + public void testRespondOKToRefreshRequestBeforeShardIsCreated() { + final var shardId = new ShardId(new Index(randomIdentifier(), randomUUID()), between(0, 3)); + final var shardRouting = TestShardRouting.newShardRouting(shardId, randomUUID(), true, ShardRoutingState.STARTED); + final var indexShardRoutingTable = new IndexShardRoutingTable.Builder(shardId).addShard(shardRouting).build(); + + final var request = new UnpromotableShardRefreshRequest( + indexShardRoutingTable, + randomNonNegativeLong(), + randomNonNegativeLong(), + randomBoolean() + ); + + final TransportService transportService = mock(TransportService.class); + when(transportService.getThreadPool()).thenReturn(threadPool); + final IndicesService indicesService = mock(IndicesService.class); + if (randomBoolean()) { + when(indicesService.indexService(shardId.getIndex())).thenReturn(null); + } else { + final IndexService indexService = mock(IndexService.class); + when(indicesService.indexService(shardId.getIndex())).thenReturn(indexService); + when(indexService.hasShard(shardId.id())).thenReturn(false); + } + + final var action = new TransportUnpromotableShardRefreshAction( + clusterService, + transportService, + mock(ShardStateAction.class), + mock(ActionFilters.class), + indicesService + ); + + final PlainActionFuture future = new PlainActionFuture<>(); + action.unpromotableShardOperation(mock(Task.class), request, future); + assertThat(safeGet(future), sameInstance(ActionResponse.Empty.INSTANCE)); + } +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresActionTests.java index ffe42722b308d..a51e9b86858d7 100644 --- 
a/server/src/test/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresActionTests.java @@ -46,6 +46,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ExecutionException; import java.util.concurrent.Semaphore; import java.util.stream.Collectors; @@ -68,7 +69,7 @@ void runTest() { future ); - final var response = future.result(); + final var response = safeGet(future); assertThat(response.getFailures(), empty()); assertThat(response.getStoreStatuses(), anEmptyMap()); assertThat(shardsWithFailures, empty()); @@ -138,7 +139,7 @@ void runTest() { listExpected = false; assertFalse(future.isDone()); deterministicTaskQueue.runAllTasks(); - expectThrows(TaskCancelledException.class, future::result); + expectThrows(ExecutionException.class, TaskCancelledException.class, future::result); } }); } @@ -159,7 +160,10 @@ void runTest() { failOneRequest = true; deterministicTaskQueue.runAllTasks(); assertFalse(failOneRequest); - assertEquals("simulated", expectThrows(ElasticsearchException.class, future::result).getMessage()); + assertEquals( + "simulated", + expectThrows(ExecutionException.class, ElasticsearchException.class, future::result).getMessage() + ); } }); } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportAbstractBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportAbstractBulkActionTests.java new file mode 100644 index 0000000000000..4ce0aa6a0c6c2 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportAbstractBulkActionTests.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.bulk; + +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.test.ESTestCase; + +import java.util.Collections; + +public class TransportAbstractBulkActionTests extends ESTestCase { + + public void testGetIndexWriteRequest() { + IndexRequest indexRequest = new IndexRequest("index").id("id1").source(Collections.emptyMap()); + UpdateRequest upsertRequest = new UpdateRequest("index", "id1").upsert(indexRequest).script(mockScript("1")); + UpdateRequest docAsUpsertRequest = new UpdateRequest("index", "id2").doc(indexRequest).docAsUpsert(true); + UpdateRequest scriptedUpsert = new UpdateRequest("index", "id2").upsert(indexRequest).script(mockScript("1")).scriptedUpsert(true); + + assertEquals(TransportAbstractBulkAction.getIndexWriteRequest(indexRequest), indexRequest); + assertEquals(TransportAbstractBulkAction.getIndexWriteRequest(upsertRequest), indexRequest); + assertEquals(TransportAbstractBulkAction.getIndexWriteRequest(docAsUpsertRequest), indexRequest); + assertEquals(TransportAbstractBulkAction.getIndexWriteRequest(scriptedUpsert), indexRequest); + + DeleteRequest deleteRequest = new DeleteRequest("index", "id"); + assertNull(TransportAbstractBulkAction.getIndexWriteRequest(deleteRequest)); + + UpdateRequest badUpsertRequest = new UpdateRequest("index", "id1"); + assertNull(TransportAbstractBulkAction.getIndexWriteRequest(badUpsertRequest)); + } +} diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java index d7adf3aa8b4e2..3683c2c271739 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java @@ -146,12 +146,12 @@ class TestTransportBulkAction extends TransportBulkAction { TestTransportBulkAction() { super( - threadPool, + TransportBulkActionIngestTests.this.threadPool, transportService, - clusterService, + TransportBulkActionIngestTests.this.clusterService, ingestService, mockFeatureService, - new NodeClient(Settings.EMPTY, threadPool), + new NodeClient(Settings.EMPTY, TransportBulkActionIngestTests.this.threadPool), new ActionFilters(Collections.emptySet()), TestIndexNameExpressionResolver.newInstance(), new IndexingPressure(SETTINGS), diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java index 1a34b1e856a5e..ca1d1ac49832e 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java @@ -93,7 +93,7 @@ class TestTransportBulkAction extends TransportBulkAction { super( TransportBulkActionTests.this.threadPool, transportService, - clusterService, + TransportBulkActionTests.this.clusterService, null, mockFeatureService, new NodeClient(Settings.EMPTY, TransportBulkActionTests.this.threadPool), @@ -189,24 +189,6 @@ public void testDeleteNonExistingDocExternalGteVersionCreatesIndex() throws Exce assertTrue(bulkAction.indexCreated); } - public void testGetIndexWriteRequest() throws Exception { - IndexRequest indexRequest = new IndexRequest("index").id("id1").source(Collections.emptyMap()); - UpdateRequest 
upsertRequest = new UpdateRequest("index", "id1").upsert(indexRequest).script(mockScript("1")); - UpdateRequest docAsUpsertRequest = new UpdateRequest("index", "id2").doc(indexRequest).docAsUpsert(true); - UpdateRequest scriptedUpsert = new UpdateRequest("index", "id2").upsert(indexRequest).script(mockScript("1")).scriptedUpsert(true); - - assertEquals(TransportBulkAction.getIndexWriteRequest(indexRequest), indexRequest); - assertEquals(TransportBulkAction.getIndexWriteRequest(upsertRequest), indexRequest); - assertEquals(TransportBulkAction.getIndexWriteRequest(docAsUpsertRequest), indexRequest); - assertEquals(TransportBulkAction.getIndexWriteRequest(scriptedUpsert), indexRequest); - - DeleteRequest deleteRequest = new DeleteRequest("index", "id"); - assertNull(TransportBulkAction.getIndexWriteRequest(deleteRequest)); - - UpdateRequest badUpsertRequest = new UpdateRequest("index", "id1"); - assertNull(TransportBulkAction.getIndexWriteRequest(badUpsertRequest)); - } - public void testProhibitAppendWritesInBackingIndices() throws Exception { String dataStreamName = "logs-foobar"; ClusterState clusterState = createDataStream(dataStreamName); @@ -217,7 +199,10 @@ public void testProhibitAppendWritesInBackingIndices() throws Exception { IndexRequest invalidRequest1 = new IndexRequest(backingIndexName).opType(DocWriteRequest.OpType.CREATE); Exception e = expectThrows( IllegalArgumentException.class, - () -> TransportBulkAction.prohibitAppendWritesInBackingIndices(invalidRequest1, metadata) + () -> TransportBulkAction.prohibitAppendWritesInBackingIndices( + invalidRequest1, + metadata.getIndicesLookup().get(invalidRequest1.index()) + ) ); assertThat( e.getMessage(), @@ -231,7 +216,10 @@ public void testProhibitAppendWritesInBackingIndices() throws Exception { IndexRequest invalidRequest2 = new IndexRequest(backingIndexName).opType(DocWriteRequest.OpType.INDEX); e = expectThrows( IllegalArgumentException.class, - () -> TransportBulkAction.prohibitAppendWritesInBackingIndices(invalidRequest2, metadata) + () -> TransportBulkAction.prohibitAppendWritesInBackingIndices( + invalidRequest2, + metadata.getIndicesLookup().get(invalidRequest2.index()) + ) ); assertThat( e.getMessage(), @@ -245,28 +233,28 @@ public void testProhibitAppendWritesInBackingIndices() throws Exception { DocWriteRequest validRequest = new IndexRequest(backingIndexName).opType(DocWriteRequest.OpType.INDEX) .setIfSeqNo(1) .setIfPrimaryTerm(1); - TransportBulkAction.prohibitAppendWritesInBackingIndices(validRequest, metadata); + TransportBulkAction.prohibitAppendWritesInBackingIndices(validRequest, metadata.getIndicesLookup().get(validRequest.index())); validRequest = new DeleteRequest(backingIndexName); - TransportBulkAction.prohibitAppendWritesInBackingIndices(validRequest, metadata); + TransportBulkAction.prohibitAppendWritesInBackingIndices(validRequest, metadata.getIndicesLookup().get(validRequest.index())); validRequest = new UpdateRequest(backingIndexName, "_id"); - TransportBulkAction.prohibitAppendWritesInBackingIndices(validRequest, metadata); + TransportBulkAction.prohibitAppendWritesInBackingIndices(validRequest, metadata.getIndicesLookup().get(validRequest.index())); // Testing append only write via ds name validRequest = new IndexRequest(dataStreamName).opType(DocWriteRequest.OpType.CREATE); - TransportBulkAction.prohibitAppendWritesInBackingIndices(validRequest, metadata); + TransportBulkAction.prohibitAppendWritesInBackingIndices(validRequest, metadata.getIndicesLookup().get(validRequest.index())); validRequest = 
new IndexRequest(dataStreamName).opType(DocWriteRequest.OpType.INDEX); - TransportBulkAction.prohibitAppendWritesInBackingIndices(validRequest, metadata); + TransportBulkAction.prohibitAppendWritesInBackingIndices(validRequest, metadata.getIndicesLookup().get(validRequest.index())); // Append only for a backing index that doesn't exist is allowed: validRequest = new IndexRequest(DataStream.getDefaultBackingIndexName("logs-barbaz", 1)).opType(DocWriteRequest.OpType.CREATE); - TransportBulkAction.prohibitAppendWritesInBackingIndices(validRequest, metadata); + TransportBulkAction.prohibitAppendWritesInBackingIndices(validRequest, metadata.getIndicesLookup().get(validRequest.index())); // Some other index names: validRequest = new IndexRequest("my-index").opType(DocWriteRequest.OpType.CREATE); - TransportBulkAction.prohibitAppendWritesInBackingIndices(validRequest, metadata); + TransportBulkAction.prohibitAppendWritesInBackingIndices(validRequest, metadata.getIndicesLookup().get(validRequest.index())); validRequest = new IndexRequest("foobar").opType(DocWriteRequest.OpType.CREATE); - TransportBulkAction.prohibitAppendWritesInBackingIndices(validRequest, metadata); + TransportBulkAction.prohibitAppendWritesInBackingIndices(validRequest, metadata.getIndicesLookup().get(validRequest.index())); } public void testProhibitCustomRoutingOnDataStream() throws Exception { @@ -279,7 +267,10 @@ public void testProhibitCustomRoutingOnDataStream() throws Exception { .routing("custom"); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, - () -> prohibitCustomRoutingOnDataStream(writeRequestAgainstDataStream, metadata) + () -> prohibitCustomRoutingOnDataStream( + writeRequestAgainstDataStream, + metadata.getIndicesLookup().get(writeRequestAgainstDataStream.index()) + ) ); assertThat( exception.getMessage(), @@ -293,7 +284,7 @@ public void testProhibitCustomRoutingOnDataStream() throws Exception { DocWriteRequest writeRequestAgainstIndex = new IndexRequest(DataStream.getDefaultBackingIndexName(dataStreamName, 1L)).opType( DocWriteRequest.OpType.INDEX ).routing("custom"); - prohibitCustomRoutingOnDataStream(writeRequestAgainstIndex, metadata); + prohibitCustomRoutingOnDataStream(writeRequestAgainstIndex, metadata.getIndicesLookup().get(writeRequestAgainstIndex.index())); } public void testOnlySystem() { @@ -415,13 +406,13 @@ public void testResolveFailureStoreFromMetadata() throws Exception { .build(); // Data stream with failure store should store failures - assertThat(TransportBulkAction.shouldStoreFailure(dataStreamWithFailureStore, metadata, testTime), is(true)); + assertThat(TransportBulkAction.shouldStoreFailureInternal(dataStreamWithFailureStore, metadata, testTime), is(true)); // Data stream without failure store should not - assertThat(TransportBulkAction.shouldStoreFailure(dataStreamWithoutFailureStore, metadata, testTime), is(false)); + assertThat(TransportBulkAction.shouldStoreFailureInternal(dataStreamWithoutFailureStore, metadata, testTime), is(false)); // An index should not be considered for failure storage - assertThat(TransportBulkAction.shouldStoreFailure(backingIndex1.getIndex().getName(), metadata, testTime), is(false)); + assertThat(TransportBulkAction.shouldStoreFailureInternal(backingIndex1.getIndex().getName(), metadata, testTime), is(false)); // even if that index is itself a failure store - assertThat(TransportBulkAction.shouldStoreFailure(failureStoreIndex1.getIndex().getName(), metadata, testTime), is(false)); + 
assertThat(TransportBulkAction.shouldStoreFailureInternal(failureStoreIndex1.getIndex().getName(), metadata, testTime), is(false)); } public void testResolveFailureStoreFromTemplate() throws Exception { @@ -452,11 +443,11 @@ public void testResolveFailureStoreFromTemplate() throws Exception { .build(); // Data stream with failure store should store failures - assertThat(TransportBulkAction.shouldStoreFailure(dsTemplateWithFailureStore + "-1", metadata, testTime), is(true)); + assertThat(TransportBulkAction.shouldStoreFailureInternal(dsTemplateWithFailureStore + "-1", metadata, testTime), is(true)); // Data stream without failure store should not - assertThat(TransportBulkAction.shouldStoreFailure(dsTemplateWithoutFailureStore + "-1", metadata, testTime), is(false)); + assertThat(TransportBulkAction.shouldStoreFailureInternal(dsTemplateWithoutFailureStore + "-1", metadata, testTime), is(false)); // An index template should not be considered for failure storage - assertThat(TransportBulkAction.shouldStoreFailure(indexTemplate + "-1", metadata, testTime), is(false)); + assertThat(TransportBulkAction.shouldStoreFailureInternal(indexTemplate + "-1", metadata, testTime), is(false)); } private BulkRequest buildBulkRequest(List indices) { diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index 18418dda59a3b..1f54d8dd1edd5 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -51,6 +51,7 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool.Names; +import org.mockito.ArgumentCaptor; import java.io.IOException; import java.util.Collections; @@ -120,7 +121,7 @@ public void testExecuteBulkIndexRequest() throws Exception { null, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer(), - listener -> {}, + (listener, mappingVersion) -> {}, ASSERTING_DONE_LISTENER, DocumentParsingProvider.EMPTY_INSTANCE ); @@ -152,7 +153,7 @@ public void testExecuteBulkIndexRequest() throws Exception { null, threadPool::absoluteTimeInMillis, new ThrowingMappingUpdatePerformer(new RuntimeException("fail")), - listener -> {}, + (listener, mappingVersion) -> {}, ASSERTING_DONE_LISTENER, DocumentParsingProvider.EMPTY_INSTANCE ); @@ -209,7 +210,7 @@ public void testSkipBulkIndexRequestIfAborted() throws Exception { null, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer(), - listener -> {}, + (listener, mappingVersion) -> {}, ActionListener.runAfter(ActionTestUtils.assertNoFailureListener(result -> { // since at least 1 item passed, the tran log location should exist, assertThat(((WritePrimaryResult) result).location, notNullValue()); @@ -285,7 +286,7 @@ public void testExecuteBulkIndexRequestWithMappingUpdates() throws Exception { assertNotNull(update); updateCalled.incrementAndGet(); listener.onResponse(null); - }, listener -> listener.onResponse(null), ASSERTING_DONE_LISTENER, DocumentParsingProvider.EMPTY_INSTANCE); + }, (listener, mappingVersion) -> listener.onResponse(null), ASSERTING_DONE_LISTENER, DocumentParsingProvider.EMPTY_INSTANCE); assertTrue(context.isInitial()); assertTrue(context.hasMoreOperationsToExecute()); assertThat(context.getUpdateRetryCounter(), equalTo(0)); @@ -304,7 +305,7 @@ public void 
testExecuteBulkIndexRequestWithMappingUpdates() throws Exception { null, threadPool::absoluteTimeInMillis, (update, shardId, listener) -> fail("should not have had to update the mappings"), - listener -> {}, + (listener, mappingVersion) -> {}, ASSERTING_DONE_LISTENER, DocumentParsingProvider.EMPTY_INSTANCE ); @@ -345,7 +346,7 @@ public void testExecuteBulkIndexRequestWithErrorWhileUpdatingMapping() throws Ex null, threadPool::absoluteTimeInMillis, errorOnWait == false ? new ThrowingMappingUpdatePerformer(err) : new NoopMappingUpdatePerformer(), - errorOnWait ? listener -> listener.onFailure(err) : listener -> listener.onResponse(null), + errorOnWait ? (listener, mappingVersion) -> listener.onFailure(err) : (listener, mappingVersion) -> listener.onResponse(null), new LatchedActionListener<>(new ActionListener() { @Override public void onResponse(Void aVoid) {} @@ -398,7 +399,7 @@ public void testExecuteBulkDeleteRequest() throws Exception { null, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer(), - listener -> {}, + (listener, mappingVersion) -> {}, ASSERTING_DONE_LISTENER, DocumentParsingProvider.EMPTY_INSTANCE ); @@ -446,7 +447,7 @@ public void testExecuteBulkDeleteRequest() throws Exception { null, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer(), - listener -> {}, + (listener, mappingVersion) -> {}, ASSERTING_DONE_LISTENER, DocumentParsingProvider.EMPTY_INSTANCE ); @@ -510,7 +511,7 @@ public void testNoopUpdateRequest() throws Exception { updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer(), - listener -> {}, + (listener, mappingVersion) -> {}, ASSERTING_DONE_LISTENER, DocumentParsingProvider.EMPTY_INSTANCE ); @@ -566,7 +567,7 @@ public void testUpdateRequestWithFailure() throws Exception { updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer(), - listener -> {}, + (listener, mappingVersion) -> {}, ASSERTING_DONE_LISTENER, DocumentParsingProvider.EMPTY_INSTANCE ); @@ -588,6 +589,7 @@ public void testUpdateRequestWithFailure() throws Exception { assertThat(failure.getStatus(), equalTo(RestStatus.INTERNAL_SERVER_ERROR)); } + @SuppressWarnings("unchecked") public void testUpdateRequestWithConflictFailure() throws Exception { IndexSettings indexSettings = new IndexSettings(indexMetadata(), Settings.EMPTY); int retries = randomInt(4); @@ -631,7 +633,7 @@ public void testUpdateRequestWithConflictFailure() throws Exception { updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer(), - listener -> listener.onResponse(null), + (listener, mappingVersion) -> listener.onResponse(null), ASSERTING_DONE_LISTENER, documentParsingProvider ); @@ -651,11 +653,14 @@ public void testUpdateRequestWithConflictFailure() throws Exception { assertThat(failure.getCause(), equalTo(err)); assertThat(failure.getStatus(), equalTo(RestStatus.CONFLICT)); - // we have set noParsedBytesToReport on the IndexRequest, like it happens with updates by script. - verify(documentParsingProvider, times(0)).newDocumentSizeObserver(); - verify(documentParsingProvider, times(0)).newFixedSizeDocumentObserver(any(Integer.class)); + // we have set 0 value on normalisedBytesParsed on the IndexRequest, like it happens with updates by script. 
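+        // The captor grabs the IndexRequest handed to newDocumentSizeObserver on the initial attempt and on each retry;
+        // times(retries + 1) checks the invocation count, and the assertion below checks that normalisedBytesParsed stays 0.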
+ ArgumentCaptor argument = ArgumentCaptor.forClass(IndexRequest.class); + verify(documentParsingProvider, times(retries + 1)).newDocumentSizeObserver(argument.capture()); + IndexRequest value = argument.getValue(); + assertThat(value.getNormalisedBytesParsed(), equalTo(0L)); } + @SuppressWarnings("unchecked") public void testUpdateRequestWithSuccess() throws Exception { IndexSettings indexSettings = new IndexSettings(indexMetadata(), Settings.EMPTY); DocWriteRequest writeRequest = new UpdateRequest("index", "id").doc(Requests.INDEX_CONTENT_TYPE, "field", "value"); @@ -697,7 +702,7 @@ public void testUpdateRequestWithSuccess() throws Exception { updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer(), - listener -> {}, + (listener, mappingVersion) -> {}, ASSERTING_DONE_LISTENER, documentParsingProvider ); @@ -715,8 +720,11 @@ public void testUpdateRequestWithSuccess() throws Exception { DocWriteResponse response = primaryResponse.getResponse(); assertThat(response.status(), equalTo(created ? RestStatus.CREATED : RestStatus.OK)); assertThat(response.getSeqNo(), equalTo(13L)); - verify(documentParsingProvider, times(0)).newDocumentSizeObserver(); - verify(documentParsingProvider, times(1)).newFixedSizeDocumentObserver(eq(100L)); + + ArgumentCaptor argument = ArgumentCaptor.forClass(IndexRequest.class); + verify(documentParsingProvider, times(1)).newDocumentSizeObserver(argument.capture()); + IndexRequest value = argument.getValue(); + assertThat(value.getNormalisedBytesParsed(), equalTo(100L)); } public void testUpdateWithDelete() throws Exception { @@ -756,7 +764,7 @@ public void testUpdateWithDelete() throws Exception { updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer(), - listener -> listener.onResponse(null), + (listener, mappingVersion) -> listener.onResponse(null), ASSERTING_DONE_LISTENER, DocumentParsingProvider.EMPTY_INSTANCE ); @@ -794,7 +802,7 @@ public void testFailureDuringUpdateProcessing() throws Exception { updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer(), - listener -> {}, + (listener, mappingVersion) -> {}, ASSERTING_DONE_LISTENER, DocumentParsingProvider.EMPTY_INSTANCE ); @@ -834,7 +842,7 @@ public void testTranslogPositionToSync() throws Exception { null, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer(), - listener -> {}, + (listener, mappingVersion) -> {}, ASSERTING_DONE_LISTENER, DocumentParsingProvider.EMPTY_INSTANCE ); @@ -937,7 +945,7 @@ public void testRetries() throws Exception { updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer(), - listener -> listener.onResponse(null), + (listener, mappingVersion) -> listener.onResponse(null), new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(result -> { assertThat(((WritePrimaryResult) result).location, equalTo(resultLocation)); BulkItemResponse primaryResponse = result.replicaRequest().items()[0].getPrimaryResponse(); @@ -1034,7 +1042,7 @@ public void testForceExecutionOnRejectionAfterMappingUpdate() throws Exception { throw new IllegalStateException(e); } }, - listener -> listener.onResponse(null), + (listener, mappingVersion) -> listener.onResponse(null), new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(result -> // Assert that we still need to fsync the location that was successfully written assertThat(((WritePrimaryResult) result).location, equalTo(resultLocation1))), latch), @@ -1096,7 +1104,7 @@ public void testPerformOnPrimaryReportsBulkStats() throws 
Exception { listener.onResponse(null); } }, - listener -> listener.onFailure(new IllegalStateException("no failure expected")), + (listener, mappingVersion) -> listener.onFailure(new IllegalStateException("no failure expected")), new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(result -> { try { BulkStats bulkStats = shard.bulkStats(); @@ -1156,7 +1164,7 @@ public void testNoopMappingUpdateInfiniteLoopPrevention() throws Exception { updateHelper, threadPool::absoluteTimeInMillis, (update, shardId, listener) -> fail("the master should not be contacted as the operation yielded a noop mapping update"), - listener -> listener.onResponse(null), + (listener, mappingVersion) -> listener.onResponse(null), ActionTestUtils.assertNoFailureListener(result -> {}), threadPool.executor(Names.WRITE) ) @@ -1200,7 +1208,7 @@ public void testNoopMappingUpdateSuccessOnRetry() throws Exception { when(mapperService.merge(any(), any(CompressedXContent.class), any())).thenReturn(documentMapper); // on the second invocation, the mapping version is incremented // so that the second mapping update attempt doesn't trigger the infinite loop prevention - when(mapperService.mappingVersion()).thenReturn(0L, 1L); + when(mapperService.mappingVersion()).thenReturn(0L, 0L, 1L); UpdateHelper updateHelper = mock(UpdateHelper.class); when(updateHelper.prepare(any(), eq(shard), any())).thenReturn( @@ -1223,7 +1231,7 @@ public void testNoopMappingUpdateSuccessOnRetry() throws Exception { updateHelper, threadPool::absoluteTimeInMillis, (update, shardId, listener) -> fail("the master should not be contacted as the operation yielded a noop mapping update"), - listener -> listener.onFailure(new IllegalStateException("no failure expected")), + (listener, mappingVersion) -> listener.onFailure(new IllegalStateException("no failure expected")), new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(result -> { BulkItemResponse primaryResponse = result.replicaRequest().items()[0].getPrimaryResponse(); assertFalse(primaryResponse.isFailed()); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java index 590029f8537f7..9e80f73d4df4a 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java @@ -8,19 +8,16 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.ingest.SimulateIndexResponse; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.IndexVersions; @@ -34,6 +31,8 @@ import org.elasticsearch.threadpool.TestThreadPool; import 
org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; import org.junit.After; import org.junit.Before; @@ -63,37 +62,17 @@ public class TransportSimulateBulkActionTests extends ESTestCase { class TestTransportSimulateBulkAction extends TransportSimulateBulkAction { - volatile boolean failIndexCreation = false; - boolean indexCreated = false; // set when the "real" index is created - Runnable beforeIndexCreation = null; - TestTransportSimulateBulkAction() { super( TransportSimulateBulkActionTests.this.threadPool, transportService, - clusterService, - null, + TransportSimulateBulkActionTests.this.clusterService, null, - new NodeClient(Settings.EMPTY, TransportSimulateBulkActionTests.this.threadPool), - new ActionFilters(Collections.emptySet()), - new TransportBulkActionTookTests.Resolver(), + new ActionFilters(Set.of()), new IndexingPressure(Settings.EMPTY), EmptySystemIndices.INSTANCE ); } - - @Override - void createIndex(CreateIndexRequest createIndexRequest, ActionListener listener) { - indexCreated = true; - if (beforeIndexCreation != null) { - beforeIndexCreation.run(); - } - if (failIndexCreation) { - listener.onFailure(new ResourceAlreadyExistsException("index already exists")); - } else { - listener.onResponse(null); - } - } } @Before @@ -137,10 +116,11 @@ public void testIndexData() { for (int i = 0; i < bulkItemCount; i++) { Map source = Map.of(randomAlphaOfLength(10), randomAlphaOfLength(5)); IndexRequest indexRequest = new IndexRequest(randomAlphaOfLength(10)).id(randomAlphaOfLength(10)).source(source); + indexRequest.setListExecutedPipelines(true); for (int j = 0; j < randomIntBetween(0, 10); j++) { indexRequest.addPipeline(randomAlphaOfLength(12)); } - bulkRequest.add(); + bulkRequest.add(indexRequest); } AtomicBoolean onResponseCalled = new AtomicBoolean(false); ActionListener listener = new ActionListener<>() { @@ -148,6 +128,7 @@ public void testIndexData() { public void onResponse(BulkResponse response) { onResponseCalled.set(true); BulkItemResponse[] responseItems = response.getItems(); + assertThat(responseItems.length, equalTo(bulkItemCount)); assertThat(responseItems.length, equalTo(bulkRequest.requests().size())); for (int i = 0; i < responseItems.length; i++) { BulkItemResponse responseItem = responseItems[i]; @@ -167,12 +148,15 @@ public void onResponse(BulkResponse response) { Strings.format( """ { + "_id": "%s", "_index": "%s", + "_version": -3, "_source": %s, "executed_pipelines": [%s] }""", + indexRequest.id(), indexRequest.index(), - indexRequest.source(), + convertMapToJsonString(indexRequest.sourceAsMap()), indexRequest.getExecutedPipelines() .stream() .map(pipeline -> "\"" + pipeline + "\"") @@ -192,20 +176,13 @@ public void onFailure(Exception e) { fail(e, "Unexpected error"); } }; - Map indicesToAutoCreate = Map.of(); // unused - Set dataStreamsToRollover = Set.of(); // unused - Set failureStoresToRollover = Set.of(); // unused - long startTime = 0; - bulkAction.createMissingIndicesAndIndexData( - task, - bulkRequest, - r -> fail("executor is unused"), - listener, - indicesToAutoCreate, - dataStreamsToRollover, - failureStoresToRollover, - startTime - ); + bulkAction.doInternalExecute(task, bulkRequest, r -> fail("executor is unused"), listener, randomLongBetween(0, Long.MAX_VALUE)); assertThat(onResponseCalled.get(), equalTo(true)); } + + private String convertMapToJsonString(Map map) throws IOException { + try 
(XContentBuilder builder = JsonXContent.contentBuilder().map(map)) { + return BytesReference.bytes(builder).utf8ToString(); + } + } } diff --git a/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java index 6106dbf1fbc5a..c05cb054ce391 100644 --- a/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java @@ -217,22 +217,6 @@ public void testSerializeDynamicTemplates() throws Exception { IndexRequest serialized = new IndexRequest(in); assertThat(serialized.getDynamicTemplates(), anEmptyMap()); } - // old version - { - Map dynamicTemplates = IntStream.range(0, randomIntBetween(1, 10)) - .boxed() - .collect(Collectors.toMap(n -> "field-" + n, n -> "name-" + n)); - indexRequest.setDynamicTemplates(dynamicTemplates); - TransportVersion ver = TransportVersionUtils.randomVersionBetween( - random(), - TransportVersions.V_7_0_0, - TransportVersionUtils.getPreviousVersion(TransportVersions.V_7_13_0) - ); - BytesStreamOutput out = new BytesStreamOutput(); - out.setTransportVersion(ver); - IllegalArgumentException error = expectThrows(IllegalArgumentException.class, () -> indexRequest.writeTo(out)); - assertThat(error.getMessage(), equalTo("[dynamic_templates] parameter requires all nodes on 7.13.0 or later")); - } // new version { Map dynamicTemplates = IntStream.range(0, randomIntBetween(0, 10)) diff --git a/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java index 70c4d73f578b3..e61d86bbf2a58 100644 --- a/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.CanMatchNodeResponse.ResponseOrFailure; @@ -26,8 +27,6 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexMode; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.query.BoolQueryBuilder; @@ -38,6 +37,7 @@ import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardLongFieldRange; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.search.CanMatchShardResponse; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.SignificantTermsAggregationBuilder; @@ -72,6 +72,7 @@ import static org.elasticsearch.action.search.SearchAsyncActionTests.getShardsIter; import static org.elasticsearch.core.Types.forciblyCast; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.mockito.Mockito.mock; @@ -464,7 +465,17 @@ public void sendCanMatch( } } - public void 
testCanMatchFilteringOnCoordinatorThatCanBeSkipped() throws Exception { + // test using @timestamp + public void testCanMatchFilteringOnCoordinatorThatCanBeSkippedUsingTimestamp() throws Exception { + doCanMatchFilteringOnCoordinatorThatCanBeSkipped(DataStream.TIMESTAMP_FIELD_NAME); + } + + // test using event.ingested + public void testCanMatchFilteringOnCoordinatorThatCanBeSkippedUsingEventIngested() throws Exception { + doCanMatchFilteringOnCoordinatorThatCanBeSkipped(IndexMetadata.EVENT_INGESTED_FIELD_NAME); + } + + public void doCanMatchFilteringOnCoordinatorThatCanBeSkipped(String timestampField) throws Exception { Index dataStreamIndex1 = new Index(".ds-mydata0001", UUIDs.base64UUID()); Index dataStreamIndex2 = new Index(".ds-mydata0002", UUIDs.base64UUID()); DataStream dataStream = DataStreamTestHelper.newInstance("mydata", List.of(dataStreamIndex1, dataStreamIndex2)); @@ -475,15 +486,10 @@ public void testCanMatchFilteringOnCoordinatorThatCanBeSkipped() throws Exceptio long indexMaxTimestamp = randomLongBetween(indexMinTimestamp, 5000 * 2); StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); for (Index dataStreamIndex : dataStream.getIndices()) { - contextProviderBuilder.addIndexMinMaxTimestamps( - dataStreamIndex, - DataStream.TIMESTAMP_FIELD_NAME, - indexMinTimestamp, - indexMaxTimestamp - ); + contextProviderBuilder.addIndexMinMaxTimestamps(dataStreamIndex, timestampField, indexMinTimestamp, indexMaxTimestamp); } - RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(timestampField); // We query a range outside of the timestamp range covered by both datastream indices rangeQueryBuilder.from(indexMaxTimestamp + 1).to(indexMaxTimestamp + 2); @@ -535,26 +541,107 @@ public void testCanMatchFilteringOnCoordinatorThatCanBeSkipped() throws Exceptio ); } - public void testCanMatchFilteringOnCoordinatorParsingFails() throws Exception { - Index dataStreamIndex1 = new Index(".ds-mydata0001", UUIDs.base64UUID()); - Index dataStreamIndex2 = new Index(".ds-mydata0002", UUIDs.base64UUID()); + public void testCoordinatorCanMatchFilteringThatCanBeSkippedUsingBothTimestamps() throws Exception { + Index dataStreamIndex1 = new Index(".ds-twoTimestamps0001", UUIDs.base64UUID()); + Index dataStreamIndex2 = new Index(".ds-twoTimestamps0002", UUIDs.base64UUID()); DataStream dataStream = DataStreamTestHelper.newInstance("mydata", List.of(dataStreamIndex1, dataStreamIndex2)); - List regularIndices = randomList(0, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); + List regularIndices = randomList(1, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); long indexMinTimestamp = randomLongBetween(0, 5000); long indexMaxTimestamp = randomLongBetween(indexMinTimestamp, 5000 * 2); StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); for (Index dataStreamIndex : dataStream.getIndices()) { - contextProviderBuilder.addIndexMinMaxTimestamps( + // use same range for both @timestamp and event.ingested + contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested( dataStreamIndex, - DataStream.TIMESTAMP_FIELD_NAME, + indexMinTimestamp, + indexMaxTimestamp, indexMinTimestamp, indexMaxTimestamp ); } - RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); + /** + * Expected behavior: if either 
@timestamp or 'event.ingested' filters in the query are "out of range" (do not + * overlap the range in cluster state), then all shards in the datastream should be skipped. + * Only if both @timestamp or 'event.ingested' filters are "in range" should the data stream shards be searched + */ + boolean timestampQueryOutOfRange = randomBoolean(); + boolean eventIngestedQueryOutOfRange = randomBoolean(); + int timestampOffset = timestampQueryOutOfRange ? 1 : -500; + int eventIngestedOffset = eventIngestedQueryOutOfRange ? 1 : -500; + + RangeQueryBuilder tsRangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); + tsRangeQueryBuilder.from(indexMaxTimestamp + timestampOffset).to(indexMaxTimestamp + 2); + + RangeQueryBuilder eventIngestedRangeQueryBuilder = new RangeQueryBuilder(IndexMetadata.EVENT_INGESTED_FIELD_NAME); + eventIngestedRangeQueryBuilder.from(indexMaxTimestamp + eventIngestedOffset).to(indexMaxTimestamp + 2); + + BoolQueryBuilder queryBuilder = new BoolQueryBuilder().filter(tsRangeQueryBuilder).filter(eventIngestedRangeQueryBuilder); + + if (randomBoolean()) { + // Add an additional filter that cannot be evaluated in the coordinator but shouldn't + // affect the end result as we're filtering + queryBuilder.filter(new TermQueryBuilder("fake", "value")); + } + + assignShardsAndExecuteCanMatchPhase( + List.of(dataStream), + regularIndices, + contextProviderBuilder.build(), + queryBuilder, + List.of(), + null, + (updatedSearchShardIterators, requests) -> { + List skippedShards = updatedSearchShardIterators.stream().filter(SearchShardIterator::skip).toList(); + List nonSkippedShards = updatedSearchShardIterators.stream() + .filter(searchShardIterator -> searchShardIterator.skip() == false) + .toList(); + + if (timestampQueryOutOfRange || eventIngestedQueryOutOfRange) { + // data stream shards should have been skipped + assertThat(skippedShards.size(), greaterThan(0)); + boolean allSkippedShardAreFromDataStream = skippedShards.stream() + .allMatch(shardIterator -> dataStream.getIndices().contains(shardIterator.shardId().getIndex())); + assertThat(allSkippedShardAreFromDataStream, equalTo(true)); + + boolean allNonSkippedShardsAreFromRegularIndices = nonSkippedShards.stream() + .allMatch(shardIterator -> regularIndices.contains(shardIterator.shardId().getIndex())); + assertThat(allNonSkippedShardsAreFromRegularIndices, equalTo(true)); + + boolean allRequestsWereTriggeredAgainstRegularIndices = requests.stream() + .allMatch(request -> regularIndices.contains(request.shardId().getIndex())); + assertThat(allRequestsWereTriggeredAgainstRegularIndices, equalTo(true)); + + } else { + assertThat(skippedShards.size(), equalTo(0)); + long countSkippedShardsFromDatastream = nonSkippedShards.stream() + .filter(iter -> dataStream.getIndices().contains(iter.shardId().getIndex())) + .count(); + assertThat(countSkippedShardsFromDatastream, greaterThan(0L)); + } + } + ); + } + + public void testCanMatchFilteringOnCoordinatorParsingFails() throws Exception { + Index dataStreamIndex1 = new Index(".ds-mydata0001", UUIDs.base64UUID()); + Index dataStreamIndex2 = new Index(".ds-mydata0002", UUIDs.base64UUID()); + DataStream dataStream = DataStreamTestHelper.newInstance("mydata", List.of(dataStreamIndex1, dataStreamIndex2)); + + List regularIndices = randomList(0, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); + + String timeField = randomFrom(DataStream.TIMESTAMP_FIELD_NAME, IndexMetadata.EVENT_INGESTED_FIELD_NAME); + + long indexMinTimestamp = randomLongBetween(0, 5000); 
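A minimal standalone sketch of the skip decision these coordinator can-match tests exercise, written in plain Java rather than against the Elasticsearch classes (the class and method names below are illustrative, not from the codebase): with two ANDed range filters, a shard can be skipped as soon as either filter lies entirely outside the recorded range for its field.

```java
public class CanMatchSkipSketch {
    record LongRange(long min, long max) {
        boolean overlaps(LongRange other) {
            return this.min <= other.max && other.min <= this.max;
        }
    }

    static boolean canSkipShard(LongRange recordedTimestamp, LongRange recordedEventIngested,
                                LongRange timestampFilter, LongRange eventIngestedFilter) {
        boolean timestampCouldMatch = recordedTimestamp.overlaps(timestampFilter);
        boolean eventIngestedCouldMatch = recordedEventIngested.overlaps(eventIngestedFilter);
        // Both ANDed filters must be able to match for the shard to be worth searching.
        return (timestampCouldMatch && eventIngestedCouldMatch) == false;
    }

    public static void main(String[] args) {
        LongRange recorded = new LongRange(0, 5000);
        // @timestamp filter entirely above the recorded range -> shard can be skipped
        System.out.println(canSkipShard(recorded, recorded,
                new LongRange(5001, 5002), new LongRange(4500, 5000))); // true
        // both filters overlap the recorded ranges -> shard must be searched
        System.out.println(canSkipShard(recorded, recorded,
                new LongRange(4500, 5000), new LongRange(4500, 5000))); // false
    }
}
```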
+ long indexMaxTimestamp = randomLongBetween(indexMinTimestamp, 5000 * 2); + StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); + for (Index dataStreamIndex : dataStream.getIndices()) { + contextProviderBuilder.addIndexMinMaxTimestamps(dataStreamIndex, timeField, indexMinTimestamp, indexMaxTimestamp); + } + + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(timeField); // Query with a non default date format rangeQueryBuilder.from("2020-1-01").to("2021-1-01"); @@ -585,23 +672,20 @@ public void testCanMatchFilteringOnCoordinatorThatCanNotBeSkipped() throws Excep List regularIndices = randomList(0, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); + String timeField = randomFrom(DataStream.TIMESTAMP_FIELD_NAME, IndexMetadata.EVENT_INGESTED_FIELD_NAME); + long indexMinTimestamp = 10; long indexMaxTimestamp = 20; StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); for (Index dataStreamIndex : dataStream.getIndices()) { - contextProviderBuilder.addIndexMinMaxTimestamps( - dataStreamIndex, - DataStream.TIMESTAMP_FIELD_NAME, - indexMinTimestamp, - indexMaxTimestamp - ); + contextProviderBuilder.addIndexMinMaxTimestamps(dataStreamIndex, timeField, indexMinTimestamp, indexMaxTimestamp); } BoolQueryBuilder queryBuilder = new BoolQueryBuilder(); // Query inside of the data stream index range if (randomBoolean()) { // Query generation - RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(timeField); // We query a range within the timestamp range covered by both datastream indices rangeQueryBuilder.from(indexMinTimestamp).to(indexMaxTimestamp); @@ -614,8 +698,7 @@ public void testCanMatchFilteringOnCoordinatorThatCanNotBeSkipped() throws Excep } } else { // We query a range outside of the timestamp range covered by both datastream indices - RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(indexMaxTimestamp + 1) - .to(indexMaxTimestamp + 2); + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(timeField).from(indexMaxTimestamp + 1).to(indexMaxTimestamp + 2); TermQueryBuilder termQueryBuilder = new TermQueryBuilder("fake", "value"); @@ -635,17 +718,86 @@ public void testCanMatchFilteringOnCoordinatorThatCanNotBeSkipped() throws Excep ); } + public void testCanMatchFilteringOnCoordinatorWithTimestampAndEventIngestedThatCanNotBeSkipped() throws Exception { + // Generate indices + Index dataStreamIndex1 = new Index(".ds-mydata0001", UUIDs.base64UUID()); + Index dataStreamIndex2 = new Index(".ds-mydata0002", UUIDs.base64UUID()); + DataStream dataStream = DataStreamTestHelper.newInstance("mydata", List.of(dataStreamIndex1, dataStreamIndex2)); + + List regularIndices = randomList(0, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); + + long indexMinTimestampForTs = 10; + long indexMaxTimestampForTs = 20; + long indexMinTimestampForEventIngested = 10; + long indexMaxTimestampForEventIngested = 20; + StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); + for (Index dataStreamIndex : dataStream.getIndices()) { + contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested( + dataStreamIndex, + indexMinTimestampForTs, + indexMaxTimestampForTs, + indexMinTimestampForEventIngested, + 
indexMaxTimestampForEventIngested + ); + } + + BoolQueryBuilder queryBuilder = new BoolQueryBuilder(); + // Query inside of the data stream index range + if (randomBoolean()) { + // Query generation + // We query a range within both timestamp ranges covered by both datastream indices + RangeQueryBuilder tsRangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); + tsRangeQueryBuilder.from(indexMinTimestampForTs).to(indexMaxTimestampForTs); + + RangeQueryBuilder eventIngestedRangeQueryBuilder = new RangeQueryBuilder(IndexMetadata.EVENT_INGESTED_FIELD_NAME); + eventIngestedRangeQueryBuilder.from(indexMinTimestampForEventIngested).to(indexMaxTimestampForEventIngested); + + queryBuilder.filter(tsRangeQueryBuilder).filter(eventIngestedRangeQueryBuilder); + + if (randomBoolean()) { + // Add an additional filter that cannot be evaluated in the coordinator but shouldn't + // affect the end result as we're filtering + queryBuilder.filter(new TermQueryBuilder("fake", "value")); + } + } else { + // We query a range outside of the both ranges covered by both datastream indices + RangeQueryBuilder tsRangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(indexMaxTimestampForTs + 1) + .to(indexMaxTimestampForTs + 2); + RangeQueryBuilder eventIngestedRangeQueryBuilder = new RangeQueryBuilder(IndexMetadata.EVENT_INGESTED_FIELD_NAME).from( + indexMaxTimestampForEventIngested + 1 + ).to(indexMaxTimestampForEventIngested + 2); + + TermQueryBuilder termQueryBuilder = new TermQueryBuilder("fake", "value"); + + // This is always evaluated as true in the coordinator as we cannot determine there if + // the term query clause is false. + queryBuilder.should(tsRangeQueryBuilder).should(eventIngestedRangeQueryBuilder).should(termQueryBuilder); + } + + assignShardsAndExecuteCanMatchPhase( + List.of(dataStream), + regularIndices, + contextProviderBuilder.build(), + queryBuilder, + List.of(), + null, + this::assertAllShardsAreQueried + ); + } + public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_withDefaultBackgroundFilter() throws Exception { Index index1 = new Index("index1", UUIDs.base64UUID()); Index index2 = new Index("index2", UUIDs.base64UUID()); Index index3 = new Index("index3", UUIDs.base64UUID()); + String timeField = randomFrom(DataStream.TIMESTAMP_FIELD_NAME, IndexMetadata.EVENT_INGESTED_FIELD_NAME); + StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); - contextProviderBuilder.addIndexMinMaxTimestamps(index1, DataStream.TIMESTAMP_FIELD_NAME, 0, 999); - contextProviderBuilder.addIndexMinMaxTimestamps(index2, DataStream.TIMESTAMP_FIELD_NAME, 1000, 1999); - contextProviderBuilder.addIndexMinMaxTimestamps(index3, DataStream.TIMESTAMP_FIELD_NAME, 2000, 2999); + contextProviderBuilder.addIndexMinMaxTimestamps(index1, timeField, 0, 999); + contextProviderBuilder.addIndexMinMaxTimestamps(index2, timeField, 1000, 1999); + contextProviderBuilder.addIndexMinMaxTimestamps(index3, timeField, 2000, 2999); - QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(2100).to(2200)); + QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(timeField).from(2100).to(2200)); AggregationBuilder aggregation = new SignificantTermsAggregationBuilder("significant_terms"); assignShardsAndExecuteCanMatchPhase( @@ -661,20 +813,22 @@ public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_w } public 
void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_withBackgroundFilter() throws Exception { + String timestampField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); + Index index1 = new Index("index1", UUIDs.base64UUID()); Index index2 = new Index("index2", UUIDs.base64UUID()); Index index3 = new Index("index3", UUIDs.base64UUID()); Index index4 = new Index("index4", UUIDs.base64UUID()); StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); - contextProviderBuilder.addIndexMinMaxTimestamps(index1, DataStream.TIMESTAMP_FIELD_NAME, 0, 999); - contextProviderBuilder.addIndexMinMaxTimestamps(index2, DataStream.TIMESTAMP_FIELD_NAME, 1000, 1999); - contextProviderBuilder.addIndexMinMaxTimestamps(index3, DataStream.TIMESTAMP_FIELD_NAME, 2000, 2999); - contextProviderBuilder.addIndexMinMaxTimestamps(index4, DataStream.TIMESTAMP_FIELD_NAME, 3000, 3999); + contextProviderBuilder.addIndexMinMaxTimestamps(index1, timestampField, 0, 999); + contextProviderBuilder.addIndexMinMaxTimestamps(index2, timestampField, 1000, 1999); + contextProviderBuilder.addIndexMinMaxTimestamps(index3, timestampField, 2000, 2999); + contextProviderBuilder.addIndexMinMaxTimestamps(index4, timestampField, 3000, 3999); - QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(3100).to(3200)); + QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(timestampField).from(3100).to(3200)); AggregationBuilder aggregation = new SignificantTermsAggregationBuilder("significant_terms").backgroundFilter( - new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(0).to(1999) + new RangeQueryBuilder(timestampField).from(0).to(1999) ); assignShardsAndExecuteCanMatchPhase( @@ -703,14 +857,53 @@ public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_w Index index2 = new Index("index2", UUIDs.base64UUID()); Index index3 = new Index("index3", UUIDs.base64UUID()); + String timestampField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); + StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); - contextProviderBuilder.addIndexMinMaxTimestamps(index1, DataStream.TIMESTAMP_FIELD_NAME, 0, 999); - contextProviderBuilder.addIndexMinMaxTimestamps(index2, DataStream.TIMESTAMP_FIELD_NAME, 1000, 1999); - contextProviderBuilder.addIndexMinMaxTimestamps(index3, DataStream.TIMESTAMP_FIELD_NAME, 2000, 2999); + contextProviderBuilder.addIndexMinMaxTimestamps(index1, timestampField, 0, 999); + contextProviderBuilder.addIndexMinMaxTimestamps(index2, timestampField, 1000, 1999); + contextProviderBuilder.addIndexMinMaxTimestamps(index3, timestampField, 2000, 2999); - QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(2100).to(2200)); + QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(timestampField).from(2100).to(2200)); AggregationBuilder aggregation = new SignificantTermsAggregationBuilder("significant_terms").backgroundFilter( - new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(2000).to(2300) + new RangeQueryBuilder(timestampField).from(2000).to(2300) + ); + SuggestBuilder suggest = new SuggestBuilder().setGlobalText("test"); + + assignShardsAndExecuteCanMatchPhase( + List.of(), + List.of(index1, index2, index3), + 
contextProviderBuilder.build(), + query, + List.of(aggregation), + suggest, + // The query and aggregation and match only index3, but suggest should match everything. + this::assertAllShardsAreQueried + ); + } + + public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_withSuggest_withTwoTimestamps() throws Exception { + Index index1 = new Index("index1", UUIDs.base64UUID()); + Index index2 = new Index("index2", UUIDs.base64UUID()); + Index index3 = new Index("index3", UUIDs.base64UUID()); + + StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); + contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested(index1, 0, 999, 0, 999); + contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested(index2, 1000, 1999, 1000, 1999); + contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested(index3, 2000, 2999, 2000, 2999); + + String fieldInRange = IndexMetadata.EVENT_INGESTED_FIELD_NAME; + String fieldOutOfRange = DataStream.TIMESTAMP_FIELD_NAME; + + if (randomBoolean()) { + fieldInRange = DataStream.TIMESTAMP_FIELD_NAME; + fieldOutOfRange = IndexMetadata.EVENT_INGESTED_FIELD_NAME; + } + + QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(fieldInRange).from(2100).to(2200)) + .filter(new RangeQueryBuilder(fieldOutOfRange).from(8888).to(9999)); + AggregationBuilder aggregation = new SignificantTermsAggregationBuilder("significant_terms").backgroundFilter( + new RangeQueryBuilder(fieldInRange).from(2000).to(2300) ); SuggestBuilder suggest = new SuggestBuilder().setGlobalText("test"); @@ -744,13 +937,13 @@ public void testCanMatchFilteringOnCoordinatorThatCanBeSkippedTsdb() throws Exce long indexMaxTimestamp = randomLongBetween(indexMinTimestamp, 5000 * 2); StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); for (Index index : dataStream1.getIndices()) { - contextProviderBuilder.addIndexMinMaxTimestamps(index, indexMinTimestamp, indexMaxTimestamp); + contextProviderBuilder.addIndexMinMaxTimestamps(index, DataStream.TIMESTAMP_FIELD_NAME, indexMinTimestamp, indexMaxTimestamp); } for (Index index : dataStream2.getIndices()) { contextProviderBuilder.addIndex(index); } - RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder("@timestamp"); + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); // We query a range outside of the timestamp range covered by both datastream indices rangeQueryBuilder.from(indexMaxTimestamp + 1).to(indexMaxTimestamp + 2); @@ -954,9 +1147,9 @@ public void sendCanMatch( canMatchResultsConsumer.accept(updatedSearchShardIterators, requests); } - private static class StaticCoordinatorRewriteContextProviderBuilder { + static class StaticCoordinatorRewriteContextProviderBuilder { private ClusterState clusterState = ClusterState.EMPTY_STATE; - private final Map fields = new HashMap<>(); + private final Map fields = new HashMap<>(); private void addIndexMinMaxTimestamps(Index index, String fieldName, long minTimeStamp, long maxTimestamp) { if (clusterState.metadata().index(index) != null) { @@ -974,35 +1167,64 @@ private void addIndexMinMaxTimestamps(Index index, String fieldName, long minTim IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(index.getName()) .settings(indexSettings) .numberOfShards(1) - .numberOfReplicas(0) - .timestampRange(timestampRange); + .numberOfReplicas(0); + if 
(fieldName.equals(DataStream.TIMESTAMP_FIELD_NAME)) { + indexMetadataBuilder.timestampRange(timestampRange); + fields.put(index, new DateFieldRangeInfo(new DateFieldMapper.DateFieldType(fieldName), null, null, null)); + } else if (fieldName.equals(IndexMetadata.EVENT_INGESTED_FIELD_NAME)) { + indexMetadataBuilder.eventIngestedRange(timestampRange, TransportVersion.current()); + fields.put(index, new DateFieldRangeInfo(null, null, new DateFieldMapper.DateFieldType(fieldName), null)); + } Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()).put(indexMetadataBuilder); - clusterState = ClusterState.builder(clusterState).metadata(metadataBuilder).build(); - - fields.put(index, new DateFieldMapper.DateFieldType(fieldName)); } - private void addIndexMinMaxTimestamps(Index index, long minTimestamp, long maxTimestamp) { + /** + * Add min/max timestamps to IndexMetadata for the specified index for both @timestamp and 'event.ingested' + */ + private void addIndexMinMaxForTimestampAndEventIngested( + Index index, + long minTimestampForTs, + long maxTimestampForTs, + long minTimestampForEventIngested, + long maxTimestampForEventIngested + ) { if (clusterState.metadata().index(index) != null) { throw new IllegalArgumentException("Min/Max timestamps for " + index + " were already defined"); } - Settings.Builder indexSettings = settings(IndexVersion.current()).put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "a_field") - .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(minTimestamp)) - .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(maxTimestamp)); + IndexLongFieldRange tsTimestampRange = IndexLongFieldRange.NO_SHARDS.extendWithShardRange( + 0, + 1, + ShardLongFieldRange.of(minTimestampForTs, maxTimestampForTs) + ); + IndexLongFieldRange eventIngestedTimestampRange = IndexLongFieldRange.NO_SHARDS.extendWithShardRange( + 0, + 1, + ShardLongFieldRange.of(minTimestampForEventIngested, maxTimestampForEventIngested) + ); + + Settings.Builder indexSettings = settings(IndexVersion.current()).put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()); IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(index.getName()) .settings(indexSettings) .numberOfShards(1) - .numberOfReplicas(0); + .numberOfReplicas(0) + .timestampRange(tsTimestampRange) + .eventIngestedRange(eventIngestedTimestampRange, TransportVersion.current()); Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()).put(indexMetadataBuilder); clusterState = ClusterState.builder(clusterState).metadata(metadataBuilder).build(); - fields.put(index, new DateFieldMapper.DateFieldType("@timestamp")); + fields.put( + index, + new DateFieldRangeInfo( + new DateFieldMapper.DateFieldType(DataStream.TIMESTAMP_FIELD_NAME), + null, + new DateFieldMapper.DateFieldType(IndexMetadata.EVENT_INGESTED_FIELD_NAME), + null + ) + ); } private void addIndex(Index index) { @@ -1018,7 +1240,7 @@ private void addIndex(Index index) { Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()).put(indexMetadataBuilder); clusterState = ClusterState.builder(clusterState).metadata(metadataBuilder).build(); - fields.put(index, new DateFieldMapper.DateFieldType("@timestamp")); + fields.put(index, new DateFieldRangeInfo(new 
DateFieldMapper.DateFieldType(DataStream.TIMESTAMP_FIELD_NAME), null, null, null)); } public CoordinatorRewriteContextProvider build() { diff --git a/server/src/test/java/org/elasticsearch/action/search/KnnSearchSingleNodeTests.java b/server/src/test/java/org/elasticsearch/action/search/KnnSearchSingleNodeTests.java index 818f74da5853a..c182d93ffafc9 100644 --- a/server/src/test/java/org/elasticsearch/action/search/KnnSearchSingleNodeTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/KnnSearchSingleNodeTests.java @@ -416,7 +416,7 @@ public void testKnnSearchAction() throws IOException { // how the action works (it builds a kNN query under the hood) float[] queryVector = randomVector(); assertResponse( - client().prepareSearch("index1", "index2").setQuery(new KnnVectorQueryBuilder("vector", queryVector, 5, null)).setSize(2), + client().prepareSearch("index1", "index2").setQuery(new KnnVectorQueryBuilder("vector", queryVector, null, 5, null)).setSize(2), response -> { // The total hits is num_cands * num_shards, since the query gathers num_cands hits from each shard assertHitCount(response, 5 * 2); diff --git a/server/src/test/java/org/elasticsearch/action/search/QueryPhaseResultConsumerTests.java b/server/src/test/java/org/elasticsearch/action/search/QueryPhaseResultConsumerTests.java index db32213ff97b7..ab7d9f180eae4 100644 --- a/server/src/test/java/org/elasticsearch/action/search/QueryPhaseResultConsumerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/QueryPhaseResultConsumerTests.java @@ -53,7 +53,13 @@ public void setup() { searchPhaseController = new SearchPhaseController((t, s) -> new AggregationReduceContext.Builder() { @Override public AggregationReduceContext forPartialReduction() { - return new AggregationReduceContext.ForPartial(BigArrays.NON_RECYCLING_INSTANCE, null, t, mock(AggregationBuilder.class)); + return new AggregationReduceContext.ForPartial( + BigArrays.NON_RECYCLING_INSTANCE, + null, + t, + mock(AggregationBuilder.class), + b -> {} + ); } public AggregationReduceContext forFinalReduction() { diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java index 43bca4bae2f3f..118a7055cd782 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java @@ -122,7 +122,7 @@ public void setup() { @Override public AggregationReduceContext forPartialReduction() { reductions.add(false); - return new AggregationReduceContext.ForPartial(BigArrays.NON_RECYCLING_INSTANCE, null, t, agg); + return new AggregationReduceContext.ForPartial(BigArrays.NON_RECYCLING_INSTANCE, null, t, agg, b -> {}); } public AggregationReduceContext forFinalReduction() { diff --git a/server/src/test/java/org/elasticsearch/action/support/PlainActionFutureTests.java b/server/src/test/java/org/elasticsearch/action/support/PlainActionFutureTests.java index 2ca914eb23c61..4784a42014825 100644 --- a/server/src/test/java/org/elasticsearch/action/support/PlainActionFutureTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/PlainActionFutureTests.java @@ -14,6 +14,8 @@ import org.elasticsearch.core.Assertions; import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; import 
org.elasticsearch.transport.RemoteTransportException; import java.util.concurrent.CancellationException; @@ -21,6 +23,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; public class PlainActionFutureTests extends ESTestCase { @@ -70,7 +73,6 @@ public void testNoResult() { assumeTrue("assertions required for this test", Assertions.ENABLED); final var future = new PlainActionFuture<>(); expectThrows(AssertionError.class, future::result); - expectThrows(AssertionError.class, future::actionResult); } public void testUnwrapException() { @@ -90,19 +92,17 @@ private void checkUnwrap(Exception exception, Class actionG assertEquals(actionGetException, expectThrows(RuntimeException.class, future::actionGet).getClass()); assertEquals(actionGetException, expectThrows(RuntimeException.class, () -> future.actionGet(10, TimeUnit.SECONDS)).getClass()); - assertEquals(actionGetException, expectThrows(RuntimeException.class, future::actionResult).getClass()); - assertEquals(actionGetException, expectThrows(RuntimeException.class, expectIgnoresInterrupt(future::actionResult)).getClass()); assertEquals(getException, expectThrows(ExecutionException.class, future::get).getCause().getClass()); assertEquals(getException, expectThrows(ExecutionException.class, () -> future.get(10, TimeUnit.SECONDS)).getCause().getClass()); if (exception instanceof RuntimeException) { - assertEquals(getException, expectThrows(Exception.class, future::result).getClass()); - assertEquals(getException, expectThrows(Exception.class, expectIgnoresInterrupt(future::result)).getClass()); + expectThrows(ExecutionException.class, getException, future::result); + expectThrows(ExecutionException.class, getException, expectIgnoresInterrupt(future::result)); assertEquals(getException, expectThrows(Exception.class, () -> FutureUtils.get(future)).getClass()); assertEquals(getException, expectThrows(Exception.class, () -> FutureUtils.get(future, 10, TimeUnit.SECONDS)).getClass()); } else { - assertEquals(getException, expectThrowsWrapped(future::result).getClass()); - assertEquals(getException, expectThrowsWrapped(expectIgnoresInterrupt(future::result)).getClass()); + expectThrows(ExecutionException.class, getException, future::result); + expectThrows(ExecutionException.class, getException, expectIgnoresInterrupt(future::result)); assertEquals(getException, expectThrowsWrapped(() -> FutureUtils.get(future)).getClass()); assertEquals(getException, expectThrowsWrapped(() -> FutureUtils.get(future, 10, TimeUnit.SECONDS)).getClass()); } @@ -126,12 +126,10 @@ public void testCancelException() { assertCancellation(() -> future.get(10, TimeUnit.SECONDS)); assertCancellation(() -> future.actionGet(10, TimeUnit.SECONDS)); assertCancellation(future::result); - assertCancellation(future::actionResult); try { Thread.currentThread().interrupt(); assertCancellation(future::result); - assertCancellation(future::actionResult); } finally { assertTrue(Thread.interrupted()); } @@ -142,6 +140,39 @@ public void testCancelException() { assertPropagatesInterrupt(() -> future.actionGet(10, TimeUnit.SECONDS)); } + public void testAssertCompleteAllowedAllowsConcurrentCompletesFromSamePool() { + final AtomicReference> futureReference = new AtomicReference<>(new PlainActionFuture<>()); + final var executorName = randomFrom(ThreadPool.Names.GENERIC, ThreadPool.Names.MANAGEMENT); + final var running = new AtomicBoolean(true); + try 
(TestThreadPool threadPool = new TestThreadPool(getTestName())) { + // We only need 4 threads to reproduce this issue reliably, using more threads + // just increases the run time due to the additional synchronisation + final var threadCount = Math.min(threadPool.info(executorName).getMax(), 4); + final var startBarrier = new CyclicBarrier(threadCount + 1); + // N threads competing to complete the futures + for (int i = 0; i < threadCount; i++) { + threadPool.executor(executorName).execute(() -> { + safeAwait(startBarrier); + while (running.get()) { + futureReference.get().onResponse(null); + } + }); + } + // The race can only occur once per completion, so we provide + // a stream of new futures to the competing threads to + // maximise the probability it occurs. Providing them + // with new futures while they spin proved to be much + // more reliable at reproducing the issue than releasing + // them all from a barrier to complete a single future. + safeAwait(startBarrier); + for (int i = 0; i < 20; i++) { + futureReference.set(new PlainActionFuture<>()); + safeSleep(1); + } + running.set(false); + } + } + private static void assertCancellation(ThrowingRunnable runnable) { final var cancellationException = expectThrows(CancellationException.class, runnable); assertEquals("Task was cancelled.", cancellationException.getMessage()); diff --git a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java index a4838f568e173..6568464705d9e 100644 --- a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java @@ -524,7 +524,7 @@ public void testDelegateToMaster() throws ExecutionException, InterruptedExcepti assertThat(transport.capturedRequests().length, equalTo(1)); CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0]; assertTrue(capturedRequest.node().isMasterNode()); - assertThat(capturedRequest.request(), equalTo(request)); + assertThat(asInstanceOf(TermOverridingMasterNodeRequest.class, capturedRequest.request()).request, equalTo(request)); assertThat(capturedRequest.action(), equalTo("internal:testAction")); Response response = new Response(); @@ -552,7 +552,7 @@ public void testDelegateToFailingMaster() throws ExecutionException, Interrupted assertThat(capturedRequests.length, equalTo(1)); CapturingTransport.CapturedRequest capturedRequest = capturedRequests[0]; assertTrue(capturedRequest.node().isMasterNode()); - assertThat(capturedRequest.request(), equalTo(request)); + assertThat(asInstanceOf(TermOverridingMasterNodeRequest.class, capturedRequest.request()).request, equalTo(request)); assertThat(capturedRequest.action(), equalTo("internal:testAction")); if (rejoinSameMaster) { @@ -586,7 +586,7 @@ public void testDelegateToFailingMaster() throws ExecutionException, Interrupted assertThat(capturedRequests.length, equalTo(1)); capturedRequest = capturedRequests[0]; assertTrue(capturedRequest.node().isMasterNode()); - assertThat(capturedRequest.request(), equalTo(request)); + assertThat(asInstanceOf(TermOverridingMasterNodeRequest.class, capturedRequest.request()).request, equalTo(request)); assertThat(capturedRequest.action(), equalTo("internal:testAction")); } else if (failsWithConnectTransportException) { transport.handleRemoteError(capturedRequest.requestId(), new 
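The stress pattern in the test above (several pool threads spinning to complete whatever future is currently referenced, while the driver keeps swapping in fresh futures) can be shown as a compact standalone sketch using java.util.concurrent.CompletableFuture instead of PlainActionFuture; thread count and iteration counts here are arbitrary choices for the sketch.

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

public class CompletionRaceSketch {
    public static void main(String[] args) throws Exception {
        var futureRef = new AtomicReference<>(new CompletableFuture<Void>());
        var running = new AtomicBoolean(true);
        int threadCount = 4;
        var startBarrier = new CyclicBarrier(threadCount + 1);
        ExecutorService pool = Executors.newFixedThreadPool(threadCount);
        for (int i = 0; i < threadCount; i++) {
            pool.execute(() -> {
                try {
                    startBarrier.await();
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
                while (running.get()) {
                    futureRef.get().complete(null); // losing the race is a harmless no-op
                }
            });
        }
        startBarrier.await();
        for (int i = 0; i < 20; i++) {
            futureRef.set(new CompletableFuture<>()); // each fresh future opens a new race window
            Thread.sleep(1);
        }
        running.set(false);
        pool.shutdown();
    }
}
```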
ConnectTransportException(masterNode, "Fake error")); @@ -639,7 +639,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, A assertThat(transport.capturedRequests().length, equalTo(1)); CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0]; assertTrue(capturedRequest.node().isMasterNode()); - assertThat(capturedRequest.request(), equalTo(request)); + assertThat(asInstanceOf(TermOverridingMasterNodeRequest.class, capturedRequest.request()).request, equalTo(request)); assertThat(capturedRequest.action(), equalTo("internal:testAction")); transport.handleResponse(capturedRequest.requestId(), response); diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index 5530ec61fea33..340ca87968db0 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -92,7 +92,6 @@ public class TransportWriteActionTests extends ESTestCase { private ClusterService clusterService; private IndexShard indexShard; - private Translog.Location location; @BeforeClass public static void beforeClass() { @@ -102,7 +101,6 @@ public static void beforeClass() { @Before public void initCommonMocks() { indexShard = mock(IndexShard.class); - location = mock(Translog.Location.class); clusterService = createClusterService(threadPool); when(indexShard.refresh(any())).thenReturn(new Engine.RefreshResult(true, randomNonNegativeLong(), 1)); ReplicationGroup replicationGroup = mock(ReplicationGroup.class); @@ -483,7 +481,14 @@ protected void dispatchedShardOperationOnPrimary( if (withDocumentFailureOnPrimary) { throw new RuntimeException("simulated"); } else { - return new WritePrimaryResult<>(request, new TestResponse(), location, primary, logger, postWriteRefresh); + return new WritePrimaryResult<>( + request, + new TestResponse(), + Translog.Location.EMPTY, + primary, + logger, + postWriteRefresh + ); } }); } @@ -495,7 +500,7 @@ protected void dispatchedShardOperationOnReplica(TestRequest request, IndexShard if (withDocumentFailureOnReplica) { replicaResult = new WriteReplicaResult<>(request, null, new RuntimeException("simulated"), replica, logger); } else { - replicaResult = new WriteReplicaResult<>(request, location, null, replica, logger); + replicaResult = new WriteReplicaResult<>(request, Translog.Location.EMPTY, null, replica, logger); } return replicaResult; }); diff --git a/server/src/test/java/org/elasticsearch/action/synonyms/SynonymsTestUtils.java b/server/src/test/java/org/elasticsearch/action/synonyms/SynonymsTestUtils.java index 21b5c42ba3ad6..d99f2993159a4 100644 --- a/server/src/test/java/org/elasticsearch/action/synonyms/SynonymsTestUtils.java +++ b/server/src/test/java/org/elasticsearch/action/synonyms/SynonymsTestUtils.java @@ -17,14 +17,26 @@ import static org.elasticsearch.test.ESTestCase.randomIdentifier; import static org.elasticsearch.test.ESTestCase.randomLongBetween; -class SynonymsTestUtils { +public class SynonymsTestUtils { private SynonymsTestUtils() { throw new UnsupportedOperationException(); } + public static SynonymRule[] randomSynonymsSet(int length) { + return randomSynonymsSet(length, length); + } + + public static SynonymRule[] randomSynonymsSet(int minLength, int maxLength) { + return randomArray(minLength, maxLength, 
SynonymRule[]::new, SynonymsTestUtils::randomSynonymRule); + } + + public static SynonymRule[] randomSynonymsSetWithoutIds(int minLength, int maxLength) { + return randomArray(minLength, maxLength, SynonymRule[]::new, () -> randomSynonymRule(null)); + } + static SynonymRule[] randomSynonymsSet() { - return randomArray(10, SynonymRule[]::new, SynonymsTestUtils::randomSynonymRule); + return randomSynonymsSet(0, 10); } static SynonymSetSummary[] randomSynonymsSetSummary() { @@ -32,10 +44,11 @@ static SynonymSetSummary[] randomSynonymsSetSummary() { } static SynonymRule randomSynonymRule() { - return new SynonymRule( - randomBoolean() ? null : randomIdentifier(), - String.join(", ", randomArray(1, 10, String[]::new, () -> randomAlphaOfLengthBetween(1, 10))) - ); + return randomSynonymRule(randomBoolean() ? null : randomIdentifier()); + } + + public static SynonymRule randomSynonymRule(String id) { + return new SynonymRule(id, String.join(", ", randomArray(1, 10, String[]::new, () -> randomAlphaOfLengthBetween(1, 10)))); } static SynonymSetSummary randomSynonymSetSummary() { diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java index ee98f40a6cb29..23f3395c6c49e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java @@ -45,6 +45,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.indices.SystemIndices; @@ -309,10 +310,14 @@ public void testToXContent() throws IOException { "time": 1 } }, + "mappings_updated_version" : %s, "system": false, "timestamp_range": { "shards": [] }, + "event_ingested_range": { + "unknown": true + }, "stats": { "write_load": { "loads": [-1.0], @@ -385,6 +390,7 @@ public void testToXContent() throws IOException { TransportVersion.current(), IndexVersion.current(), IndexVersion.current(), + IndexVersion.current(), allocationId, allocationId ) @@ -572,10 +578,14 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti "time" : 1 } }, + "mappings_updated_version" : %s, "system" : false, "timestamp_range" : { "shards" : [ ] }, + "event_ingested_range" : { + "unknown" : true + }, "stats" : { "write_load" : { "loads" : [ @@ -652,6 +662,7 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti TransportVersion.current(), IndexVersion.current(), IndexVersion.current(), + IndexVersion.current(), allocationId, allocationId ), @@ -845,10 +856,14 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti "time" : 1 } }, + "mappings_updated_version" : %s, "system" : false, "timestamp_range" : { "shards" : [ ] }, + "event_ingested_range" : { + "unknown" : true + }, "stats" : { "write_load" : { "loads" : [ @@ -925,6 +940,7 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti TransportVersion.current(), IndexVersion.current(), IndexVersion.current(), + IndexVersion.current(), allocationId, allocationId ), @@ -1014,9 +1030,13 @@ public void testToXContentSameTypeName() throws IOException { "0" : [ ] }, "rollover_info" : { }, + "mappings_updated_version" : %s, "system" : false, "timestamp_range" : { "shards" : [ ] + 
}, + "event_ingested_range" : { + "shards" : [ ] } } }, @@ -1032,7 +1052,7 @@ public void testToXContentSameTypeName() throws IOException { "unassigned" : [ ], "nodes" : { } } - }""", IndexVersion.current()), Strings.toString(builder)); + }""", IndexVersion.current(), IndexVersion.current()), Strings.toString(builder)); } public void testNodeFeaturesSorted() throws IOException { @@ -1088,6 +1108,7 @@ private ClusterState buildClusterState() throws IOException { .putRolloverInfo(new RolloverInfo("rolloveAlias", new ArrayList<>(), 1L)) .stats(new IndexMetadataStats(IndexWriteLoad.builder(1).build(), 120, 1)) .indexWriteLoadForecast(8.0) + .eventIngestedRange(IndexLongFieldRange.UNKNOWN, TransportVersions.V_8_0_0) .build(); return ClusterState.builder(ClusterName.DEFAULT) diff --git a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStartedClusterStateTaskExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStartedClusterStateTaskExecutorTests.java index ea2bc79542e4a..d5ea160427952 100644 --- a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStartedClusterStateTaskExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStartedClusterStateTaskExecutorTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.cluster.action.shard; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.cluster.ClusterState; @@ -69,7 +70,14 @@ public void testEmptyTaskListProducesSameClusterState() throws Exception { public void testNonExistentIndexMarkedAsSuccessful() throws Exception { final ClusterState clusterState = stateWithNoShard(); final StartedShardUpdateTask entry = new StartedShardUpdateTask( - new StartedShardEntry(new ShardId("test", "_na", 0), "aId", randomNonNegativeLong(), "test", ShardLongFieldRange.UNKNOWN), + new StartedShardEntry( + new ShardId("test", "_na", 0), + "aId", + randomNonNegativeLong(), + "test", + ShardLongFieldRange.UNKNOWN, + ShardLongFieldRange.UNKNOWN + ), createTestListener() ); @@ -91,6 +99,7 @@ public void testNonExistentShardsAreMarkedAsSuccessful() throws Exception { String.valueOf(i), 0L, "allocation id", + ShardLongFieldRange.UNKNOWN, ShardLongFieldRange.UNKNOWN ), createTestListener() @@ -105,6 +114,7 @@ public void testNonExistentShardsAreMarkedAsSuccessful() throws Exception { String.valueOf(i), 0L, "shard id", + ShardLongFieldRange.UNKNOWN, ShardLongFieldRange.UNKNOWN ), createTestListener() @@ -133,7 +143,14 @@ public void testNonInitializingShardAreMarkedAsSuccessful() throws Exception { } final long primaryTerm = indexMetadata.primaryTerm(shardId.id()); return new StartedShardUpdateTask( - new StartedShardEntry(shardId, allocationId, primaryTerm, "test", ShardLongFieldRange.UNKNOWN), + new StartedShardEntry( + shardId, + allocationId, + primaryTerm, + "test", + ShardLongFieldRange.UNKNOWN, + ShardLongFieldRange.UNKNOWN + ), createTestListener() ); }) @@ -153,7 +170,14 @@ public void testStartPrimary() throws Exception { final String primaryAllocationId = primaryShard.allocationId().getId(); final var task = new StartedShardUpdateTask( - new StartedShardEntry(shardId, primaryAllocationId, primaryTerm, "test", ShardLongFieldRange.UNKNOWN), + new StartedShardEntry( + shardId, + primaryAllocationId, + primaryTerm, + "test", + ShardLongFieldRange.UNKNOWN, + ShardLongFieldRange.UNKNOWN + ), createTestListener() ); @@ -180,7 +204,14 @@ public void 
testStartReplica() throws Exception { final ShardRouting replicaShard = clusterState.routingTable().shardRoutingTable(shardId).replicaShards().iterator().next(); final String replicaAllocationId = replicaShard.allocationId().getId(); final var task = new StartedShardUpdateTask( - new StartedShardEntry(shardId, replicaAllocationId, primaryTerm, "test", ShardLongFieldRange.UNKNOWN), + new StartedShardEntry( + shardId, + replicaAllocationId, + primaryTerm, + "test", + ShardLongFieldRange.UNKNOWN, + ShardLongFieldRange.UNKNOWN + ), createTestListener() ); @@ -208,7 +239,14 @@ public void testDuplicateStartsAreOkay() throws Exception { final List tasks = IntStream.range(0, randomIntBetween(2, 10)) .mapToObj( i -> new StartedShardUpdateTask( - new StartedShardEntry(shardId, allocationId, primaryTerm, "test", ShardLongFieldRange.UNKNOWN), + new StartedShardEntry( + shardId, + allocationId, + primaryTerm, + "test", + ShardLongFieldRange.UNKNOWN, + ShardLongFieldRange.UNKNOWN + ), createTestListener() ) ) @@ -249,6 +287,7 @@ public void testPrimaryTermsMismatchOnPrimary() throws Exception { primaryAllocationId, primaryTerm - 1, "primary terms does not match on primary", + ShardLongFieldRange.UNKNOWN, ShardLongFieldRange.UNKNOWN ), createTestListener() @@ -270,6 +309,7 @@ public void testPrimaryTermsMismatchOnPrimary() throws Exception { primaryAllocationId, primaryTerm, "primary terms match on primary", + ShardLongFieldRange.UNKNOWN, ShardLongFieldRange.UNKNOWN ), createTestListener() @@ -312,7 +352,14 @@ public void testPrimaryTermsMismatchOnReplica() throws Exception { .getId(); final StartedShardUpdateTask task = new StartedShardUpdateTask( - new StartedShardEntry(shardId, replicaAllocationId, replicaPrimaryTerm, "test on replica", ShardLongFieldRange.UNKNOWN), + new StartedShardEntry( + shardId, + replicaAllocationId, + replicaPrimaryTerm, + "test on replica", + ShardLongFieldRange.UNKNOWN, + ShardLongFieldRange.UNKNOWN + ), createTestListener() ); @@ -339,13 +386,18 @@ public void testExpandsTimestampRangeForPrimary() throws Exception { final String primaryAllocationId = primaryShard.allocationId().getId(); assertThat(indexMetadata.getTimestampRange(), sameInstance(IndexLongFieldRange.NO_SHARDS)); + assertThat(indexMetadata.getEventIngestedRange(), sameInstance(IndexLongFieldRange.NO_SHARDS)); final ShardLongFieldRange shardTimestampRange = randomBoolean() ? ShardLongFieldRange.UNKNOWN : randomBoolean() ? ShardLongFieldRange.EMPTY : ShardLongFieldRange.of(1606407943000L, 1606407944000L); + final ShardLongFieldRange shardEventIngestedRange = randomBoolean() ? ShardLongFieldRange.UNKNOWN + : randomBoolean() ? 
ShardLongFieldRange.EMPTY + : ShardLongFieldRange.of(1606407943000L, 1606407944000L); + final var task = new StartedShardUpdateTask( - new StartedShardEntry(shardId, primaryAllocationId, primaryTerm, "test", shardTimestampRange), + new StartedShardEntry(shardId, primaryAllocationId, primaryTerm, "test", shardTimestampRange, shardEventIngestedRange), createTestListener() ); @@ -369,6 +421,21 @@ public void testExpandsTimestampRangeForPrimary() throws Exception { assertThat(timestampRange.getMin(), equalTo(shardTimestampRange.getMin())); assertThat(timestampRange.getMax(), equalTo(shardTimestampRange.getMax())); } + + final var eventIngestedRange = resultingState.metadata().index(indexName).getEventIngestedRange(); + if (clusterState.getMinTransportVersion().before(TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE)) { + assertThat(eventIngestedRange, sameInstance(IndexLongFieldRange.UNKNOWN)); + } else { + if (shardEventIngestedRange == ShardLongFieldRange.UNKNOWN) { + assertThat(eventIngestedRange, sameInstance(IndexLongFieldRange.UNKNOWN)); + } else if (shardEventIngestedRange == ShardLongFieldRange.EMPTY) { + assertThat(eventIngestedRange, sameInstance(IndexLongFieldRange.EMPTY)); + } else { + assertTrue(eventIngestedRange.isComplete()); + assertThat(eventIngestedRange.getMin(), equalTo(shardEventIngestedRange.getMin())); + assertThat(eventIngestedRange.getMax(), equalTo(shardEventIngestedRange.getMax())); + } + } } public void testExpandsTimestampRangeForReplica() throws Exception { @@ -380,15 +447,20 @@ public void testExpandsTimestampRangeForReplica() throws Exception { final long primaryTerm = indexMetadata.primaryTerm(shardId.id()); assertThat(indexMetadata.getTimestampRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); + assertThat(indexMetadata.getEventIngestedRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); final ShardLongFieldRange shardTimestampRange = randomBoolean() ? ShardLongFieldRange.UNKNOWN : randomBoolean() ? ShardLongFieldRange.EMPTY : ShardLongFieldRange.of(1606407943000L, 1606407944000L); + final ShardLongFieldRange shardEventIngestedRange = randomBoolean() ? ShardLongFieldRange.UNKNOWN + : randomBoolean() ? 
ShardLongFieldRange.EMPTY + : ShardLongFieldRange.of(1606407888888L, 1606407999999L); + final ShardRouting replicaShard = clusterState.routingTable().shardRoutingTable(shardId).replicaShards().iterator().next(); final String replicaAllocationId = replicaShard.allocationId().getId(); final var task = new StartedShardUpdateTask( - new StartedShardEntry(shardId, replicaAllocationId, primaryTerm, "test", shardTimestampRange), + new StartedShardEntry(shardId, replicaAllocationId, primaryTerm, "test", shardTimestampRange, shardEventIngestedRange), createTestListener() ); final var resultingState = executeTasks(clusterState, List.of(task)); @@ -401,7 +473,9 @@ public void testExpandsTimestampRangeForReplica() throws Exception { is(ShardRoutingState.STARTED) ); - assertThat(resultingState.metadata().index(indexName).getTimestampRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); + final IndexMetadata latestIndexMetadata = resultingState.metadata().index(indexName); + assertThat(latestIndexMetadata.getTimestampRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); + assertThat(latestIndexMetadata.getEventIngestedRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); } private ClusterState executeTasks(final ClusterState state, final List tasks) throws Exception { diff --git a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java index 100f3bbcc7829..cada467ea3ad6 100644 --- a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.util.SetOnce; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; import org.elasticsearch.cluster.ClusterState; @@ -61,6 +62,8 @@ import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; import static org.elasticsearch.test.ClusterServiceUtils.setState; +import static org.elasticsearch.test.TransportVersionUtils.getFirstVersion; +import static org.elasticsearch.test.TransportVersionUtils.getPreviousVersion; import static org.elasticsearch.test.TransportVersionUtils.randomCompatibleVersion; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; @@ -391,17 +394,24 @@ public void testDeduplicateRemoteShardStarted() throws InterruptedException { expectedRequests++; shardStateAction.clearRemoteShardRequestDeduplicator(); } - shardStateAction.shardStarted(startedShard, primaryTerm, "started", ShardLongFieldRange.EMPTY, new ActionListener<>() { - @Override - public void onResponse(Void aVoid) { - latch.countDown(); - } + shardStateAction.shardStarted( + startedShard, + primaryTerm, + "started", + ShardLongFieldRange.EMPTY, + ShardLongFieldRange.EMPTY, + new ActionListener<>() { + @Override + public void onResponse(Void aVoid) { + latch.countDown(); + } - @Override - public void onFailure(Exception e) { - latch.countDown(); + @Override + public void onFailure(Exception e) { + latch.countDown(); + } } - }); + ); } CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear(); assertThat(capturedRequests, arrayWithSize(expectedRequests)); @@ -482,7 +492,14 @@ public void 
testShardStarted() throws InterruptedException { final ShardRouting shardRouting = getRandomShardRouting(index); final long primaryTerm = clusterService.state().metadata().index(shardRouting.index()).primaryTerm(shardRouting.id()); final TestListener listener = new TestListener(); - shardStateAction.shardStarted(shardRouting, primaryTerm, "testShardStarted", ShardLongFieldRange.UNKNOWN, listener); + shardStateAction.shardStarted( + shardRouting, + primaryTerm, + "testShardStarted", + ShardLongFieldRange.UNKNOWN, + ShardLongFieldRange.UNKNOWN, + listener + ); final CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear(); assertThat(capturedRequests[0].request(), instanceOf(ShardStateAction.StartedShardEntry.class)); @@ -578,7 +595,37 @@ public void testStartedShardEntrySerialization() throws Exception { final TransportVersion version = randomFrom(randomCompatibleVersion(random())); final ShardLongFieldRange timestampRange = ShardLongFieldRangeWireTests.randomRange(); - final StartedShardEntry startedShardEntry = new StartedShardEntry(shardId, allocationId, primaryTerm, message, timestampRange); + final ShardLongFieldRange eventIngestedRange = ShardLongFieldRangeWireTests.randomRange(); + var startedShardEntry = new StartedShardEntry(shardId, allocationId, primaryTerm, message, timestampRange, eventIngestedRange); + try (StreamInput in = serialize(startedShardEntry, version).streamInput()) { + in.setTransportVersion(version); + final StartedShardEntry deserialized = new StartedShardEntry(in); + assertThat(deserialized.shardId, equalTo(shardId)); + assertThat(deserialized.allocationId, equalTo(allocationId)); + assertThat(deserialized.primaryTerm, equalTo(primaryTerm)); + assertThat(deserialized.message, equalTo(message)); + assertThat(deserialized.timestampRange, equalTo(timestampRange)); + if (version.before(TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE)) { + assertThat(deserialized.eventIngestedRange, equalTo(ShardLongFieldRange.UNKNOWN)); + } else { + assertThat(deserialized.eventIngestedRange, equalTo(eventIngestedRange)); + } + } + } + + public void testStartedShardEntrySerializationWithOlderTransportVersion() throws Exception { + final ShardId shardId = new ShardId(randomRealisticUnicodeOfLengthBetween(10, 100), UUID.randomUUID().toString(), between(0, 1000)); + final String allocationId = randomRealisticUnicodeOfCodepointLengthBetween(10, 100); + final long primaryTerm = randomIntBetween(0, 100); + final String message = randomRealisticUnicodeOfCodepointLengthBetween(10, 100); + final TransportVersion version = randomFrom( + getFirstVersion(), + getPreviousVersion(TransportVersions.MINIMUM_COMPATIBLE), + getPreviousVersion(TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE) + ); + final ShardLongFieldRange timestampRange = ShardLongFieldRangeWireTests.randomRange(); + final ShardLongFieldRange eventIngestedRange = ShardLongFieldRangeWireTests.randomRange(); + var startedShardEntry = new StartedShardEntry(shardId, allocationId, primaryTerm, message, timestampRange, eventIngestedRange); try (StreamInput in = serialize(startedShardEntry, version).streamInput()) { in.setTransportVersion(version); final StartedShardEntry deserialized = new StartedShardEntry(in); @@ -587,6 +634,7 @@ public void testStartedShardEntrySerialization() throws Exception { assertThat(deserialized.primaryTerm, equalTo(primaryTerm)); assertThat(deserialized.message, equalTo(message)); assertThat(deserialized.timestampRange, equalTo(timestampRange)); + 
assertThat(deserialized.eventIngestedRange, equalTo(ShardLongFieldRange.UNKNOWN)); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorVotingConfigurationTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorVotingConfigurationTests.java index 7f665cf241230..49c5060240809 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorVotingConfigurationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorVotingConfigurationTests.java @@ -458,7 +458,7 @@ public void testSettingInitialConfigurationTriggersElection() { value = "org.elasticsearch.cluster.coordination.ClusterBootstrapService:INFO" ) public void testClusterUUIDLogging() { - try (var mockLog = MockLog.capture(ClusterBootstrapService.class); var cluster = new Cluster(randomIntBetween(1, 3))) { + try (var mockLog = MockLog.capture(ClusterBootstrapService.class)) { mockLog.addExpectation( new MockLog.SeenEventExpectation( "fresh node message", @@ -468,25 +468,27 @@ public void testClusterUUIDLogging() { ) ); - cluster.runRandomly(); - cluster.stabilise(); - mockLog.assertAllExpectationsMatched(); - - final var restartingNode = cluster.getAnyNode(); - mockLog.addExpectation( - new MockLog.SeenEventExpectation( - "restarted node message", - ClusterBootstrapService.class.getCanonicalName(), - Level.INFO, - "this node is locked into cluster UUID [" - + restartingNode.getLastAppliedClusterState().metadata().clusterUUID() - + "] and will not attempt further cluster bootstrapping" - ) - ); - restartingNode.close(); - cluster.clusterNodes.replaceAll(cn -> cn == restartingNode ? cn.restartedNode() : cn); - cluster.stabilise(); - mockLog.assertAllExpectationsMatched(); + try (var cluster = new Cluster(randomIntBetween(1, 3))) { + cluster.runRandomly(); + cluster.stabilise(); + mockLog.assertAllExpectationsMatched(); + + final var restartingNode = cluster.getAnyNode(); + mockLog.addExpectation( + new MockLog.SeenEventExpectation( + "restarted node message", + ClusterBootstrapService.class.getCanonicalName(), + Level.INFO, + "this node is locked into cluster UUID [" + + restartingNode.getLastAppliedClusterState().metadata().clusterUUID() + + "] and will not attempt further cluster bootstrapping" + ) + ); + restartingNode.close(); + cluster.clusterNodes.replaceAll(cn -> cn == restartingNode ? 
cn.restartedNode() : cn); + cluster.stabilise(); + mockLog.assertAllExpectationsMatched(); + } } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java index 116acf938fcbc..527fd1f95b728 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java @@ -25,16 +25,23 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardLongFieldRange; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; import org.junit.Before; @@ -52,6 +59,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.in; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -80,6 +88,7 @@ public void testIndexMetadataSerialization() throws IOException { Map customMap = new HashMap<>(); customMap.put(randomAlphaOfLength(5), randomAlphaOfLength(10)); customMap.put(randomAlphaOfLength(10), randomAlphaOfLength(15)); + IndexVersion mappingsUpdatedVersion = IndexVersionUtils.randomVersion(); IndexMetadataStats indexStats = randomBoolean() ? randomIndexStats(numShard) : null; Double indexWriteLoadForecast = randomBoolean() ? randomDoubleBetween(0.0, 128, true) : null; Long shardSizeInBytesForecast = randomBoolean() ? 
randomLongBetween(1024, 10240) : null; @@ -106,10 +115,19 @@ public void testIndexMetadataSerialization() throws IOException { randomNonNegativeLong() ) ) + .mappingsUpdatedVersion(mappingsUpdatedVersion) .stats(indexStats) .indexWriteLoadForecast(indexWriteLoadForecast) .shardSizeInBytesForecast(shardSizeInBytesForecast) .putInferenceFields(inferenceFields) + .eventIngestedRange( + randomFrom( + IndexLongFieldRange.UNKNOWN, + IndexLongFieldRange.EMPTY, + IndexLongFieldRange.NO_SHARDS, + IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(5000000, 5500000)) + ) + ) .build(); assertEquals(system, metadata.isSystem()); @@ -137,6 +155,7 @@ public void testIndexMetadataSerialization() throws IOException { assertEquals(metadata.getCreationDate(), fromXContentMeta.getCreationDate()); assertEquals(metadata.getRoutingFactor(), fromXContentMeta.getRoutingFactor()); assertEquals(metadata.primaryTerm(0), fromXContentMeta.primaryTerm(0)); + assertEquals(metadata.getMappingsUpdatedVersion(), fromXContentMeta.getMappingsUpdatedVersion()); assertEquals(metadata.isSystem(), fromXContentMeta.isSystem()); Map expectedCustom = Map.of("my_custom", new DiffableStringMap(customMap)); assertEquals(metadata.getCustomData(), expectedCustom); @@ -164,12 +183,105 @@ public void testIndexMetadataSerialization() throws IOException { assertEquals(metadata.getRolloverInfos(), deserialized.getRolloverInfos()); assertEquals(deserialized.getCustomData(), expectedCustom); assertEquals(metadata.getCustomData(), deserialized.getCustomData()); + assertEquals(metadata.getMappingsUpdatedVersion(), deserialized.getMappingsUpdatedVersion()); assertEquals(metadata.isSystem(), deserialized.isSystem()); assertEquals(metadata.getStats(), deserialized.getStats()); assertEquals(metadata.getForecastedWriteLoad(), deserialized.getForecastedWriteLoad()); assertEquals(metadata.getForecastedShardSizeInBytes(), deserialized.getForecastedShardSizeInBytes()); assertEquals(metadata.getInferenceFields(), deserialized.getInferenceFields()); + assertEquals(metadata.getEventIngestedRange(), deserialized.getEventIngestedRange()); + } + } + + public void testIndexMetadataFromXContentParsingWithoutEventIngestedField() throws IOException { + Integer numShard = randomFrom(1, 2, 4, 8, 16); + int numberOfReplicas = randomIntBetween(0, 10); + final boolean system = randomBoolean(); + Map customMap = new HashMap<>(); + customMap.put(randomAlphaOfLength(5), randomAlphaOfLength(10)); + customMap.put(randomAlphaOfLength(10), randomAlphaOfLength(15)); + IndexMetadataStats indexStats = randomBoolean() ? randomIndexStats(numShard) : null; + Double indexWriteLoadForecast = randomBoolean() ? randomDoubleBetween(0.0, 128, true) : null; + Long shardSizeInBytesForecast = randomBoolean() ? 
randomLongBetween(1024, 10240) : null; + Map inferenceFields = randomInferenceFields(); + + IndexMetadata metadata = IndexMetadata.builder("foo") + .settings(indexSettings(numShard, numberOfReplicas).put("index.version.created", 1)) + .creationDate(randomLong()) + .primaryTerm(0, 2) + .setRoutingNumShards(32) + .system(system) + .putCustom("my_custom", customMap) + .putRolloverInfo( + new RolloverInfo( + randomAlphaOfLength(5), + List.of( + new MaxAgeCondition(TimeValue.timeValueMillis(randomNonNegativeLong())), + new MaxDocsCondition(randomNonNegativeLong()), + new MaxSizeCondition(ByteSizeValue.ofBytes(randomNonNegativeLong())), + new MaxPrimaryShardSizeCondition(ByteSizeValue.ofBytes(randomNonNegativeLong())), + new MaxPrimaryShardDocsCondition(randomNonNegativeLong()), + new OptimalShardCountCondition(3) + ), + randomNonNegativeLong() + ) + ) + .stats(indexStats) + .indexWriteLoadForecast(indexWriteLoadForecast) + .shardSizeInBytesForecast(shardSizeInBytesForecast) + .putInferenceFields(inferenceFields) + .eventIngestedRange( + randomFrom( + IndexLongFieldRange.UNKNOWN, + IndexLongFieldRange.EMPTY, + IndexLongFieldRange.NO_SHARDS, + IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(5000000, 5500000)) + ) + ) + .build(); + assertEquals(system, metadata.isSystem()); + + final XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + IndexMetadata.FORMAT.toXContent(builder, metadata); + builder.endObject(); + + // convert XContent to a map and remove the IndexMetadata.KEY_EVENT_INGESTED_RANGE entry + // to simulate IndexMetadata from an older cluster version (before TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE) + Map indexMetadataMap = XContentHelper.convertToMap(BytesReference.bytes(builder), true, XContentType.JSON).v2(); + + @SuppressWarnings("unchecked") + Map inner = (Map) indexMetadataMap.get("foo"); + assertTrue(inner.containsKey(IndexMetadata.KEY_EVENT_INGESTED_RANGE)); + inner.remove(IndexMetadata.KEY_EVENT_INGESTED_RANGE); + // validate that the IndexMetadata.KEY_EVENT_INGESTED_RANGE has been removed before calling fromXContent + assertFalse(inner.containsKey(IndexMetadata.KEY_EVENT_INGESTED_RANGE)); + + IndexMetadata fromXContentMeta; + XContentParserConfiguration config = XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry()) + .withDeprecationHandler(LoggingDeprecationHandler.INSTANCE); + try (XContentParser xContentParser = XContentHelper.mapToXContentParser(config, indexMetadataMap);) { + fromXContentMeta = IndexMetadata.fromXContent(xContentParser); } + + assertEquals(IndexLongFieldRange.NO_SHARDS, fromXContentMeta.getTimestampRange()); + // should come back as UNKNOWN when missing from IndexMetadata XContent + assertEquals(IndexLongFieldRange.UNKNOWN, fromXContentMeta.getEventIngestedRange()); + + // check a few other fields to ensure the parsing worked as expected + assertEquals( + "expected: " + Strings.toString(metadata) + "\nactual : " + Strings.toString(fromXContentMeta), + metadata, + fromXContentMeta + ); + assertEquals(metadata.hashCode(), fromXContentMeta.hashCode()); + assertEquals(metadata.getNumberOfReplicas(), fromXContentMeta.getNumberOfReplicas()); + assertEquals(metadata.getNumberOfShards(), fromXContentMeta.getNumberOfShards()); + assertEquals(metadata.getCreationVersion(), fromXContentMeta.getCreationVersion()); + Map expectedCustom = Map.of("my_custom", new DiffableStringMap(customMap)); + assertEquals(metadata.getCustomData(), expectedCustom); + 
assertEquals(metadata.getCustomData(), fromXContentMeta.getCustomData()); + assertEquals(metadata.getStats(), fromXContentMeta.getStats()); } public void testGetRoutingFactor() { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java index c900c3257a405..1766674ed42a1 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java @@ -255,6 +255,12 @@ public void testCreateDataStreamWithFailureStoreInitialized() throws Exception { assertThat(newState.metadata().index(backingIndexName).isSystem(), is(false)); assertThat(newState.metadata().index(failureStoreIndexName), notNullValue()); assertThat(newState.metadata().index(failureStoreIndexName).getSettings().get("index.hidden"), equalTo("true")); + assertThat( + DataStreamFailureStoreDefinition.FAILURE_STORE_DEFINITION_VERSION_SETTING.get( + newState.metadata().index(failureStoreIndexName).getSettings() + ), + equalTo(DataStreamFailureStoreDefinition.FAILURE_STORE_DEFINITION_VERSION) + ); assertThat(newState.metadata().index(failureStoreIndexName).isSystem(), is(false)); } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 43d64522ee6fb..8a487e5653627 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -10,6 +10,8 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; @@ -47,6 +49,7 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.query.SearchExecutionContextHelper; +import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.indices.EmptySystemIndices; import org.elasticsearch.indices.InvalidAliasNameException; import org.elasticsearch.indices.InvalidIndexNameException; @@ -1096,11 +1099,51 @@ public void testBuildIndexMetadata() { Settings indexSettings = indexSettings(IndexVersion.current(), 1, 0).build(); List aliases = List.of(AliasMetadata.builder("alias1").build()); - IndexMetadata indexMetadata = buildIndexMetadata("test", aliases, () -> null, indexSettings, 4, sourceIndexMetadata, false); + IndexMetadata indexMetadata = buildIndexMetadata( + "test", + aliases, + () -> null, + indexSettings, + 4, + sourceIndexMetadata, + false, + TransportVersion.current() + ); + + assertThat(indexMetadata.getAliases().size(), is(1)); + assertThat(indexMetadata.getAliases().keySet().iterator().next(), is("alias1")); + assertThat("The source index primary term must be used", indexMetadata.primaryTerm(0), is(3L)); + assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); + assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); + } 
+ + public void testBuildIndexMetadataWithTransportVersionBeforeEventIngestedRangeAdded() { + IndexMetadata sourceIndexMetadata = IndexMetadata.builder("parent") + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).build()) + .numberOfShards(1) + .numberOfReplicas(0) + .primaryTerm(0, 3L) + .build(); + + Settings indexSettings = indexSettings(IndexVersion.current(), 1, 0).build(); + List aliases = List.of(AliasMetadata.builder("alias1").build()); + IndexMetadata indexMetadata = buildIndexMetadata( + "test", + aliases, + () -> null, + indexSettings, + 4, + sourceIndexMetadata, + false, + randomFrom(TransportVersions.V_7_0_0, TransportVersions.V_8_0_0) + ); assertThat(indexMetadata.getAliases().size(), is(1)); assertThat(indexMetadata.getAliases().keySet().iterator().next(), is("alias1")); assertThat("The source index primary term must be used", indexMetadata.primaryTerm(0), is(3L)); + assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); + // on versions before event.ingested was added to cluster state, it should default to UNKNOWN, not NO_SHARDS + assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.UNKNOWN)); } public void testGetIndexNumberOfRoutingShardsWithNullSourceIndex() { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceBatchingTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceBatchingTests.java index a093178c04814..adec7b94df6a1 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceBatchingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceBatchingTests.java @@ -19,13 +19,13 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.core.CheckedRunnable; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.concurrent.CyclicBarrier; -import java.util.concurrent.TimeUnit; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_BLOCKS_WRITE_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -126,6 +126,9 @@ public void testBatchCloseIndices() throws Exception { // wait for the queue to have the second close tasks (the close-indices tasks) assertBusy(() -> assertThat(findPendingTasks(masterService, "close-indices"), hasSize(2))); + // wait for all ongoing tasks to complete on GENERIC to ensure that the batch is fully-formed (see #109187) + flushThreadPoolExecutor(getInstanceFromNode(ThreadPool.class), ThreadPool.Names.GENERIC); + block2.run(); // release block // assert that the requests were acknowledged @@ -208,14 +211,14 @@ public void testBatchBlockIndices() throws Exception { private static CheckedRunnable blockMasterService(MasterService masterService) { final var executionBarrier = new CyclicBarrier(2); masterService.createTaskQueue("block", Priority.URGENT, batchExecutionContext -> { - executionBarrier.await(10, TimeUnit.SECONDS); // notify test thread that the master service is blocked - executionBarrier.await(10, TimeUnit.SECONDS); // wait for test thread to release us + safeAwait(executionBarrier); // notify test thread that the master service is blocked + safeAwait(executionBarrier); // wait for test thread to release us for (final var taskContext : 
batchExecutionContext.taskContexts()) { taskContext.success(() -> {}); } return batchExecutionContext.initialState(); }).submitTask("block", new ExpectSuccessTask(), null); - return () -> executionBarrier.await(10, TimeUnit.SECONDS); + return () -> safeAwait(executionBarrier); } private static ClusterStateListener closedIndexCountListener(int closedIndices) { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMappingServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMappingServiceTests.java index 5fa5ebbaf5d06..1fc7807bd9735 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMappingServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataMappingServiceTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; @@ -94,6 +95,7 @@ public void testMappingVersion() throws Exception { singleTask(request) ); assertThat(resultingState.metadata().index("test").getMappingVersion(), equalTo(1 + previousVersion)); + assertThat(resultingState.metadata().index("test").getMappingsUpdatedVersion(), equalTo(IndexVersion.current())); } public void testMappingVersionUnchanged() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java index 3a522f3f5c06c..517d014c12723 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java @@ -349,9 +349,13 @@ public void testToXContentAPI_SameTypeName() throws IOException { "0" : [ ] }, "rollover_info" : { }, + "mappings_updated_version" : %s, "system" : false, "timestamp_range" : { "shards" : [ ] + }, + "event_ingested_range" : { + "shards" : [ ] } } }, @@ -360,7 +364,7 @@ public void testToXContentAPI_SameTypeName() throws IOException { }, "reserved_state" : { } } - }""", IndexVersion.current()), Strings.toString(builder)); + }""", IndexVersion.current(), IndexVersion.current()), Strings.toString(builder)); } public void testToXContentGateway_FlatSettingFalse_ReduceMappingTrue() throws IOException { @@ -519,9 +523,13 @@ public void testToXContentAPI_FlatSettingTrue_ReduceMappingFalse() throws IOExce "time" : 1 } }, + "mappings_updated_version" : %s, "system" : false, "timestamp_range" : { "shards" : [ ] + }, + "event_ingested_range" : { + "shards" : [ ] } } }, @@ -530,7 +538,7 @@ public void testToXContentAPI_FlatSettingTrue_ReduceMappingFalse() throws IOExce }, "reserved_state" : { } } - }""", IndexVersion.current(), IndexVersion.current()), Strings.toString(builder)); + }""", IndexVersion.current(), IndexVersion.current(), IndexVersion.current()), Strings.toString(builder)); } public void testToXContentAPI_FlatSettingFalse_ReduceMappingTrue() throws IOException { @@ -629,9 +637,13 @@ public void testToXContentAPI_FlatSettingFalse_ReduceMappingTrue() throws IOExce "time" : 1 } }, + "mappings_updated_version" : %s, "system" : false, "timestamp_range" : { "shards" : [ ] + }, + "event_ingested_range" : { + "shards" : [ ] } } }, @@ -640,7 +652,7 @@ public void 
testToXContentAPI_FlatSettingFalse_ReduceMappingTrue() throws IOExce }, "reserved_state" : { } } - }""", IndexVersion.current(), IndexVersion.current()), Strings.toString(builder)); + }""", IndexVersion.current(), IndexVersion.current(), IndexVersion.current()), Strings.toString(builder)); } public void testToXContentAPIReservedMetadata() throws IOException { @@ -765,9 +777,13 @@ public void testToXContentAPIReservedMetadata() throws IOException { "time" : 1 } }, + "mappings_updated_version" : %s, "system" : false, "timestamp_range" : { "shards" : [ ] + }, + "event_ingested_range" : { + "shards" : [ ] } } }, @@ -821,7 +837,7 @@ public void testToXContentAPIReservedMetadata() throws IOException { } } } - }""", IndexVersion.current(), IndexVersion.current()), Strings.toString(builder)); + }""", IndexVersion.current(), IndexVersion.current(), IndexVersion.current()), Strings.toString(builder)); } private Metadata buildMetadata() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java index 5484998fef2e9..1bae3ca59f3d9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java @@ -36,6 +36,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.shard.IndexLongFieldRange; +import org.elasticsearch.index.shard.ShardLongFieldRange; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.test.TransportVersionUtils; @@ -48,6 +50,7 @@ import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -55,9 +58,102 @@ public class ClusterSerializationTests extends ESAllocationTestCase { public void testClusterStateSerialization() throws Exception { - Metadata metadata = Metadata.builder() - .put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(10).numberOfReplicas(1)) - .build(); + IndexLongFieldRange eventIngestedRangeInput = randomFrom( + IndexLongFieldRange.UNKNOWN, + IndexLongFieldRange.NO_SHARDS, + IndexLongFieldRange.EMPTY, + IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(100000, 200000)) + ); + + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder("test") + .settings(settings(IndexVersion.current())) + .numberOfShards(10) + .numberOfReplicas(1) + .eventIngestedRange(eventIngestedRangeInput, TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE); + + ClusterStateTestRecord result = createAndSerializeClusterState(indexMetadataBuilder, TransportVersion.current()); + + assertThat(result.serializedClusterState().getClusterName().value(), equalTo(result.clusterState().getClusterName().value())); + assertThat(result.serializedClusterState().routingTable().toString(), equalTo(result.clusterState().routingTable().toString())); + + IndexLongFieldRange eventIngestedRangeOutput = result.serializedClusterState().getMetadata().index("test").getEventIngestedRange(); + assertThat(eventIngestedRangeInput, equalTo(eventIngestedRangeOutput)); + + if 
(eventIngestedRangeInput.containsAllShardRanges() && eventIngestedRangeInput != IndexLongFieldRange.EMPTY) { + assertThat(eventIngestedRangeOutput.getMin(), equalTo(100000L)); + assertThat(eventIngestedRangeOutput.getMax(), equalTo(200000L)); + } + } + + public void testClusterStateSerializationWithTimestampRangesWithOlderTransportVersion() throws Exception { + TransportVersion versionBeforeEventIngestedInClusterState = randomFrom( + TransportVersions.V_7_0_0, + TransportVersions.V_8_0_0, + TransportVersions.ML_INFERENCE_GOOGLE_VERTEX_AI_EMBEDDINGS_ADDED // version before EVENT_INGESTED_RANGE_IN_CLUSTER_STATE + ); + { + IndexLongFieldRange eventIngestedRangeInput = randomFrom( + IndexLongFieldRange.UNKNOWN, + IndexLongFieldRange.NO_SHARDS, + IndexLongFieldRange.EMPTY, + IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(100000, 200000)) + ); + + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder("test") + .settings(settings(IndexVersion.current())) + .numberOfShards(10) + .numberOfReplicas(1) + .eventIngestedRange(eventIngestedRangeInput, versionBeforeEventIngestedInClusterState); + + ClusterStateTestRecord result = createAndSerializeClusterState(indexMetadataBuilder, versionBeforeEventIngestedInClusterState); + + assertThat(result.serializedClusterState().getClusterName().value(), equalTo(result.clusterState().getClusterName().value())); + assertThat(result.serializedClusterState().routingTable().toString(), equalTo(result.clusterState().routingTable().toString())); + + IndexLongFieldRange eventIngestedRangeOutput = result.serializedClusterState() + .getMetadata() + .index("test") + .getEventIngestedRange(); + // should always come back as UNKNOWN when an older transport version is passed in + assertSame(IndexLongFieldRange.UNKNOWN, eventIngestedRangeOutput); + } + { + // UNKNOWN is the only allowed state for event.ingested range in older versions, so this serialization test should fail + IndexLongFieldRange eventIngestedRangeInput = randomFrom( + IndexLongFieldRange.NO_SHARDS, + IndexLongFieldRange.EMPTY, + IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(100000, 200000)) + ); + + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder("test") + .settings(settings(IndexVersion.current())) + .numberOfShards(10) + .numberOfReplicas(1) + .eventIngestedRange(eventIngestedRangeInput, TransportVersion.current()); + + AssertionError assertionError = expectThrows( + AssertionError.class, + () -> createAndSerializeClusterState(indexMetadataBuilder, versionBeforeEventIngestedInClusterState) + ); + + assertThat( + assertionError.getMessage(), + containsString("eventIngestedRange should be UNKNOWN until all nodes are on the new version") + ); + } + } + + /** + * @param clusterState original ClusterState created by helper method + * @param serializedClusterState serialized version of the clusterState + */ + private record ClusterStateTestRecord(ClusterState clusterState, ClusterState serializedClusterState) {} + + private static ClusterStateTestRecord createAndSerializeClusterState( + IndexMetadata.Builder indexMetadataBuilder, + TransportVersion transportVersion + ) throws IOException { + Metadata metadata = Metadata.builder().put(indexMetadataBuilder).build(); RoutingTable routingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY) .addAsNew(metadata.index("test")) @@ -82,15 +178,17 @@ public void testClusterStateSerialization() throws Exception { 
.routingTable(strategy.reroute(clusterState, "reroute", ActionListener.noop()).routingTable()) .build(); - ClusterState serializedClusterState = ClusterState.Builder.fromBytes( - ClusterState.Builder.toBytes(clusterState), - newNode("node1"), + BytesStreamOutput outStream = new BytesStreamOutput(); + outStream.setTransportVersion(transportVersion); + clusterState.writeTo(outStream); + StreamInput inStream = new NamedWriteableAwareStreamInput( + outStream.bytes().streamInput(), new NamedWriteableRegistry(ClusterModule.getNamedWriteables()) ); + inStream.setTransportVersion(transportVersion); + ClusterState serializedClusterState = ClusterState.readFrom(inStream, null); - assertThat(serializedClusterState.getClusterName().value(), equalTo(clusterState.getClusterName().value())); - - assertThat(serializedClusterState.routingTable().toString(), equalTo(clusterState.routingTable().toString())); + return new ClusterStateTestRecord(clusterState, serializedClusterState); } public void testRoutingTableSerialization() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/common/settings/MemorySizeSettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/MemorySizeSettingsTests.java index 98da24fc75c96..5321079896b08 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/MemorySizeSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/MemorySizeSettingsTests.java @@ -71,6 +71,11 @@ public void testCircuitBreakerSettings() { "indices.breaker.total.limit", ByteSizeValue.ofBytes((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * defaultTotalPercentage)) ); + assertWarnings( + "[indices.breaker.total.limit] setting of [25%] is below the recommended minimum of 50.0% of the heap", + "[indices.breaker.total.limit] should be specified using a percentage of the heap. 
" + + "Absolute size settings will be forbidden in a future release" + ); assertMemorySizeSetting( HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "indices.breaker.fielddata.limit", diff --git a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java index 3b0935e8f7b5c..e10cca58f8b78 100644 --- a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java @@ -50,14 +50,17 @@ private void assertParseException(String input, String format) { } private void assertParseException(String input, String format, int errorIndex) { - assertParseException(input, format, equalTo(errorIndex)); + assertParseException(input, DateFormatter.forPattern(format), equalTo(errorIndex)); } - private void assertParseException(String input, String format, Matcher indexMatcher) { - DateFormatter javaTimeFormatter = DateFormatter.forPattern(format); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> javaTimeFormatter.parse(input)); + private void assertParseException(String input, DateFormatter formatter, int errorIndex) { + assertParseException(input, formatter, equalTo(errorIndex)); + } + + private void assertParseException(String input, DateFormatter formatter, Matcher indexMatcher) { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> formatter.parse(input)); assertThat(e.getMessage(), containsString(input)); - assertThat(e.getMessage(), containsString(format)); + assertThat(e.getMessage(), containsString(formatter.pattern())); assertThat(e.getCause(), instanceOf(DateTimeParseException.class)); assertThat(((DateTimeParseException) e.getCause()).getErrorIndex(), indexMatcher); } @@ -811,6 +814,20 @@ public void testDecimalPointParsing() { assertParseException("2001-01-01T00:00:00.123,456Z", "date_optional_time", 23); // This should fail, but java is ok with this because the field has the same value // assertJavaTimeParseException("2001-01-01T00:00:00.123,123Z", "strict_date_optional_time_nanos"); + + // for historical reasons, + // despite the use of a locale with , separator these formatters still expect only . 
decimals + DateFormatter formatter = DateFormatter.forPattern("strict_date_time").withLocale(Locale.FRANCE); + assertParses("2020-01-01T12:00:00.0Z", formatter); + assertParseException("2020-01-01T12:00:00,0Z", formatter, 19); + + formatter = DateFormatter.forPattern("strict_date_hour_minute_second_fraction").withLocale(Locale.GERMANY); + assertParses("2020-01-01T12:00:00.0", formatter); + assertParseException("2020-01-01T12:00:00,0", formatter, 19); + + formatter = DateFormatter.forPattern("strict_date_hour_minute_second_millis").withLocale(Locale.ITALY); + assertParses("2020-01-01T12:00:00.0", formatter); + assertParseException("2020-01-01T12:00:00,0", formatter, 19); } public void testTimeZoneFormatting() { diff --git a/server/src/test/java/org/elasticsearch/common/time/Iso8601ParserTests.java b/server/src/test/java/org/elasticsearch/common/time/Iso8601ParserTests.java index 185c9aa983aaa..18d4e3b624465 100644 --- a/server/src/test/java/org/elasticsearch/common/time/Iso8601ParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/Iso8601ParserTests.java @@ -33,6 +33,12 @@ import static java.time.temporal.ChronoField.NANO_OF_SECOND; import static java.time.temporal.ChronoField.SECOND_OF_MINUTE; import static java.time.temporal.ChronoField.YEAR; +import static org.elasticsearch.common.time.DecimalSeparator.BOTH; +import static org.elasticsearch.common.time.DecimalSeparator.COMMA; +import static org.elasticsearch.common.time.DecimalSeparator.DOT; +import static org.elasticsearch.common.time.TimezonePresence.FORBIDDEN; +import static org.elasticsearch.common.time.TimezonePresence.MANDATORY; +import static org.elasticsearch.common.time.TimezonePresence.OPTIONAL; import static org.elasticsearch.test.LambdaMatchers.transformedMatch; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -42,7 +48,7 @@ public class Iso8601ParserTests extends ESTestCase { private static Iso8601Parser defaultParser() { - return new Iso8601Parser(Set.of(), true, Map.of()); + return new Iso8601Parser(Set.of(), true, null, BOTH, OPTIONAL, Map.of()); } private static Matcher hasResult(DateTime dateTime) { @@ -77,68 +83,193 @@ public void testOutOfRange() { public void testMandatoryFields() { assertThat( - new Iso8601Parser(Set.of(YEAR), true, Map.of()).tryParse("2023", null), + new Iso8601Parser(Set.of(YEAR), true, null, BOTH, OPTIONAL, Map.of()).tryParse("2023", null), hasResult(new DateTime(2023, null, null, null, null, null, null, null, null)) ); - assertThat(new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR), true, Map.of()).tryParse("2023", null), hasError(4)); + assertThat( + new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR), true, null, BOTH, OPTIONAL, Map.of()).tryParse("2023", null), + hasError(4) + ); assertThat( - new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR), true, Map.of()).tryParse("2023-06", null), + new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR), true, null, BOTH, OPTIONAL, Map.of()).tryParse("2023-06", null), hasResult(new DateTime(2023, 6, null, null, null, null, null, null, null)) ); - assertThat(new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH), true, Map.of()).tryParse("2023-06", null), hasError(7)); + assertThat( + new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH), true, null, BOTH, OPTIONAL, Map.of()).tryParse("2023-06", null), + hasError(7) + ); assertThat( - new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH), true, Map.of()).tryParse("2023-06-20", null), + new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, 
DAY_OF_MONTH), true, null, BOTH, OPTIONAL, Map.of()).tryParse("2023-06-20", null), hasResult(new DateTime(2023, 6, 20, null, null, null, null, null, null)) ); assertThat( - new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY), false, Map.of()).tryParse("2023-06-20", null), + new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY), false, null, BOTH, OPTIONAL, Map.of()).tryParse( + "2023-06-20", + null + ), hasError(10) ); assertThat( - new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY), false, Map.of()).tryParse("2023-06-20T15", null), - hasResult(new DateTime(2023, 6, 20, 15, 0, 0, 0, null, null)) - ); - assertThat( - new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR), false, Map.of()).tryParse( + new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY), false, null, BOTH, OPTIONAL, Map.of()).tryParse( "2023-06-20T15", null ), + hasResult(new DateTime(2023, 6, 20, 15, 0, 0, 0, null, null)) + ); + assertThat( + new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR), false, null, BOTH, OPTIONAL, Map.of()) + .tryParse("2023-06-20T15", null), hasError(13) ); assertThat( - new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR), false, Map.of()).tryParse( - "2023-06-20T15Z", - null - ), + new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR), false, null, BOTH, OPTIONAL, Map.of()) + .tryParse("2023-06-20T15Z", null), hasError(13) ); assertThat( - new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR), false, Map.of()).tryParse( - "2023-06-20T15:48", - null - ), + new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR), false, null, BOTH, OPTIONAL, Map.of()) + .tryParse("2023-06-20T15:48", null), hasResult(new DateTime(2023, 6, 20, 15, 48, 0, 0, null, null)) ); assertThat( - new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), false, Map.of()) - .tryParse("2023-06-20T15:48", null), + new Iso8601Parser( + Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), + false, + null, + BOTH, + OPTIONAL, + Map.of() + ).tryParse("2023-06-20T15:48", null), hasError(16) ); assertThat( - new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), false, Map.of()) - .tryParse("2023-06-20T15:48Z", null), + new Iso8601Parser( + Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), + false, + null, + BOTH, + OPTIONAL, + Map.of() + ).tryParse("2023-06-20T15:48Z", null), hasError(16) ); assertThat( - new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), false, Map.of()) - .tryParse("2023-06-20T15:48:09", null), + new Iso8601Parser( + Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), + false, + null, + BOTH, + OPTIONAL, + Map.of() + ).tryParse("2023-06-20T15:48:09", null), hasResult(new DateTime(2023, 6, 20, 15, 48, 9, 0, null, null)) ); + + assertThat( + new Iso8601Parser( + Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE, NANO_OF_SECOND), + false, + null, + BOTH, + OPTIONAL, + Map.of() + ).tryParse("2023-06-20T15:48:09", null), + hasError(19) + ); + assertThat( + new Iso8601Parser( + Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, 
HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE, NANO_OF_SECOND), + false, + null, + BOTH, + OPTIONAL, + Map.of() + ).tryParse("2023-06-20T15:48:09.5", null), + hasResult(new DateTime(2023, 6, 20, 15, 48, 9, 500_000_000, null, null)) + ); + } + + public void testMaxAllowedField() { + assertThat( + new Iso8601Parser(Set.of(), false, YEAR, BOTH, FORBIDDEN, Map.of()).tryParse("2023", null), + hasResult(new DateTime(2023, null, null, null, null, null, null, null, null)) + ); + assertThat(new Iso8601Parser(Set.of(), false, YEAR, BOTH, FORBIDDEN, Map.of()).tryParse("2023-01", null), hasError(4)); + + assertThat( + new Iso8601Parser(Set.of(), false, MONTH_OF_YEAR, BOTH, FORBIDDEN, Map.of()).tryParse("2023-01", null), + hasResult(new DateTime(2023, 1, null, null, null, null, null, null, null)) + ); + assertThat(new Iso8601Parser(Set.of(), false, MONTH_OF_YEAR, BOTH, FORBIDDEN, Map.of()).tryParse("2023-01-01", null), hasError(7)); + + assertThat( + new Iso8601Parser(Set.of(), false, DAY_OF_MONTH, BOTH, FORBIDDEN, Map.of()).tryParse("2023-01-01", null), + hasResult(new DateTime(2023, 1, 1, null, null, null, null, null, null)) + ); + assertThat(new Iso8601Parser(Set.of(), false, DAY_OF_MONTH, BOTH, FORBIDDEN, Map.of()).tryParse("2023-01-01T", null), hasError(10)); + assertThat( + new Iso8601Parser(Set.of(), false, DAY_OF_MONTH, BOTH, FORBIDDEN, Map.of()).tryParse("2023-01-01T12", null), + hasError(10) + ); + + assertThat( + new Iso8601Parser(Set.of(), false, HOUR_OF_DAY, BOTH, FORBIDDEN, Map.of()).tryParse("2023-01-01T12", null), + hasResult(new DateTime(2023, 1, 1, 12, 0, 0, 0, null, null)) + ); + assertThat( + new Iso8601Parser(Set.of(), false, HOUR_OF_DAY, BOTH, FORBIDDEN, Map.of()).tryParse("2023-01-01T12:00", null), + hasError(13) + ); + + assertThat( + new Iso8601Parser(Set.of(), false, MINUTE_OF_HOUR, BOTH, FORBIDDEN, Map.of()).tryParse("2023-01-01T12:00", null), + hasResult(new DateTime(2023, 1, 1, 12, 0, 0, 0, null, null)) + ); + assertThat( + new Iso8601Parser(Set.of(), false, MINUTE_OF_HOUR, BOTH, FORBIDDEN, Map.of()).tryParse("2023-01-01T12:00:00", null), + hasError(16) + ); + + assertThat( + new Iso8601Parser(Set.of(), false, SECOND_OF_MINUTE, BOTH, FORBIDDEN, Map.of()).tryParse("2023-01-01T12:00:00", null), + hasResult(new DateTime(2023, 1, 1, 12, 0, 0, 0, null, null)) + ); + assertThat( + new Iso8601Parser(Set.of(), false, SECOND_OF_MINUTE, BOTH, FORBIDDEN, Map.of()).tryParse("2023-01-01T12:00:00.5", null), + hasError(19) + ); + } + + public void testTimezoneForbidden() { + assertThat(new Iso8601Parser(Set.of(), false, null, BOTH, FORBIDDEN, Map.of()).tryParse("2023-01-01T12Z", null), hasError(13)); + assertThat(new Iso8601Parser(Set.of(), false, null, BOTH, FORBIDDEN, Map.of()).tryParse("2023-01-01T12:00Z", null), hasError(16)); + assertThat( + new Iso8601Parser(Set.of(), false, null, BOTH, FORBIDDEN, Map.of()).tryParse("2023-01-01T12:00:00Z", null), + hasError(19) + ); + + // a default timezone should still make it through + ZoneOffset zoneId = ZoneOffset.ofHours(2); + assertThat( + new Iso8601Parser(Set.of(), false, null, BOTH, FORBIDDEN, Map.of()).tryParse("2023-01-01T12:00:00", zoneId), + hasResult(new DateTime(2023, 1, 1, 12, 0, 0, 0, zoneId, zoneId)) + ); + } + + public void testTimezoneMandatory() { + assertThat(new Iso8601Parser(Set.of(), false, null, BOTH, MANDATORY, Map.of()).tryParse("2023-01-01T12", null), hasError(13)); + assertThat(new Iso8601Parser(Set.of(), false, null, BOTH, MANDATORY, Map.of()).tryParse("2023-01-01T12:00", null), hasError(16)); + assertThat(new 
Iso8601Parser(Set.of(), false, null, BOTH, MANDATORY, Map.of()).tryParse("2023-01-01T12:00:00", null), hasError(19)); + + assertThat( + new Iso8601Parser(Set.of(), false, null, BOTH, MANDATORY, Map.of()).tryParse("2023-01-01T12:00:00Z", null), + hasResult(new DateTime(2023, 1, 1, 12, 0, 0, 0, ZoneOffset.UTC, ZoneOffset.UTC)) + ); } public void testParseNanos() { @@ -188,6 +319,41 @@ public void testParseNanos() { assertThat(defaultParser().tryParse("2023-01-01T12:00:00.0000000005", null), hasError(29)); } + public void testParseDecimalSeparator() { + assertThat( + new Iso8601Parser(Set.of(), false, null, BOTH, OPTIONAL, Map.of()).tryParse("2023-01-01T12:00:00.0", null), + hasResult(new DateTime(2023, 1, 1, 12, 0, 0, 0, null, null)) + ); + assertThat( + new Iso8601Parser(Set.of(), false, null, BOTH, OPTIONAL, Map.of()).tryParse("2023-01-01T12:00:00,0", null), + hasResult(new DateTime(2023, 1, 1, 12, 0, 0, 0, null, null)) + ); + + assertThat( + new Iso8601Parser(Set.of(), false, null, DOT, OPTIONAL, Map.of()).tryParse("2023-01-01T12:00:00.0", null), + hasResult(new DateTime(2023, 1, 1, 12, 0, 0, 0, null, null)) + ); + assertThat(new Iso8601Parser(Set.of(), false, null, DOT, OPTIONAL, Map.of()).tryParse("2023-01-01T12:00:00,0", null), hasError(19)); + + assertThat( + new Iso8601Parser(Set.of(), false, null, COMMA, OPTIONAL, Map.of()).tryParse("2023-01-01T12:00:00.0", null), + hasError(19) + ); + assertThat( + new Iso8601Parser(Set.of(), false, null, COMMA, OPTIONAL, Map.of()).tryParse("2023-01-01T12:00:00,0", null), + hasResult(new DateTime(2023, 1, 1, 12, 0, 0, 0, null, null)) + ); + + assertThat( + new Iso8601Parser(Set.of(), false, null, BOTH, OPTIONAL, Map.of()).tryParse("2023-01-01T12:00:00+0", null), + hasError(19) + ); + assertThat( + new Iso8601Parser(Set.of(), false, null, BOTH, OPTIONAL, Map.of()).tryParse("2023-01-01T12:00:00+0", null), + hasError(19) + ); + } + private static Matcher hasTimezone(ZoneId offset) { return transformedMatch(r -> r.result().query(TemporalQueries.zone()), equalTo(offset)); } @@ -351,7 +517,7 @@ public void testDefaults() { ); assertThat( - new Iso8601Parser(Set.of(), true, defaults).tryParse("2023", null), + new Iso8601Parser(Set.of(), true, null, BOTH, OPTIONAL, defaults).tryParse("2023", null), hasResult( new DateTime( 2023, @@ -367,7 +533,7 @@ public void testDefaults() { ) ); assertThat( - new Iso8601Parser(Set.of(), true, defaults).tryParse("2023-01", null), + new Iso8601Parser(Set.of(), true, null, BOTH, OPTIONAL, defaults).tryParse("2023-01", null), hasResult( new DateTime( 2023, @@ -383,7 +549,7 @@ public void testDefaults() { ) ); assertThat( - new Iso8601Parser(Set.of(), true, defaults).tryParse("2023-01-01", null), + new Iso8601Parser(Set.of(), true, null, BOTH, OPTIONAL, defaults).tryParse("2023-01-01", null), hasResult( new DateTime( 2023, @@ -399,7 +565,7 @@ public void testDefaults() { ) ); assertThat( - new Iso8601Parser(Set.of(), true, defaults).tryParse("2023-01-01T00", null), + new Iso8601Parser(Set.of(), true, null, BOTH, OPTIONAL, defaults).tryParse("2023-01-01T00", null), hasResult( new DateTime( 2023, @@ -415,15 +581,15 @@ public void testDefaults() { ) ); assertThat( - new Iso8601Parser(Set.of(), true, defaults).tryParse("2023-01-01T00:00", null), + new Iso8601Parser(Set.of(), true, null, BOTH, OPTIONAL, defaults).tryParse("2023-01-01T00:00", null), hasResult(new DateTime(2023, 1, 1, 0, 0, defaults.get(SECOND_OF_MINUTE), defaults.get(NANO_OF_SECOND), null, null)) ); assertThat( - new Iso8601Parser(Set.of(), true, 
defaults).tryParse("2023-01-01T00:00:00", null), + new Iso8601Parser(Set.of(), true, null, BOTH, OPTIONAL, defaults).tryParse("2023-01-01T00:00:00", null), hasResult(new DateTime(2023, 1, 1, 0, 0, 0, defaults.get(NANO_OF_SECOND), null, null)) ); assertThat( - new Iso8601Parser(Set.of(), true, defaults).tryParse("2023-01-01T00:00:00.0", null), + new Iso8601Parser(Set.of(), true, null, BOTH, OPTIONAL, defaults).tryParse("2023-01-01T00:00:00.0", null), hasResult(new DateTime(2023, 1, 1, 0, 0, 0, 0, null, null)) ); } diff --git a/server/src/test/java/org/elasticsearch/common/util/BitArrayTests.java b/server/src/test/java/org/elasticsearch/common/util/BitArrayTests.java index f81a4bd2f4a18..e3f2522de4813 100644 --- a/server/src/test/java/org/elasticsearch/common/util/BitArrayTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/BitArrayTests.java @@ -51,6 +51,27 @@ public void testRandom() { } } + public void testRandomSetValue() { + try (BitArray bitArray = new BitArray(1, BigArrays.NON_RECYCLING_INSTANCE)) { + int numBits = randomIntBetween(1000, 10000); + for (int step = 0; step < 3; step++) { + boolean[] bits = new boolean[numBits]; + List slots = new ArrayList<>(); + for (int i = 0; i < numBits; i++) { + bits[i] = randomBoolean(); + slots.add(i); + } + Collections.shuffle(slots, random()); + for (int i : slots) { + bitArray.set(i, bits[i]); + } + for (int i = 0; i < numBits; i++) { + assertEquals(bitArray.get(i), bits[i]); + } + } + } + } + public void testVeryLarge() { assumeThat(Runtime.getRuntime().maxMemory(), greaterThanOrEqualTo(ByteSizeUnit.MB.toBytes(512))); try (BitArray bitArray = new BitArray(1, BigArrays.NON_RECYCLING_INSTANCE)) { @@ -183,6 +204,78 @@ public void testGetAndSet() { } } + public void testFillTrueRandom() { + try (BitArray bitArray = new BitArray(1, BigArrays.NON_RECYCLING_INSTANCE)) { + int from = randomIntBetween(0, 1000); + int to = randomIntBetween(from, 1000); + + bitArray.fill(0, 1000, false); + bitArray.fill(from, to, true); + + for (int i = 0; i < 1000; i++) { + if (i < from || i >= to) { + assertFalse(bitArray.get(i)); + } else { + assertTrue(bitArray.get(i)); + } + } + } + } + + public void testFillFalseRandom() { + try (BitArray bitArray = new BitArray(1, BigArrays.NON_RECYCLING_INSTANCE)) { + int from = randomIntBetween(0, 1000); + int to = randomIntBetween(from, 1000); + + bitArray.fill(0, 1000, true); + bitArray.fill(from, to, false); + + for (int i = 0; i < 1000; i++) { + if (i < from || i >= to) { + assertTrue(bitArray.get(i)); + } else { + assertFalse(bitArray.get(i)); + } + } + } + } + + public void testFillTrueSingleWord() { + try (BitArray bitArray = new BitArray(1, BigArrays.NON_RECYCLING_INSTANCE)) { + int from = 8; + int to = 56; + + bitArray.fill(0, 64, false); + bitArray.fill(from, to, true); + + for (int i = 0; i < 64; i++) { + if (i < from || i >= to) { + assertFalse(bitArray.get(i)); + } else { + assertTrue(bitArray.get(i)); + } + } + } + } + + public void testFillFalseSingleWord() { + try (BitArray bitArray = new BitArray(1, BigArrays.NON_RECYCLING_INSTANCE)) { + int from = 8; + int to = 56; + + bitArray.fill(0, 64, true); + bitArray.fill(from, to, false); + + for (int i = 0; i < 64; i++) { + if (i < from || i >= to) { + assertTrue(bitArray.get(i)); + } else { + assertFalse(bitArray.get(i)); + } + } + } + } + public void testSerialize() throws Exception { int initial = randomIntBetween(1, 100_000); BitArray bits1 = new BitArray(initial, BigArrays.NON_RECYCLING_INSTANCE); diff --git 
a/server/src/test/java/org/elasticsearch/common/util/CancellableSingleObjectCacheTests.java b/server/src/test/java/org/elasticsearch/common/util/CancellableSingleObjectCacheTests.java index 30412059394cd..b038b6effd08f 100644 --- a/server/src/test/java/org/elasticsearch/common/util/CancellableSingleObjectCacheTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/CancellableSingleObjectCacheTests.java @@ -48,7 +48,7 @@ public void testNoPendingRefreshIfAlreadyCancelled() { expectThrows(ExecutionException.class, TaskCancelledException.class, future::get); } - public void testListenersCompletedByRefresh() { + public void testListenersCompletedByRefresh() throws ExecutionException { final TestCache testCache = new TestCache(); // The first get() calls the refresh function @@ -81,7 +81,7 @@ public void testListenersCompletedByRefresh() { assertThat(future3.result(), equalTo(2)); } - public void testListenerCompletedByRefreshEvenIfDiscarded() { + public void testListenerCompletedByRefreshEvenIfDiscarded() throws ExecutionException { final TestCache testCache = new TestCache(); // This computation is discarded before it completes. @@ -103,7 +103,7 @@ public void testListenerCompletedByRefreshEvenIfDiscarded() { assertThat(future1.result(), sameInstance(future2.result())); } - public void testListenerCompletedWithCancellationExceptionIfRefreshCancelled() { + public void testListenerCompletedWithCancellationExceptionIfRefreshCancelled() throws ExecutionException { final TestCache testCache = new TestCache(); // This computation is discarded before it completes. @@ -120,12 +120,12 @@ public void testListenerCompletedWithCancellationExceptionIfRefreshCancelled() { testCache.get("bar", () -> false, future2); testCache.assertPendingRefreshes(2); testCache.assertNextRefreshCancelled(); - expectThrows(TaskCancelledException.class, future1::result); + expectThrows(ExecutionException.class, TaskCancelledException.class, future1::result); testCache.completeNextRefresh("bar", 2); assertThat(future2.result(), equalTo(2)); } - public void testListenerCompletedWithFresherInputIfSuperseded() { + public void testListenerCompletedWithFresherInputIfSuperseded() throws ExecutionException { final TestCache testCache = new TestCache(); // This computation is superseded before it completes. 
@@ -164,10 +164,10 @@ public void testRunsCancellationChecksEvenWhenSuperseded() { isCancelled.set(true); testCache.completeNextRefresh("bar", 1); - expectThrows(TaskCancelledException.class, future1::result); + expectThrows(ExecutionException.class, TaskCancelledException.class, future1::result); } - public void testExceptionCompletesListenersButIsNotCached() { + public void testExceptionCompletesListenersButIsNotCached() throws ExecutionException { final TestCache testCache = new TestCache(); // If a refresh results in an exception then all the pending get() calls complete exceptionally @@ -178,8 +178,8 @@ public void testExceptionCompletesListenersButIsNotCached() { testCache.assertPendingRefreshes(1); final ElasticsearchException exception = new ElasticsearchException("simulated"); testCache.completeNextRefresh(exception); - assertSame(exception, expectThrows(ElasticsearchException.class, future0::result)); - assertSame(exception, expectThrows(ElasticsearchException.class, future1::result)); + assertSame(exception, expectThrows(ExecutionException.class, ElasticsearchException.class, future0::result)); + assertSame(exception, expectThrows(ExecutionException.class, ElasticsearchException.class, future1::result)); testCache.assertNoPendingRefreshes(); // The exception is not cached, however, so a subsequent get() call with a matching key performs another refresh @@ -187,7 +187,7 @@ public void testExceptionCompletesListenersButIsNotCached() { testCache.get("foo", () -> false, future2); testCache.assertPendingRefreshes(1); testCache.completeNextRefresh("foo", 1); - assertThat(future2.actionResult(), equalTo(1)); + assertThat(future2.result(), equalTo(1)); } public void testConcurrentRefreshesAndCancellation() throws InterruptedException { @@ -416,7 +416,7 @@ protected String getKey(String s) { testCache.get("successful", () -> false, successfulFuture); cancelledThread.join(); - expectThrows(TaskCancelledException.class, cancelledFuture::result); + expectThrows(ExecutionException.class, TaskCancelledException.class, cancelledFuture::result); } private static final ThreadContext testThreadContext = new ThreadContext(Settings.EMPTY); diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/AsyncIOProcessorTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/AsyncIOProcessorTests.java index 65bcb473f7d22..0392a3f5ab4e1 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/AsyncIOProcessorTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/AsyncIOProcessorTests.java @@ -54,32 +54,19 @@ protected void write(List>> candidates) throws }; Semaphore semaphore = new Semaphore(Integer.MAX_VALUE); final int count = randomIntBetween(1000, 20000); - Thread[] thread = new Thread[randomIntBetween(3, 10)]; - CountDownLatch latch = new CountDownLatch(thread.length); - for (int i = 0; i < thread.length; i++) { - thread[i] = new Thread() { - @Override - public void run() { - try { - latch.countDown(); - latch.await(); - for (int i = 0; i < count; i++) { - semaphore.acquire(); - processor.put(new Object(), (ex) -> semaphore.release()); - } - } catch (Exception ex) { - throw new RuntimeException(ex); - } + final int threads = randomIntBetween(3, 10); + startInParallel(threads, t -> { + for (int i = 0; i < count; i++) { + try { + semaphore.acquire(); + processor.put(new Object(), (ex) -> semaphore.release()); + } catch (Exception ex) { + throw new RuntimeException(ex); } - }; - thread[i].start(); - } - - for (int i = 0; i < 
thread.length; i++) { - thread[i].join(); - } + } + }); safeAcquire(10, semaphore); - assertEquals(count * thread.length, received.get()); + assertEquals(count * threads, received.get()); } public void testRandomFail() throws InterruptedException { @@ -102,37 +89,24 @@ protected void write(List>> candidates) throws }; Semaphore semaphore = new Semaphore(Integer.MAX_VALUE); final int count = randomIntBetween(1000, 20000); - Thread[] thread = new Thread[randomIntBetween(3, 10)]; - CountDownLatch latch = new CountDownLatch(thread.length); - for (int i = 0; i < thread.length; i++) { - thread[i] = new Thread() { - @Override - public void run() { - try { - latch.countDown(); - latch.await(); - for (int i = 0; i < count; i++) { - semaphore.acquire(); - processor.put(new Object(), (ex) -> { - if (ex != null) { - actualFailed.incrementAndGet(); - } - semaphore.release(); - }); + final int threads = randomIntBetween(3, 10); + startInParallel(threads, t -> { + try { + for (int i = 0; i < count; i++) { + semaphore.acquire(); + processor.put(new Object(), (ex) -> { + if (ex != null) { + actualFailed.incrementAndGet(); } - } catch (Exception ex) { - throw new RuntimeException(ex); - } + semaphore.release(); + }); } - }; - thread[i].start(); - } - - for (int i = 0; i < thread.length; i++) { - thread[i].join(); - } + } catch (Exception ex) { + throw new RuntimeException(ex); + } + }); safeAcquire(Integer.MAX_VALUE, semaphore); - assertEquals(count * thread.length, received.get()); + assertEquals(count * threads, received.get()); assertEquals(actualFailed.get(), failed.get()); } @@ -226,7 +200,7 @@ public void run() { threads.forEach(t -> assertFalse(t.isAlive())); } - public void testSlowConsumer() { + public void testSlowConsumer() throws InterruptedException { AtomicInteger received = new AtomicInteger(0); AtomicInteger notified = new AtomicInteger(0); @@ -240,39 +214,23 @@ protected void write(List>> candidates) throws int threadCount = randomIntBetween(2, 10); CyclicBarrier barrier = new CyclicBarrier(threadCount); Semaphore serializePutSemaphore = new Semaphore(1); - List threads = IntStream.range(0, threadCount).mapToObj(i -> new Thread(getTestName() + "_" + i) { - { - setDaemon(true); - } - - @Override - public void run() { - try { - assertTrue(serializePutSemaphore.tryAcquire(10, TimeUnit.SECONDS)); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - processor.put(new Object(), (e) -> { - serializePutSemaphore.release(); - try { - barrier.await(10, TimeUnit.SECONDS); - } catch (InterruptedException | BrokenBarrierException | TimeoutException ex) { - throw new RuntimeException(ex); - } - notified.incrementAndGet(); - }); - } - }).toList(); - threads.forEach(Thread::start); - threads.forEach(t -> { + runInParallel(threadCount, t -> { try { - t.join(20000); + assertTrue(serializePutSemaphore.tryAcquire(10, TimeUnit.SECONDS)); } catch (InterruptedException e) { throw new RuntimeException(e); } + processor.put(new Object(), (e) -> { + serializePutSemaphore.release(); + try { + barrier.await(10, TimeUnit.SECONDS); + } catch (InterruptedException | BrokenBarrierException | TimeoutException ex) { + throw new RuntimeException(ex); + } + notified.incrementAndGet(); + }); }); assertEquals(threadCount, notified.get()); assertEquals(threadCount, received.get()); - threads.forEach(t -> assertFalse(t.isAlive())); } } diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/ListenableFutureTests.java 
b/server/src/test/java/org/elasticsearch/common/util/concurrent/ListenableFutureTests.java index 2d1ec3e53da5f..74136448d2147 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/ListenableFutureTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/ListenableFutureTests.java @@ -189,10 +189,10 @@ public void testRejection() { safeAwait(barrier); // release blocked executor if (success) { - expectThrows(EsRejectedExecutionException.class, future2::result); + expectThrows(ExecutionException.class, EsRejectedExecutionException.class, future2::result); assertNull(future1.actionGet(10, TimeUnit.SECONDS)); } else { - var exception = expectThrows(EsRejectedExecutionException.class, future2::result); + var exception = expectThrows(ExecutionException.class, EsRejectedExecutionException.class, future2::result); assertEquals(1, exception.getSuppressed().length); assertThat(exception.getSuppressed()[0], instanceOf(ElasticsearchException.class)); assertEquals( diff --git a/server/src/test/java/org/elasticsearch/discovery/HandshakingTransportAddressConnectorTests.java b/server/src/test/java/org/elasticsearch/discovery/HandshakingTransportAddressConnectorTests.java index 8ca96aff9c3e5..5c6afc1e805ce 100644 --- a/server/src/test/java/org/elasticsearch/discovery/HandshakingTransportAddressConnectorTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/HandshakingTransportAddressConnectorTests.java @@ -18,6 +18,8 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.common.ReferenceDocs; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.core.Nullable; @@ -159,13 +161,16 @@ public void testLogsFullConnectionFailureAfterSuccessfulHandshake() throws Excep "message", HandshakingTransportAddressConnector.class.getCanonicalName(), Level.WARN, - "completed handshake with [" - + remoteNode.descriptionWithoutAttributes() - + "] at [" - + discoveryAddress - + "] but followup connection to [" - + remoteNodeAddress - + "] failed" + Strings.format( + """ + Successfully discovered master-eligible node [%s] at address [%s] but could not connect to it at its publish \ + address of [%s]. Each node in a cluster must be accessible at its publish address by all other nodes in the \ + cluster. 
See %s for more information.""", + remoteNode.descriptionWithoutAttributes(), + discoveryAddress, + remoteNodeAddress, + ReferenceDocs.NETWORK_BINDING_AND_PUBLISHING + ) ) ); diff --git a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java index 8dcecca0f65c0..26087ce5f1f0b 100644 --- a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java +++ b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLog; @@ -1171,7 +1172,7 @@ public Collection getRestHeaders() { null, new UsageService(), null, - null, + TelemetryProvider.NOOP, mock(ClusterService.class), null, List.of(), diff --git a/server/src/test/java/org/elasticsearch/index/IndexSettingsOverrideTests.java b/server/src/test/java/org/elasticsearch/index/IndexSettingsOverrideTests.java new file mode 100644 index 0000000000000..307c770d22122 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/IndexSettingsOverrideTests.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index; + +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_INDEX_VERSION_CREATED; +import static org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING; + +public class IndexSettingsOverrideTests extends ESTestCase { + + public static IndexMetadata newIndexMeta(String name, Settings indexSettings) { + return IndexMetadata.builder(name) + .settings(indexSettings(IndexVersion.current(), randomIntBetween(1, 3), randomIntBetween(1, 3)).put(indexSettings)) + .build(); + } + + public void testStatelessMinRefreshIntervalOverride() { + assumeTrue( + "This test depends on system property configured in build.gradle", + Boolean.parseBoolean( + System.getProperty(IndexSettings.RefreshIntervalValidator.STATELESS_ALLOW_INDEX_REFRESH_INTERVAL_OVERRIDE, "false") + ) + ); + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .put(EXISTING_SHARDS_ALLOCATOR_SETTING.getKey(), "stateless") + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1s") + .put(SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersions.V_8_10_0.id() + 1) + .build() + ); + IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); + assertEquals(TimeValue.timeValueSeconds(1), settings.getRefreshInterval()); + } +} diff --git a/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java b/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java index 
d2b2926af7d4c..379adc9ce517a 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java @@ -219,7 +219,7 @@ private Sort buildIndexSort(IndexSettings indexSettings, Map indexFieldDataService.getForField( ft, - new FieldDataContext("test", s, Set::of, MappedFieldType.FielddataOperation.SEARCH) + new FieldDataContext("test", indexSettings, s, Set::of, MappedFieldType.FielddataOperation.SEARCH) ) ); } diff --git a/server/src/test/java/org/elasticsearch/index/TimeSeriesModeTests.java b/server/src/test/java/org/elasticsearch/index/TimeSeriesModeTests.java index 6118a84814462..f9b39bd665abd 100644 --- a/server/src/test/java/org/elasticsearch/index/TimeSeriesModeTests.java +++ b/server/src/test/java/org/elasticsearch/index/TimeSeriesModeTests.java @@ -121,9 +121,10 @@ public void testSetDefaultTimeRangeValue() { public void testRequiredRouting() { Settings s = getSettings(); + var mapperService = new TestMapperServiceBuilder().settings(s).applyDefaultMapping(false).build(); Exception e = expectThrows( IllegalArgumentException.class, - () -> createMapperService(s, topMapping(b -> b.startObject("_routing").field("required", true).endObject())) + () -> withMapping(mapperService, topMapping(b -> b.startObject("_routing").field("required", true).endObject())) ); assertThat(e.getMessage(), equalTo("routing is forbidden on CRUD operations that target indices in [index.mode=time_series]")); } diff --git a/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesCodecDuelTests.java b/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesCodecDuelTests.java index ac1232b6246ba..e8949dda78f7f 100644 --- a/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesCodecDuelTests.java +++ b/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesCodecDuelTests.java @@ -378,6 +378,9 @@ private void assertNumericDocValues(LeafReader baselineReader, LeafReader conten for (int i = 0; i < docIdsToAdvanceTo.length; i++) { int docId = docIdsToAdvanceTo[i]; int baselineTarget = assertAdvance(docId, baselineReader, contenderReader, baseline, contender); + if (baselineTarget == NO_MORE_DOCS) { + break; + } assertEquals(baseline.longValue(), contender.longValue()); i = shouldSkipDocIds(i, docId, baselineTarget, docIdsToAdvanceTo); if (i == -1) { @@ -394,7 +397,9 @@ private void assertNumericDocValues(LeafReader baselineReader, LeafReader conten boolean contenderResult = contender.advanceExact(docId); assertEquals(baselineResult, contenderResult); assertEquals(baseline.docID(), contender.docID()); - assertEquals(baseline.longValue(), contender.longValue()); + if (baselineResult) { + assertEquals(baseline.longValue(), contender.longValue()); + } } } } @@ -418,6 +423,9 @@ private void assertBinaryDocValues(LeafReader baselineReader, LeafReader contend for (int i = 0; i < docIdsToAdvanceTo.length; i++) { int docId = docIdsToAdvanceTo[i]; int baselineTarget = assertAdvance(docId, baselineReader, contenderReader, baseline, contender); + if (baselineTarget == NO_MORE_DOCS) { + break; + } assertEquals(baseline.binaryValue(), contender.binaryValue()); i = shouldSkipDocIds(i, docId, baselineTarget, docIdsToAdvanceTo); if (i == -1) { @@ -434,7 +442,9 @@ private void assertBinaryDocValues(LeafReader baselineReader, LeafReader contend boolean contenderResult = contender.advanceExact(docId); assertEquals(baselineResult, contenderResult); assertEquals(baseline.docID(), contender.docID()); -
assertEquals(baseline.binaryValue(), contender.binaryValue()); + if (baselineResult) { + assertEquals(baseline.binaryValue(), contender.binaryValue()); + } } } } diff --git a/server/src/test/java/org/elasticsearch/index/codec/vectors/BaseKnnBitVectorsFormatTestCase.java b/server/src/test/java/org/elasticsearch/index/codec/vectors/BaseKnnBitVectorsFormatTestCase.java new file mode 100644 index 0000000000000..ba4d5275214b6 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/codec/vectors/BaseKnnBitVectorsFormatTestCase.java @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.codec.vectors; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.KnnByteVectorField; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.ByteVectorValues; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.StoredFields; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.VectorSimilarityFunction; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.BaseIndexFileFormatTestCase; +import org.elasticsearch.common.logging.LogConfigurator; + +import java.io.IOException; + +import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; + +abstract class BaseKnnBitVectorsFormatTestCase extends BaseIndexFileFormatTestCase { + + static { + LogConfigurator.loadLog4jPlugins(); + LogConfigurator.configureESLogging(); // native access requires logging to be initialized + } + + @Override + protected void addRandomFields(Document doc) { + doc.add(new KnnByteVectorField("v2", randomVector(30), similarityFunction)); + } + + protected VectorSimilarityFunction similarityFunction; + + protected VectorSimilarityFunction randomSimilarity() { + return VectorSimilarityFunction.values()[random().nextInt(VectorSimilarityFunction.values().length)]; + } + + byte[] randomVector(int dims) { + byte[] vector = new byte[dims]; + random().nextBytes(vector); + return vector; + } + + public void testRandom() throws Exception { + IndexWriterConfig iwc = newIndexWriterConfig(); + if (random().nextBoolean()) { + iwc.setIndexSort(new Sort(new SortField("sortkey", SortField.Type.INT))); + } + String fieldName = "field"; + try (Directory dir = newDirectory(); IndexWriter iw = new IndexWriter(dir, iwc)) { + int numDoc = atLeast(100); + int dimension = atLeast(10); + if (dimension % 2 != 0) { + dimension++; + } + byte[] scratch = new byte[dimension]; + int numValues = 0; + byte[][] values = new byte[numDoc][]; + for (int i = 0; i < numDoc; i++) { + if (random().nextInt(7) != 3) { + // usually index a vector value for a doc + values[i] = randomVector(dimension); + ++numValues; + } + if (random().nextBoolean() && values[i] != null) { + // sometimes use a shared scratch array + 
System.arraycopy(values[i], 0, scratch, 0, scratch.length); + add(iw, fieldName, i, scratch, similarityFunction); + } else { + add(iw, fieldName, i, values[i], similarityFunction); + } + if (random().nextInt(10) == 2) { + // sometimes delete a random document + int idToDelete = random().nextInt(i + 1); + iw.deleteDocuments(new Term("id", Integer.toString(idToDelete))); + // and remember that it was deleted + if (values[idToDelete] != null) { + values[idToDelete] = null; + --numValues; + } + } + if (random().nextInt(10) == 3) { + iw.commit(); + } + } + int numDeletes = 0; + try (IndexReader reader = DirectoryReader.open(iw)) { + int valueCount = 0, totalSize = 0; + for (LeafReaderContext ctx : reader.leaves()) { + ByteVectorValues vectorValues = ctx.reader().getByteVectorValues(fieldName); + if (vectorValues == null) { + continue; + } + totalSize += vectorValues.size(); + StoredFields storedFields = ctx.reader().storedFields(); + int docId; + while ((docId = vectorValues.nextDoc()) != NO_MORE_DOCS) { + byte[] v = vectorValues.vectorValue(); + assertEquals(dimension, v.length); + String idString = storedFields.document(docId).getField("id").stringValue(); + int id = Integer.parseInt(idString); + if (ctx.reader().getLiveDocs() == null || ctx.reader().getLiveDocs().get(docId)) { + assertArrayEquals(idString, values[id], v); + ++valueCount; + } else { + ++numDeletes; + assertNull(values[id]); + } + } + } + assertEquals(numValues, valueCount); + assertEquals(numValues, totalSize - numDeletes); + } + } + } + + private void add(IndexWriter iw, String field, int id, byte[] vector, VectorSimilarityFunction similarity) throws IOException { + add(iw, field, id, random().nextInt(100), vector, similarity); + } + + private void add(IndexWriter iw, String field, int id, int sortKey, byte[] vector, VectorSimilarityFunction similarityFunction) + throws IOException { + Document doc = new Document(); + if (vector != null) { + doc.add(new KnnByteVectorField(field, vector, similarityFunction)); + } + doc.add(new NumericDocValuesField("sortkey", sortKey)); + String idString = Integer.toString(id); + doc.add(new StringField("id", idString, Field.Store.YES)); + Term idTerm = new Term("id", idString); + iw.updateDocument(idTerm, doc); + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/codec/vectors/ES813FlatVectorFormatTests.java b/server/src/test/java/org/elasticsearch/index/codec/vectors/ES813FlatVectorFormatTests.java index 2f9148e80988e..b4f82e91c39c1 100644 --- a/server/src/test/java/org/elasticsearch/index/codec/vectors/ES813FlatVectorFormatTests.java +++ b/server/src/test/java/org/elasticsearch/index/codec/vectors/ES813FlatVectorFormatTests.java @@ -12,8 +12,15 @@ import org.apache.lucene.codecs.KnnVectorsFormat; import org.apache.lucene.codecs.lucene99.Lucene99Codec; import org.apache.lucene.tests.index.BaseKnnVectorsFormatTestCase; +import org.elasticsearch.common.logging.LogConfigurator; public class ES813FlatVectorFormatTests extends BaseKnnVectorsFormatTestCase { + + static { + LogConfigurator.loadLog4jPlugins(); + LogConfigurator.configureESLogging(); // native access requires logging to be initialized + } + @Override protected Codec getCodec() { return new Lucene99Codec() { diff --git a/server/src/test/java/org/elasticsearch/index/codec/vectors/ES813Int8FlatVectorFormatTests.java b/server/src/test/java/org/elasticsearch/index/codec/vectors/ES813Int8FlatVectorFormatTests.java index 07a922efd21a6..7bb2e9e0284f1 100644 --- 
a/server/src/test/java/org/elasticsearch/index/codec/vectors/ES813Int8FlatVectorFormatTests.java +++ b/server/src/test/java/org/elasticsearch/index/codec/vectors/ES813Int8FlatVectorFormatTests.java @@ -12,8 +12,15 @@ import org.apache.lucene.codecs.KnnVectorsFormat; import org.apache.lucene.codecs.lucene99.Lucene99Codec; import org.apache.lucene.tests.index.BaseKnnVectorsFormatTestCase; +import org.elasticsearch.common.logging.LogConfigurator; public class ES813Int8FlatVectorFormatTests extends BaseKnnVectorsFormatTestCase { + + static { + LogConfigurator.loadLog4jPlugins(); + LogConfigurator.configureESLogging(); // native access requires logging to be initialized + } + @Override protected Codec getCodec() { return new Lucene99Codec() { diff --git a/server/src/test/java/org/elasticsearch/index/codec/vectors/ES815BitFlatVectorFormatTests.java b/server/src/test/java/org/elasticsearch/index/codec/vectors/ES815BitFlatVectorFormatTests.java new file mode 100644 index 0000000000000..c9a5a8e76a041 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/codec/vectors/ES815BitFlatVectorFormatTests.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.codec.vectors; + +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.KnnVectorsFormat; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; +import org.apache.lucene.index.VectorSimilarityFunction; +import org.junit.Before; + +public class ES815BitFlatVectorFormatTests extends BaseKnnBitVectorsFormatTestCase { + + @Override + protected Codec getCodec() { + return new Lucene99Codec() { + @Override + public KnnVectorsFormat getKnnVectorsFormatForField(String field) { + return new ES815BitFlatVectorFormat(); + } + }; + } + + @Before + public void init() { + similarityFunction = VectorSimilarityFunction.EUCLIDEAN; + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/codec/vectors/ES815HnswBitVectorsFormatTests.java b/server/src/test/java/org/elasticsearch/index/codec/vectors/ES815HnswBitVectorsFormatTests.java new file mode 100644 index 0000000000000..3525d5b619565 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/codec/vectors/ES815HnswBitVectorsFormatTests.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.codec.vectors; + +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.KnnVectorsFormat; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; +import org.apache.lucene.index.VectorSimilarityFunction; +import org.junit.Before; + +public class ES815HnswBitVectorsFormatTests extends BaseKnnBitVectorsFormatTestCase { + + @Override + protected Codec getCodec() { + return new Lucene99Codec() { + @Override + public KnnVectorsFormat getKnnVectorsFormatForField(String field) { + return new ES815HnswBitVectorsFormat(); + } + }; + } + + @Before + public void init() { + similarityFunction = VectorSimilarityFunction.EUCLIDEAN; + } +} diff --git a/server/src/test/java/org/elasticsearch/index/engine/FlushListenersTests.java b/server/src/test/java/org/elasticsearch/index/engine/FlushListenersTests.java index 9c345eb923ab4..bff978f8e79d8 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/FlushListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/FlushListenersTests.java @@ -29,8 +29,8 @@ public void testFlushListenerCompletedImmediatelyIfFlushAlreadyOccurred() { ); flushListeners.afterFlush(generation, lastWriteLocation); Translog.Location waitLocation = new Translog.Location( - lastWriteLocation.generation - randomLongBetween(0, 2), - lastWriteLocation.generation - randomLongBetween(10, 90), + lastWriteLocation.generation() - randomLongBetween(0, 2), + lastWriteLocation.generation() - randomLongBetween(10, 90), 2 ); PlainActionFuture future = new PlainActionFuture<>(); @@ -48,8 +48,8 @@ public void testFlushListenerCompletedAfterLocationFlushed() { Integer.MAX_VALUE ); Translog.Location waitLocation = new Translog.Location( - lastWriteLocation.generation - randomLongBetween(0, 2), - lastWriteLocation.generation - randomLongBetween(10, 90), + lastWriteLocation.generation() - randomLongBetween(0, 2), + lastWriteLocation.generation() - randomLongBetween(10, 90), 2 ); PlainActionFuture future = new PlainActionFuture<>(); @@ -61,13 +61,13 @@ public void testFlushListenerCompletedAfterLocationFlushed() { long generation2 = generation + 1; Translog.Location secondLastWriteLocation = new Translog.Location( - lastWriteLocation.generation, - lastWriteLocation.translogLocation + 10, + lastWriteLocation.generation(), + lastWriteLocation.translogLocation() + 10, Integer.MAX_VALUE ); Translog.Location waitLocation2 = new Translog.Location( - lastWriteLocation.generation, - lastWriteLocation.translogLocation + 4, + lastWriteLocation.generation(), + lastWriteLocation.translogLocation() + 4, 2 ); diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index a89ac5bc5b74e..c668cfbb502a2 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -1249,7 +1249,7 @@ public void testSyncTranslogConcurrently() throws Exception { SequenceNumbers.CommitInfo commitInfo = SequenceNumbers.loadSeqNoInfoFromLuceneCommit( safeCommit.getIndexCommit().getUserData().entrySet() ); - assertThat(commitInfo.localCheckpoint, equalTo(engine.getProcessedLocalCheckpoint())); + assertThat(commitInfo.localCheckpoint(), equalTo(engine.getProcessedLocalCheckpoint())); } }; final Thread[] threads = new Thread[randomIntBetween(2, 4)]; @@ -3414,7 +3414,7 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog) 
throws I final long localCheckpoint = Long.parseLong( engine.getLastCommittedSegmentInfos().userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY) ); - final long committedGen = engine.getTranslog().getMinGenerationForSeqNo(localCheckpoint + 1).translogFileGeneration; + final long committedGen = engine.getTranslog().getMinGenerationForSeqNo(localCheckpoint + 1).translogFileGeneration(); for (int gen = 1; gen < committedGen; gen++) { final Path genFile = translogPath.resolve(Translog.getFilename(gen)); assertFalse(genFile + " wasn't cleaned up", Files.exists(genFile)); @@ -3601,7 +3601,7 @@ public void testRecoverFromForeignTranslog() throws IOException { seqNo -> {} ); translog.add(TranslogOperationsUtils.indexOp("SomeBogusId", 0, primaryTerm.get())); - assertEquals(generation.translogFileGeneration, translog.currentFileGeneration()); + assertEquals(generation.translogFileGeneration(), translog.currentFileGeneration()); translog.close(); EngineConfig config = engine.config(); @@ -5232,7 +5232,7 @@ public void testMinGenerationForSeqNo() throws IOException, BrokenBarrierExcepti * This sequence number landed in the last generation, but the lower and upper bounds for an earlier generation straddle * this sequence number. */ - assertThat(translog.getMinGenerationForSeqNo(3 * i + 1).translogFileGeneration, equalTo(i + generation)); + assertThat(translog.getMinGenerationForSeqNo(3 * i + 1).translogFileGeneration(), equalTo(i + generation)); } int i = 0; @@ -5855,7 +5855,7 @@ public void testShouldPeriodicallyFlushOnSize() throws Exception { final Translog translog = engine.getTranslog(); final IntSupplier uncommittedTranslogOperationsSinceLastCommit = () -> { long localCheckpoint = Long.parseLong(engine.getLastCommittedSegmentInfos().userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); - return translog.totalOperationsByMinGen(translog.getMinGenerationForSeqNo(localCheckpoint + 1).translogFileGeneration); + return translog.totalOperationsByMinGen(translog.getMinGenerationForSeqNo(localCheckpoint + 1).translogFileGeneration()); }; final long extraTranslogSizeInNewEngine = engine.getTranslog().stats().getUncommittedSizeInBytes() - Translog.DEFAULT_HEADER_SIZE_IN_BYTES; @@ -7417,7 +7417,7 @@ public void testMaxDocsOnPrimary() throws Exception { assertNotNull(result.getFailure()); assertThat( result.getFailure().getMessage(), - containsString("Number of documents in the index can't exceed [" + maxDocs + "]") + containsString("Number of documents in the shard cannot exceed [" + maxDocs + "]") ); assertThat(result.getSeqNo(), equalTo(UNASSIGNED_SEQ_NO)); assertThat(engine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(maxSeqNo)); diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java index 8c583fe3976fa..1289095ae8d2c 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java @@ -141,7 +141,10 @@ public void testGetForFieldRuntimeField() { return (IndexFieldData.Builder) (cache, breakerService) -> null; }); SearchLookup searchLookup = new SearchLookup(null, null, (ctx, doc) -> null); - ifdService.getForField(ft, new FieldDataContext("qualified", () -> searchLookup, null, MappedFieldType.FielddataOperation.SEARCH)); + ifdService.getForField( + ft, + new FieldDataContext("qualified", null, () -> searchLookup, null, 
MappedFieldType.FielddataOperation.SEARCH) + ); assertSame(searchLookup, searchLookupSetOnce.get().get()); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BinaryFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BinaryFieldMapperTests.java index aacd98f656b45..77b37b2bde860 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BinaryFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BinaryFieldMapperTests.java @@ -167,7 +167,7 @@ public void testDefaultsForTimeSeriesIndex() throws IOException { var source = source(TimeSeriesRoutingHashFieldMapper.DUMMY_ENCODED_VALUE, b -> { b.field("field", Base64.getEncoder().encodeToString(randomByteArrayOfLength(10))); - b.field("@timestamp", randomMillisUpToYear9999()); + b.field("@timestamp", "2000-10-10T23:40:53.384Z"); b.field("dimension", "dimension1"); }, null); ParsedDocument doc = mapper.parse(source); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java index f574d95304c0a..2f8af6c5521f9 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java @@ -248,7 +248,7 @@ public void testParsingMinimal() throws Exception { Mapper fieldMapper = defaultMapper.mappers().getMapper("field"); ParsedDocument parsedDocument = defaultMapper.parse(source(b -> b.field("field", "suggestion"))); - List fields = parsedDocument.rootDoc().getFields(fieldMapper.name()); + List fields = parsedDocument.rootDoc().getFields(fieldMapper.fullPath()); assertFieldsOfType(fields); } @@ -495,7 +495,7 @@ public void testParsingMultiValued() throws Exception { ParsedDocument parsedDocument = defaultMapper.parse(source(b -> b.array("field", "suggestion1", "suggestion2"))); - List fields = parsedDocument.rootDoc().getFields(fieldMapper.name()); + List fields = parsedDocument.rootDoc().getFields(fieldMapper.fullPath()); assertThat(fields, containsInAnyOrder(suggestField("suggestion1"), suggestField("suggestion2"))); } @@ -512,7 +512,7 @@ public void testParsingWithWeight() throws Exception { b.endObject(); })); - List fields = parsedDocument.rootDoc().getFields(fieldMapper.name()); + List fields = parsedDocument.rootDoc().getFields(fieldMapper.fullPath()); assertThat(fields, containsInAnyOrder(suggestField("suggestion"))); } @@ -529,7 +529,7 @@ public void testParsingMultiValueWithWeight() throws Exception { b.endObject(); })); - List fields = parsedDocument.rootDoc().getFields(fieldMapper.name()); + List fields = parsedDocument.rootDoc().getFields(fieldMapper.fullPath()); assertThat(fields, containsInAnyOrder(suggestField("suggestion1"), suggestField("suggestion2"), suggestField("suggestion3"))); } @@ -566,7 +566,7 @@ public void testParsingWithGeoFieldAlias() throws Exception { b.endObject(); })); - List fields = parsedDocument.rootDoc().getFields(fieldMapper.name()); + List fields = parsedDocument.rootDoc().getFields(fieldMapper.fullPath()); assertFieldsOfType(fields); } @@ -584,7 +584,7 @@ public void testParsingFull() throws Exception { b.endArray(); })); - List fields = parsedDocument.rootDoc().getFields(fieldMapper.name()); + List fields = parsedDocument.rootDoc().getFields(fieldMapper.fullPath()); assertThat(fields, containsInAnyOrder(suggestField("suggestion1"), suggestField("suggestion2"), suggestField("suggestion3"))); } @@ -617,7 
+617,7 @@ public void testParsingMixed() throws Exception { b.endArray(); })); - List fields = parsedDocument.rootDoc().getFields(fieldMapper.name()); + List fields = parsedDocument.rootDoc().getFields(fieldMapper.fullPath()); assertThat( fields, containsInAnyOrder( diff --git a/server/src/test/java/org/elasticsearch/index/mapper/CompositeRuntimeFieldTests.java b/server/src/test/java/org/elasticsearch/index/mapper/CompositeRuntimeFieldTests.java index 162b45cef971c..70a8fc05510ed 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/CompositeRuntimeFieldTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/CompositeRuntimeFieldTests.java @@ -343,7 +343,7 @@ public void testParseDocumentSubFieldAccess() throws IOException { SearchLookup searchLookup = new SearchLookup( mapperService::fieldType, (mft, lookupSupplier, fdo) -> mft.fielddataBuilder( - new FieldDataContext("test", lookupSupplier, mapperService.mappingLookup()::sourcePaths, fdo) + new FieldDataContext("test", null, lookupSupplier, mapperService.mappingLookup()::sourcePaths, fdo) ).build(null, null), SourceProvider.fromStoredFields() ); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java index 633ffbf1c3a3a..9609c1ee8aed5 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java @@ -327,6 +327,7 @@ public void testEmptyDocumentMapper() { .item(IgnoredFieldMapper.class) .item(IgnoredSourceFieldMapper.class) .item(IndexFieldMapper.class) + .item(IndexModeFieldMapper.class) .item(NestedPathFieldMapper.class) .item(ProvidedIdFieldMapper.class) .item(RoutingFieldMapper.class) @@ -345,6 +346,7 @@ public void testEmptyDocumentMapper() { .item(IgnoredFieldMapper.CONTENT_TYPE) .item(IgnoredSourceFieldMapper.NAME) .item(IndexFieldMapper.CONTENT_TYPE) + .item(IndexModeFieldMapper.CONTENT_TYPE) .item(NestedPathFieldMapper.NAME) .item(RoutingFieldMapper.CONTENT_TYPE) .item(SeqNoFieldMapper.CONTENT_TYPE) diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserContextTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserContextTests.java index ab1c93cd98277..2826243e4c866 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserContextTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserContextTests.java @@ -11,7 +11,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; @@ -81,4 +83,54 @@ public void testSwitchParser() throws IOException { assertEquals(parser, newContext.parser()); assertEquals("1", newContext.indexSettings().getSettings().get("index.mapping.total_fields.limit")); } + + public void testCreateDynamicMapperBuilderContextFromEmptyContext() throws IOException { + var resultFromEmptyParserContext = context.createDynamicMapperBuilderContext(); + + assertEquals("hey", resultFromEmptyParserContext.buildFullName("hey")); + assertFalse(resultFromEmptyParserContext.isSourceSynthetic()); + assertFalse(resultFromEmptyParserContext.isDataStream()); + 
assertFalse(resultFromEmptyParserContext.parentObjectContainsDimensions()); + assertEquals(ObjectMapper.Defaults.DYNAMIC, resultFromEmptyParserContext.getDynamic()); + assertEquals(MapperService.MergeReason.MAPPING_UPDATE, resultFromEmptyParserContext.getMergeReason()); + assertFalse(resultFromEmptyParserContext.isInNestedContext()); + } + + public void testCreateDynamicMapperBuilderContext() throws IOException { + var mapping = XContentBuilder.builder(XContentType.JSON.xContent()) + .startObject() + .startObject("_doc") + .startObject("_source") + .field("mode", "synthetic") + .endObject() + .startObject(DataStreamTimestampFieldMapper.NAME) + .field("enabled", "true") + .endObject() + .startObject("properties") + .startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH) + .field("type", "date") + .endObject() + .startObject("foo") + .field("type", "passthrough") + .field("time_series_dimension", "true") + .field("priority", "100") + .endObject() + .endObject() + .endObject() + .endObject(); + var documentMapper = new MapperServiceTestCase() { + }.createDocumentMapper(mapping); + var parserContext = new TestDocumentParserContext(documentMapper.mappers(), null); + parserContext.path().add("foo"); + + var resultFromParserContext = parserContext.createDynamicMapperBuilderContext(); + + assertEquals("foo.hey", resultFromParserContext.buildFullName("hey")); + assertTrue(resultFromParserContext.isSourceSynthetic()); + assertTrue(resultFromParserContext.isDataStream()); + assertTrue(resultFromParserContext.parentObjectContainsDimensions()); + assertEquals(ObjectMapper.Defaults.DYNAMIC, resultFromParserContext.getDynamic()); + assertEquals(MapperService.MergeReason.MAPPING_UPDATE, resultFromParserContext.getMergeReason()); + assertFalse(resultFromParserContext.isInNestedContext()); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index d417d6c647d05..7fa08acd53882 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -1568,7 +1568,7 @@ public void testSimpleMapper() throws Exception { b.endObject(); })).rootDoc(); - assertThat(doc.get(docMapper.mappers().getMapper("name.first").name()), equalTo("shay")); + assertThat(doc.get(docMapper.mappers().getMapper("name.first").fullPath()), equalTo("shay")); } public void testParseToJsonAndParse() throws Exception { @@ -1581,7 +1581,7 @@ public void testParseToJsonAndParse() throws Exception { BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1.json")); LuceneDocument doc = builtDocMapper.parse(new SourceToParse("1", json, XContentType.JSON)).rootDoc(); assertThat(doc.getBinaryValue(IdFieldMapper.NAME), equalTo(Uid.encodeId("1"))); - assertThat(doc.get(builtDocMapper.mappers().getMapper("name.first").name()), equalTo("shay")); + assertThat(doc.get(builtDocMapper.mappers().getMapper("name.first").fullPath()), equalTo("shay")); } public void testSimpleParser() throws Exception { @@ -1593,7 +1593,7 @@ public void testSimpleParser() throws Exception { BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1.json")); LuceneDocument doc = docMapper.parse(new SourceToParse("1", json, XContentType.JSON)).rootDoc(); assertThat(doc.getBinaryValue(IdFieldMapper.NAME), equalTo(Uid.encodeId("1"))); - 
assertThat(doc.get(docMapper.mappers().getMapper("name.first").name()), equalTo("shay")); + assertThat(doc.get(docMapper.mappers().getMapper("name.first").fullPath()), equalTo("shay")); } public void testSimpleParserNoTypeNoId() throws Exception { @@ -1602,7 +1602,7 @@ public void testSimpleParserNoTypeNoId() throws Exception { BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1-notype-noid.json")); LuceneDocument doc = docMapper.parse(new SourceToParse("1", json, XContentType.JSON)).rootDoc(); assertThat(doc.getBinaryValue(IdFieldMapper.NAME), equalTo(Uid.encodeId("1"))); - assertThat(doc.get(docMapper.mappers().getMapper("name.first").name()), equalTo("shay")); + assertThat(doc.get(docMapper.mappers().getMapper("name.first").fullPath()), equalTo("shay")); } public void testAttributes() throws Exception { @@ -2605,8 +2605,8 @@ same name need to be part of the same mappings (hence the same document). If th assertTrue(barMapper instanceof ObjectMapper); Mapper baz = ((ObjectMapper) barMapper).getMapper("baz"); assertNotNull(baz); - assertEquals("foo.bar.baz", baz.name()); - assertEquals("baz", baz.simpleName()); + assertEquals("foo.bar.baz", baz.fullPath()); + assertEquals("baz", baz.leafName()); List fields = doc.rootDoc().getFields("foo.bar.baz"); assertEquals(2, fields.size()); String[] fieldStrings = fields.stream().map(Object::toString).toArray(String[]::new); @@ -3245,7 +3245,7 @@ protected String contentType() { @Override public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - return new StringStoredFieldFieldLoader(name(), simpleName(), null) { + return new StringStoredFieldFieldLoader(fullPath(), leafName(), null) { @Override protected void write(XContentBuilder b, Object value) throws IOException { BytesRef ref = (BytesRef) value; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java index e8a8535017889..a138f0910e6ec 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java @@ -59,7 +59,7 @@ public XContentParser parser() { DynamicFieldsBuilder.DYNAMIC_TRUE.createDynamicFieldFromValue(ctx, fieldname); List dynamicMappers = ctx.getDynamicMappers(); assertEquals(1, dynamicMappers.size()); - assertEquals(fieldname, dynamicMappers.get(0).name()); + assertEquals(fieldname, dynamicMappers.get(0).fullPath()); assertEquals(expectedType, dynamicMappers.get(0).typeName()); } @@ -90,7 +90,7 @@ public XContentParser parser() { DynamicFieldsBuilder.DYNAMIC_TRUE.createDynamicFieldFromValue(ctx, "f1"); List dynamicMappers = ctx.getDynamicMappers(); assertEquals(1, dynamicMappers.size()); - assertEquals("labels.f1", dynamicMappers.get(0).name()); + assertEquals("labels.f1", dynamicMappers.get(0).fullPath()); assertEquals("keyword", dynamicMappers.get(0).typeName()); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java index d6b675ed0eb51..d913b86aed2d5 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java @@ -159,7 +159,15 @@ public void testFieldAliasWithDifferentNestedScopes() { private static 
FieldMapper createFieldMapper(String parent, String name) { return new BooleanFieldMapper.Builder(name, ScriptCompiler.NONE, false, IndexVersion.current()).build( - new MapperBuilderContext(parent, false, false, false, ObjectMapper.Defaults.DYNAMIC, MapperService.MergeReason.MAPPING_UPDATE) + new MapperBuilderContext( + parent, + false, + false, + false, + ObjectMapper.Defaults.DYNAMIC, + MapperService.MergeReason.MAPPING_UPDATE, + false + ) ); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java index 04013bf01d57c..ad8f2c9f4f8af 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -493,9 +494,9 @@ public void testAddRootAliasForConflictingPassThroughFields() { ); FieldTypeLookup lookup = new FieldTypeLookup( - List.of(attributeField, resourceAttributeField), + randomizedList(attributeField, resourceAttributeField), List.of(), - List.of(attributes, resourceAttributes), + randomizedList(attributes, resourceAttributes), List.of() ); assertEquals(attributeField.fieldType(), lookup.get("foo")); @@ -503,10 +504,26 @@ public void testAddRootAliasForConflictingPassThroughFields() { public void testNoRootAliasForPassThroughFieldOnConflictingField() { MockFieldMapper attributeFoo = new MockFieldMapper("attributes.foo"); + MockFieldMapper resourceAttributeFoo = new MockFieldMapper("resource.attributes.foo"); MockFieldMapper foo = new MockFieldMapper("foo"); PassThroughObjectMapper attributes = createPassThroughMapper("attributes", Map.of("foo", attributeFoo), 0); + PassThroughObjectMapper resourceAttributes = createPassThroughMapper("resource.attributes", Map.of("foo", resourceAttributeFoo), 1); + + FieldTypeLookup lookup = new FieldTypeLookup( + randomizedList(foo, attributeFoo, resourceAttributeFoo), + List.of(), + randomizedList(attributes, resourceAttributes), + List.of() + ); - FieldTypeLookup lookup = new FieldTypeLookup(List.of(foo, attributeFoo), List.of(), List.of(attributes), List.of()); assertEquals(foo.fieldType(), lookup.get("foo")); } + + @SafeVarargs + @SuppressWarnings("varargs") + static List randomizedList(T... values) { + ArrayList list = new ArrayList<>(Arrays.asList(values)); + Collections.shuffle(list, random()); + return list; + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java deleted file mode 100644 index 92da99bc059a2..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.index.mapper; - -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.geo.Orientation; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.TestGeoShapeFieldMapperPlugin; -import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentBuilder; -import org.junit.AssumptionViolatedException; - -import java.io.IOException; -import java.util.Collection; -import java.util.Collections; -import java.util.List; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.instanceOf; - -public class GeoShapeFieldMapperTests extends MapperTestCase { - - @Override - protected void registerParameters(ParameterChecker checker) throws IOException { - checker.registerUpdateCheck(b -> b.field("orientation", "right"), m -> { - GeoShapeFieldMapper gsfm = (GeoShapeFieldMapper) m; - assertEquals(Orientation.RIGHT, gsfm.orientation()); - }); - checker.registerUpdateCheck(b -> b.field("ignore_z_value", false), m -> { - GeoShapeFieldMapper gpfm = (GeoShapeFieldMapper) m; - assertFalse(gpfm.ignoreZValue()); - }); - checker.registerUpdateCheck(b -> b.field("coerce", true), m -> { - GeoShapeFieldMapper gpfm = (GeoShapeFieldMapper) m; - assertTrue(gpfm.coerce.value()); - }); - } - - @Override - protected Collection getPlugins() { - return List.of(new TestGeoShapeFieldMapperPlugin()); - } - - @Override - protected void minimalMapping(XContentBuilder b) throws IOException { - b.field("type", "geo_shape"); - } - - @Override - protected boolean supportsStoredFields() { - return false; - } - - @Override - protected Object getSampleValueForDocument() { - return "POINT (14.0 15.0)"; - } - - public void testDefaultConfiguration() throws IOException { - DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); - Mapper fieldMapper = mapper.mappers().getMapper("field"); - assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); - GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper; - assertThat(geoShapeFieldMapper.fieldType().orientation(), equalTo(Orientation.RIGHT)); - assertThat(geoShapeFieldMapper.fieldType().hasDocValues(), equalTo(false)); - } - - /** - * Test that orientation parameter correctly parses - */ - public void testOrientationParsing() throws IOException { - DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "geo_shape").field("orientation", "left"))); - Mapper fieldMapper = mapper.mappers().getMapper("field"); - assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); - - Orientation orientation = ((GeoShapeFieldMapper) fieldMapper).fieldType().orientation(); - assertThat(orientation, equalTo(Orientation.CLOCKWISE)); - assertThat(orientation, equalTo(Orientation.LEFT)); - assertThat(orientation, equalTo(Orientation.CW)); - - // explicit right orientation test - mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "geo_shape").field("orientation", "right"))); - fieldMapper = mapper.mappers().getMapper("field"); - assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); - - orientation = ((GeoShapeFieldMapper) fieldMapper).fieldType().orientation(); - assertThat(orientation, equalTo(Orientation.COUNTER_CLOCKWISE)); - assertThat(orientation, equalTo(Orientation.RIGHT)); - assertThat(orientation, equalTo(Orientation.CCW)); - } - - /** - * Test that coerce parameter correctly parses - */ - 
public void testCoerceParsing() throws IOException { - DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "geo_shape").field("coerce", true))); - Mapper fieldMapper = mapper.mappers().getMapper("field"); - assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); - boolean coerce = ((GeoShapeFieldMapper) fieldMapper).coerce(); - assertThat(coerce, equalTo(true)); - - // explicit false coerce test - mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "geo_shape").field("coerce", false))); - fieldMapper = mapper.mappers().getMapper("field"); - assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); - coerce = ((GeoShapeFieldMapper) fieldMapper).coerce(); - assertThat(coerce, equalTo(false)); - } - - /** - * Test that accept_z_value parameter correctly parses - */ - public void testIgnoreZValue() throws IOException { - DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "geo_shape").field("ignore_z_value", true))); - Mapper fieldMapper = mapper.mappers().getMapper("field"); - assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); - - boolean ignoreZValue = ((GeoShapeFieldMapper) fieldMapper).ignoreZValue(); - assertThat(ignoreZValue, equalTo(true)); - - // explicit false accept_z_value test - mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "geo_shape").field("ignore_z_value", false))); - fieldMapper = mapper.mappers().getMapper("field"); - assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); - - ignoreZValue = ((GeoShapeFieldMapper) fieldMapper).ignoreZValue(); - assertThat(ignoreZValue, equalTo(false)); - } - - @Override - protected boolean supportsIgnoreMalformed() { - return true; - } - - @Override - protected List exampleMalformedValues() { - return List.of( - exampleMalformedValue("Bad shape").errorMatches("Unknown geometry type: bad"), - exampleMalformedValue( - "POLYGON ((18.9401790919516 -33.9681188869036, 18.9401790919516 -33.9681188869036, 18.9401790919517 " - + "-33.9681188869036, 18.9401790919517 -33.9681188869036, 18.9401790919516 -33.9681188869036))" - ).errorMatches("at least three non-collinear points required") - ); - } - - public void testGeoShapeMapperMerge() throws Exception { - MapperService mapperService = createMapperService(fieldMapping(b -> b.field("type", "geo_shape").field("orientation", "ccw"))); - Mapper fieldMapper = mapperService.documentMapper().mappers().getMapper("field"); - assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); - GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper; - assertThat(geoShapeFieldMapper.fieldType().orientation(), equalTo(Orientation.CCW)); - - // change mapping; orientation - merge(mapperService, fieldMapping(b -> b.field("type", "geo_shape").field("orientation", "cw"))); - fieldMapper = mapperService.documentMapper().mappers().getMapper("field"); - assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); - geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper; - assertThat(geoShapeFieldMapper.fieldType().orientation(), equalTo(Orientation.CW)); - } - - public void testSerializeDefaults() throws Exception { - DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); - assertThat( - Strings.toString( - mapper.mappers().getMapper("field"), - new ToXContent.MapParams(Collections.singletonMap("include_defaults", "true")) - ), - containsString("\"orientation\":\"" + Orientation.RIGHT + "\"") - ); - } - - public void testGeoShapeArrayParsing() throws 
Exception { - DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); - ParsedDocument document = mapper.parse(source(b -> { - b.startArray("field"); - { - b.startObject().field("type", "Point").startArray("coordinates").value(176.0).value(15.0).endArray().endObject(); - b.startObject().field("type", "Point").startArray("coordinates").value(76.0).value(-15.0).endArray().endObject(); - } - b.endArray(); - })); - assertThat(document.docs(), hasSize(1)); - assertThat(document.docs().get(0).getFields("field"), hasSize(2)); - } - - public void testMultiFieldsDeprecationWarning() throws Exception { - createDocumentMapper(fieldMapping(b -> { - minimalMapping(b); - b.startObject("fields"); - b.startObject("keyword").field("type", "keyword").endObject(); - b.endObject(); - })); - assertWarnings("Adding multifields to [geo_shape] mappers has no effect and will be forbidden in future"); - } - - @Override - protected boolean supportsMeta() { - return false; - } - - protected void assertSearchable(MappedFieldType fieldType) { - // always searchable even if it uses TextSearchInfo.NONE - assertTrue(fieldType.isIndexed()); - assertTrue(fieldType.isSearchable()); - } - - @Override - protected Object generateRandomInputValue(MappedFieldType ft) { - assumeFalse("Test implemented in a follow up", true); - return null; - } - - @Override - protected SyntheticSourceSupport syntheticSourceSupport(boolean ignoreMalformed) { - throw new AssumptionViolatedException("not supported"); - } - - @Override - protected IngestScriptSupport ingestScriptSupport() { - throw new AssumptionViolatedException("not supported"); - } -} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldTypeTests.java deleted file mode 100644 index b4dce62d16f37..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldTypeTests.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.index.mapper; - -import java.io.IOException; -import java.util.List; -import java.util.Map; - -public class GeoShapeFieldTypeTests extends FieldTypeTestCase { - - public void testFetchSourceValue() throws IOException { - MappedFieldType mapper = new GeoShapeFieldMapper.Builder("field", true, true).build(MapperBuilderContext.root(false, false)) - .fieldType(); - - Map jsonLineString = Map.of("type", "LineString", "coordinates", List.of(List.of(42.0, 27.1), List.of(30.0, 50.0))); - Map jsonPoint = Map.of("type", "Point", "coordinates", List.of(14.0, 15.0)); - Map jsonMalformed = Map.of("type", "Point", "coordinates", "foo"); - String wktLineString = "LINESTRING (42.0 27.1, 30.0 50.0)"; - String wktPoint = "POINT (14.0 15.0)"; - String wktMalformed = "POINT foo"; - - // Test a single shape in geojson format. 
- Object sourceValue = jsonLineString; - assertEquals(List.of(jsonLineString), fetchSourceValue(mapper, sourceValue, null)); - assertEquals(List.of(wktLineString), fetchSourceValue(mapper, sourceValue, "wkt")); - - // Test a malformed single shape in geojson format - sourceValue = jsonMalformed; - assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, null)); - assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, "wkt")); - - // Test a list of shapes in geojson format. - sourceValue = List.of(jsonLineString, jsonPoint); - assertEquals(List.of(jsonLineString, jsonPoint), fetchSourceValue(mapper, sourceValue, null)); - assertEquals(List.of(wktLineString, wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); - - // Test a list of shapes including one malformed in geojson format - sourceValue = List.of(jsonLineString, jsonMalformed, jsonPoint); - assertEquals(List.of(jsonLineString, jsonPoint), fetchSourceValue(mapper, sourceValue, null)); - assertEquals(List.of(wktLineString, wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); - - // Test a single shape in wkt format. - sourceValue = wktLineString; - assertEquals(List.of(jsonLineString), fetchSourceValue(mapper, sourceValue, null)); - assertEquals(List.of(wktLineString), fetchSourceValue(mapper, sourceValue, "wkt")); - - // Test a single malformed shape in wkt format - sourceValue = wktMalformed; - assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, null)); - assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, "wkt")); - - // Test a list of shapes in wkt format. - sourceValue = List.of(wktLineString, wktPoint); - assertEquals(List.of(jsonLineString, jsonPoint), fetchSourceValue(mapper, sourceValue, null)); - assertEquals(List.of(wktLineString, wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); - - // Test a list of shapes including one malformed in wkt format - sourceValue = List.of(wktLineString, wktMalformed, wktPoint); - assertEquals(List.of(jsonLineString, jsonPoint), fetchSourceValue(mapper, sourceValue, null)); - assertEquals(List.of(wktLineString, wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); - } -} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IndexModeFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IndexModeFieldTypeTests.java new file mode 100644 index 0000000000000..72ff053dfd430 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/mapper/IndexModeFieldTypeTests.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.index.mapper; + +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Query; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.query.SearchExecutionContext; + +import java.util.Collections; +import java.util.List; +import java.util.function.Predicate; + +public class IndexModeFieldTypeTests extends ConstantFieldTypeTestCase { + + public void testTermQuery() { + MappedFieldType ft = getMappedFieldType(); + for (IndexMode mode : IndexMode.values()) { + SearchExecutionContext context = createContext(mode); + for (IndexMode other : IndexMode.values()) { + Query query = ft.termQuery(other.getName(), context); + if (other.equals(mode)) { + assertEquals(new MatchAllDocsQuery(), query); + } else { + assertEquals(new MatchNoDocsQuery(), query); + } + } + } + } + + public void testWildcardQuery() { + MappedFieldType ft = getMappedFieldType(); + + assertEquals(new MatchAllDocsQuery(), ft.wildcardQuery("stand*", null, createContext(IndexMode.STANDARD))); + assertEquals(new MatchNoDocsQuery(), ft.wildcardQuery("stand*", null, createContext(IndexMode.TIME_SERIES))); + assertEquals(new MatchNoDocsQuery(), ft.wildcardQuery("stand*", null, createContext(IndexMode.LOGS))); + + assertEquals(new MatchNoDocsQuery(), ft.wildcardQuery("time*", null, createContext(IndexMode.STANDARD))); + assertEquals(new MatchAllDocsQuery(), ft.wildcardQuery("time*", null, createContext(IndexMode.TIME_SERIES))); + assertEquals(new MatchNoDocsQuery(), ft.wildcardQuery("time*", null, createContext(IndexMode.LOGS))); + + assertEquals(new MatchNoDocsQuery(), ft.wildcardQuery("logs*", null, createContext(IndexMode.STANDARD))); + assertEquals(new MatchNoDocsQuery(), ft.wildcardQuery("logs*", null, createContext(IndexMode.TIME_SERIES))); + assertEquals(new MatchAllDocsQuery(), ft.wildcardQuery("logs*", null, createContext(IndexMode.LOGS))); + } + + @Override + public MappedFieldType getMappedFieldType() { + return IndexModeFieldMapper.IndexModeFieldType.INSTANCE; + } + + private SearchExecutionContext createContext(IndexMode mode) { + Settings.Builder settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()); + if (mode != null) { + settings.put(IndexSettings.MODE.getKey(), mode); + } + if (mode == IndexMode.TIME_SERIES) { + settings.putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of("a,b,c")); + } + IndexMetadata indexMetadata = IndexMetadata.builder("index").settings(settings).numberOfShards(1).numberOfReplicas(0).build(); + IndexSettings indexSettings = new IndexSettings(indexMetadata, settings.build()); + + Predicate indexNameMatcher = pattern -> Regex.simpleMatch(pattern, "index"); + return new SearchExecutionContext( + 0, + 0, + indexSettings, + null, + null, + null, + MappingLookup.EMPTY, + null, + null, + parserConfig(), + writableRegistry(), + null, + null, + System::currentTimeMillis, + null, + indexNameMatcher, + () -> true, + null, + Collections.emptyMap(), + MapperMetrics.NOOP + ); + } +} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java index e06ed1736cca2..833b0a60827d0 
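
The new IndexModeFieldTypeTests just added above exercises the constant _index_mode metadata field: a term or wildcard query against it is answered per index, rewriting to a match-all query when the requested value matches the index's mode and to a match-none query otherwise. A minimal sketch of that term-query rewrite, using only Lucene query types and not the actual IndexModeFieldMapper internals (the class and method names here are illustrative, not from the source):

import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;

final class IndexModeTermQuerySketch {
    // requestedMode: the value in the term query; currentMode: the mode of the index being searched.
    // The field is constant per index, so the query never needs to touch any documents.
    static Query rewrite(String requestedMode, String currentMode) {
        return requestedMode.equals(currentMode) ? new MatchAllDocsQuery() : new MatchNoDocsQuery();
    }
}
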
100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java @@ -320,15 +320,13 @@ public void testDimension() throws IOException { assertDimension(false, KeywordFieldMapper.KeywordFieldType::isDimension); } - public void testDimensionAndIgnoreAbove() { - Exception e = expectThrows(MapperParsingException.class, () -> createDocumentMapper(fieldMapping(b -> { + public void testDimensionAndIgnoreAbove() throws IOException { + DocumentMapper documentMapper = createDocumentMapper(fieldMapping(b -> { minimalMapping(b); b.field("time_series_dimension", true).field("ignore_above", 2048); - }))); - assertThat( - e.getCause().getMessage(), - containsString("Field [ignore_above] cannot be set in conjunction with field [time_series_dimension]") - ); + })); + KeywordFieldMapper field = (KeywordFieldMapper) documentMapper.mappers().getMapper("field"); + assertEquals(2048, field.fieldType().ignoreAbove()); } public void testDimensionAndNormalizer() { @@ -641,11 +639,30 @@ public void testKeywordFieldUtf8LongerThan32766SourceOnly() throws Exception { mapper.parse(source(b -> b.field("field", stringBuilder.toString()))); } + /** + * Test that we track the synthetic source if field is neither indexed nor has doc values nor stored + */ + public void testSyntheticSourceForDisabledField() throws Exception { + MapperService mapper = createMapperService( + syntheticSourceFieldMapping( + b -> b.field("type", "keyword").field("index", false).field("doc_values", false).field("store", false) + ) + ); + String value = randomAlphaOfLengthBetween(1, 20); + assertEquals("{\"field\":\"" + value + "\"}", syntheticSource(mapper.documentMapper(), b -> b.field("field", value))); + } + @Override protected boolean supportsIgnoreMalformed() { return false; } + @Override + protected BlockReaderSupport getSupportedReaders(MapperService mapper, String loaderFieldName) { + MappedFieldType ft = mapper.fieldType(loaderFieldName); + return new BlockReaderSupport(ft.hasDocValues(), ft.hasDocValues() || ft.isStored(), mapper, loaderFieldName); + } + @Override protected Function loadBlockExpected() { return v -> ((BytesRef) v).utf8ToString(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MapperBuilderContextTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MapperBuilderContextTests.java index 8c9197b0f3173..24d070cbd0609 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MapperBuilderContextTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MapperBuilderContextTests.java @@ -27,4 +27,11 @@ public void testRootWithMergeReason() { assertEquals(mergeReason, root.getMergeReason()); } + public void testIsInNestedContext() { + MapperBuilderContext root = MapperBuilderContext.root(true, false); + assertFalse(root.isInNestedContext()); + + MapperBuilderContext childContext = root.createChildContext("child", ObjectMapper.Dynamic.FALSE); + assertFalse(childContext.isInNestedContext()); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index bb5d50267642f..fbca1484cc7c7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -125,27 +125,52 @@ public void testPartitionedConstraints() throws IOException { } public void 
testIndexSortWithNestedFields() throws IOException { - Settings settings = Settings.builder().put("index.sort.field", "foo").build(); + IndexVersion oldVersion = IndexVersionUtils.getPreviousVersion(IndexVersions.INDEX_SORTING_ON_NESTED); IllegalArgumentException invalidNestedException = expectThrows( IllegalArgumentException.class, - () -> createMapperService(settings, mapping(b -> { + () -> createMapperService(oldVersion, settings(oldVersion).put("index.sort.field", "foo").build(), () -> true, mapping(b -> { b.startObject("nested_field").field("type", "nested").endObject(); b.startObject("foo").field("type", "keyword").endObject(); })) ); - assertThat(invalidNestedException.getMessage(), containsString("cannot have nested fields when index sort is activated")); + Settings settings = settings(IndexVersions.INDEX_SORTING_ON_NESTED).put("index.sort.field", "foo").build(); + DocumentMapper mapper = createMapperService(settings, mapping(b -> { + b.startObject("nested_field").field("type", "nested").endObject(); + b.startObject("foo").field("type", "keyword").endObject(); + })).documentMapper(); + + List docs = mapper.parse(source(b -> { + b.field("name", "foo"); + b.startObject("nested_field").field("foo", "bar").endObject(); + })).docs(); + assertEquals(2, docs.size()); + assertEquals(docs.get(1), docs.get(0).getParent()); MapperService mapperService = createMapperService( settings, mapping(b -> b.startObject("foo").field("type", "keyword").endObject()) ); - invalidNestedException = expectThrows(IllegalArgumentException.class, () -> merge(mapperService, mapping(b -> { + merge(mapperService, mapping(b -> { b.startObject("nested_field"); b.field("type", "nested"); b.endObject(); + })); + + Settings settings2 = Settings.builder().put("index.sort.field", "foo.bar").build(); + invalidNestedException = expectThrows(IllegalArgumentException.class, () -> createMapperService(settings2, mapping(b -> { + b.startObject("foo"); + { + b.field("type", "nested"); + b.startObject("properties"); + { + b.startObject("bar").field("type", "keyword").endObject(); + } + b.endObject(); + } + b.endObject(); }))); - assertThat(invalidNestedException.getMessage(), containsString("cannot have nested fields when index sort is activated")); + assertEquals("cannot apply index sort to field [foo.bar] under nested object [foo]", invalidNestedException.getMessage()); } public void testFieldAliasWithMismatchedNestedScope() throws Throwable { @@ -273,7 +298,7 @@ public void testMappingRecoverySkipFieldNameLengthLimit() throws Throwable { DocumentMapper documentMapper = mapperService.merge("_doc", mapping, MergeReason.MAPPING_RECOVERY); - assertEquals(testString, documentMapper.mappers().getMapper(testString).simpleName()); + assertEquals(testString, documentMapper.mappers().getMapper(testString).leafName()); } public void testIsMetadataField() throws IOException { @@ -1473,7 +1498,7 @@ public void testMultiFieldsUpdate() throws IOException { assertNotNull(mapper.mappers().getMapper("text_field")); FieldMapper.MultiFields multiFields = ((TextFieldMapper) mapper.mappers().getMapper("text_field")).multiFields(); Map multiFieldMap = StreamSupport.stream(multiFields.spliterator(), false) - .collect(Collectors.toMap(FieldMapper::name, Function.identity())); + .collect(Collectors.toMap(FieldMapper::fullPath, Function.identity())); assertThat(multiFieldMap.keySet(), contains("text_field.multi_field1")); assertTrue(multiFieldMap.get("text_field.multi_field1").ignoreMalformed()); } diff --git 
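
The reworked testIndexSortWithNestedFields above captures a behavior change: on indices created at or after IndexVersions.INDEX_SORTING_ON_NESTED, an index sort on a root-level field may coexist with nested fields in the mapping, and only sorting on a field that lives under a nested object is rejected ("cannot apply index sort to field [foo.bar] under nested object [foo]"). A small sketch of the now-accepted settings, mirroring the test's own Settings usage; the field name "foo" is the test's example, not a required value:

import org.elasticsearch.common.settings.Settings;

final class IndexSortSettingsSketch {
    static Settings rootFieldSort() {
        // Sorting on the root-level keyword field "foo" is allowed even though the mapping also
        // declares a nested field; sorting on a field inside a nested object is still rejected.
        return Settings.builder().put("index.sort.field", "foo").build();
    }
}
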
a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupInferenceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupInferenceFieldMapperTests.java index 5b636c985f695..0946d547c0ad8 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupInferenceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupInferenceFieldMapperTests.java @@ -101,7 +101,7 @@ private static class TestInferenceFieldMapper extends FieldMapper implements Inf @Override public InferenceFieldMetadata getMetadata(Set sourcePaths) { - return new InferenceFieldMetadata(name(), INFERENCE_ID, sourcePaths.toArray(new String[0])); + return new InferenceFieldMetadata(fullPath(), INFERENCE_ID, sourcePaths.toArray(new String[0])); } @Override @@ -114,7 +114,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio @Override public Builder getMergeBuilder() { - return new Builder(simpleName()); + return new Builder(leafName()); } @Override @@ -135,7 +135,7 @@ protected Parameter[] getParameters() { @Override public FieldMapper build(MapperBuilderContext context) { - return new TestInferenceFieldMapper(name()); + return new TestInferenceFieldMapper(leafName()); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java index aa22a345c5cec..0231c5d61d885 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java @@ -206,27 +206,27 @@ to get the wrong path (missing the first portion). assertEquals(1, mapping.getRoot().mappers.size()); Mapper object = mapping.getRoot().getMapper("obj"); assertThat(object, CoreMatchers.instanceOf(ObjectMapper.class)); - assertEquals("obj", object.simpleName()); - assertEquals("obj", object.name()); + assertEquals("obj", object.leafName()); + assertEquals("obj", object.fullPath()); ObjectMapper objectMapper = (ObjectMapper) object; assertEquals(1, objectMapper.mappers.size()); object = objectMapper.getMapper("source"); assertThat(object, CoreMatchers.instanceOf(ObjectMapper.class)); - assertEquals("source", object.simpleName()); - assertEquals("obj.source", object.name()); + assertEquals("source", object.leafName()); + assertEquals("obj.source", object.fullPath()); objectMapper = (ObjectMapper) object; assertEquals(1, objectMapper.mappers.size()); object = objectMapper.getMapper("geo"); assertThat(object, CoreMatchers.instanceOf(ObjectMapper.class)); - assertEquals("geo", object.simpleName()); - assertEquals("obj.source.geo", object.name()); + assertEquals("geo", object.leafName()); + assertEquals("obj.source.geo", object.fullPath()); objectMapper = (ObjectMapper) object; assertEquals(1, objectMapper.mappers.size()); Mapper location = objectMapper.getMapper("location"); assertThat(location, CoreMatchers.instanceOf(GeoPointFieldMapper.class)); GeoPointFieldMapper geoPointFieldMapper = (GeoPointFieldMapper) location; - assertEquals("obj.source.geo.location", geoPointFieldMapper.name()); - assertEquals("location", geoPointFieldMapper.simpleName()); + assertEquals("obj.source.geo.location", geoPointFieldMapper.fullPath()); + assertEquals("location", geoPointFieldMapper.leafName()); assertEquals("obj.source.geo.location", geoPointFieldMapper.mappedFieldType.name()); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java 
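
Many of the hunks in these test files are mechanical renames of the Mapper accessors: simpleName() becomes leafName() (the mapper's own name, relative to its parent) and name() becomes fullPath() (the dot-separated path from the mapping root). A short sketch of the distinction, assuming the same KeywordFieldMapper.Builder and MapperBuilderContext pattern the tests themselves use; with the root context the two values coincide, as ObjectMapperMergeTests asserts further down:

import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.KeywordFieldMapper;
import org.elasticsearch.index.mapper.MapperBuilderContext;

final class MapperNamingSketch {
    static void demo() {
        FieldMapper mapper = new KeywordFieldMapper.Builder("host.name", IndexVersion.current())
            .build(MapperBuilderContext.root(false, false));
        // Built directly under the root, leafName() and fullPath() are both "host.name";
        // under a child context such as "foo.metrics", fullPath() would become "foo.metrics.host.name".
        assert "host.name".equals(mapper.leafName());
        assert "host.name".equals(mapper.fullPath());
    }
}
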
b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java index 6446033c07c5b..7f176bc26072f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java @@ -222,7 +222,7 @@ public void testSourcePathFields() throws IOException { Mapper mapper = mapperService.mappingLookup().getMapper("field"); assertThat(mapper, instanceOf(FieldMapper.class)); final Set fieldsUsingSourcePath = new HashSet<>(); - ((FieldMapper) mapper).sourcePathUsedBy().forEachRemaining(mapper1 -> fieldsUsingSourcePath.add(mapper1.name())); + ((FieldMapper) mapper).sourcePathUsedBy().forEachRemaining(mapper1 -> fieldsUsingSourcePath.add(mapper1.fullPath())); assertThat(fieldsUsingSourcePath, equalTo(Set.of("field.subfield1", "field.subfield2"))); assertThat(mapperService.mappingLookup().sourcePaths("field.subfield1"), equalTo(Set.of("field"))); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java index 289f12d1508f9..306887099849b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java @@ -1755,4 +1755,91 @@ private NestedObjectMapper createNestedObjectMapperWithAllParametersSet(CheckedC })); return (NestedObjectMapper) mapper.mapping().getRoot().getMapper("nested_object"); } + + public void testNestedMapperBuilderContextConstructor() { + boolean isSourceSynthetic = randomBoolean(); + boolean isDataStream = randomBoolean(); + boolean parentContainsDimensions = randomBoolean(); + MergeReason mergeReason = randomFrom(MergeReason.values()); + MapperBuilderContext mapperBuilderContext = MapperBuilderContext.root(isSourceSynthetic, isDataStream, mergeReason); + mapperBuilderContext = mapperBuilderContext.createChildContext("name", parentContainsDimensions, randomFrom(Dynamic.values())); + NestedObjectMapper.Builder builder = new NestedObjectMapper.Builder("name", IndexVersion.current(), query -> null); + builder.add(new Mapper.Builder("name") { + @Override + public Mapper build(MapperBuilderContext context) { + assertEquals(isSourceSynthetic, context.isSourceSynthetic()); + assertEquals(isDataStream, context.isDataStream()); + assertEquals(parentContainsDimensions, context.parentObjectContainsDimensions()); + return new MockFieldMapper("name"); + } + }); + NestedObjectMapper nestedObjectMapper = builder.build(mapperBuilderContext); + assertNotNull(nestedObjectMapper.getMapper("name")); + } + + public void testNestedMapperMergeContextRootConstructor() { + boolean isSourceSynthetic = randomBoolean(); + boolean isDataStream = randomBoolean(); + boolean parentContainsDimensions = randomBoolean(); + MergeReason mergeReason = randomFrom(MergeReason.values()); + { + MapperBuilderContext mapperBuilderContext = MapperBuilderContext.root(false, false, mergeReason); + NestedObjectMapper.Builder builder = new NestedObjectMapper.Builder("name", IndexVersion.current(), query -> null); + NestedObjectMapper nestedObjectMapper = builder.build(mapperBuilderContext); + MapperMergeContext mapperMergeContext = MapperMergeContext.root(isSourceSynthetic, isDataStream, mergeReason, randomLong()); + MapperMergeContext childMergeContext = nestedObjectMapper.createChildContext(mapperMergeContext, "name"); + MapperBuilderContext nestedBuilderContext = 
childMergeContext.getMapperBuilderContext(); + assertEquals(isSourceSynthetic, nestedBuilderContext.isSourceSynthetic()); + assertEquals(isDataStream, nestedBuilderContext.isDataStream()); + } + { + MapperBuilderContext mapperBuilderContext = MapperBuilderContext.root(isSourceSynthetic, isDataStream, mergeReason); + MapperMergeContext mapperMergeContext = MapperMergeContext.root(isSourceSynthetic, isDataStream, mergeReason, randomLong()); + MapperBuilderContext childMapperBuilderContext = mapperBuilderContext.createChildContext( + "name", + parentContainsDimensions, + randomFrom(Dynamic.values()) + ); + MapperMergeContext childMergeContext = mapperMergeContext.createChildContext(childMapperBuilderContext); + MapperBuilderContext nestedBuilderContext = childMergeContext.getMapperBuilderContext(); + assertEquals(isSourceSynthetic, nestedBuilderContext.isSourceSynthetic()); + assertEquals(isDataStream, nestedBuilderContext.isDataStream()); + assertEquals(parentContainsDimensions, nestedBuilderContext.parentObjectContainsDimensions()); + } + } + + public void testNestedMapperMergeContextFromConstructor() { + boolean isSourceSynthetic = randomBoolean(); + boolean isDataStream = randomBoolean(); + boolean parentContainsDimensions = randomBoolean(); + MergeReason mergeReason = randomFrom(MergeReason.values()); + MapperBuilderContext mapperBuilderContext = MapperBuilderContext.root(isSourceSynthetic, isDataStream, mergeReason); + mapperBuilderContext = mapperBuilderContext.createChildContext("name", parentContainsDimensions, randomFrom(Dynamic.values())); + NestedObjectMapper.Builder builder = new NestedObjectMapper.Builder("name", IndexVersion.current(), query -> null); + NestedObjectMapper nestedObjectMapper = builder.build(mapperBuilderContext); + + MapperMergeContext mapperMergeContext = MapperMergeContext.from(mapperBuilderContext, randomLong()); + MapperMergeContext childMergeContext = nestedObjectMapper.createChildContext(mapperMergeContext, "name"); + MapperBuilderContext nestedBuilderContext = childMergeContext.getMapperBuilderContext(); + assertEquals(isSourceSynthetic, nestedBuilderContext.isSourceSynthetic()); + assertEquals(isDataStream, nestedBuilderContext.isDataStream()); + assertEquals(parentContainsDimensions, nestedBuilderContext.parentObjectContainsDimensions()); + } + + public void testIsInNestedContext() { + NestedObjectMapper.NestedMapperBuilderContext context = new NestedObjectMapper.NestedMapperBuilderContext( + "nested_path", + false, + false, + false, + null, + false, + Dynamic.FALSE, + MergeReason.INDEX_TEMPLATE + ); + assertTrue(context.isInNestedContext()); + + MapperBuilderContext childContext = context.createChildContext("child", false, Dynamic.FALSE); + assertTrue(childContext.isInNestedContext()); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NonDynamicFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NonDynamicFieldMapperTests.java index 7b8486e5050c2..8c64868045490 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NonDynamicFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NonDynamicFieldMapperTests.java @@ -64,7 +64,7 @@ protected Parameter[] getParameters() { @Override public NonDynamicFieldMapper build(MapperBuilderContext context) { - return new NonDynamicFieldMapper(name(), new TextFieldMapper.TextFieldType(name(), false, true, meta.getValue())); + return new NonDynamicFieldMapper(leafName(), new TextFieldMapper.TextFieldType(leafName(), false, true, 
meta.getValue())); } } @@ -82,7 +82,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName()).init(this); + return new Builder(leafName()).init(this); } } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java index 25ef3c8550ec0..b3bb8cbe697a5 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java @@ -53,11 +53,11 @@ public void testMerge() { final ObjectMapper mergedFoo = (ObjectMapper) merged.getMapper("foo"); { Mapper bar = mergedFoo.getMapper("bar"); - assertEquals("bar", bar.simpleName()); - assertEquals("foo.bar", bar.name()); + assertEquals("bar", bar.leafName()); + assertEquals("foo.bar", bar.fullPath()); Mapper baz = mergedFoo.getMapper("baz"); - assertEquals("baz", baz.simpleName()); - assertEquals("foo.baz", baz.name()); + assertEquals("baz", baz.leafName()); + assertEquals("foo.baz", baz.fullPath()); } } @@ -139,8 +139,8 @@ public void testMergedFieldNamesFieldWithDotsSubobjectsFalseAtRoot() { final ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); final KeywordFieldMapper keywordFieldMapper = (KeywordFieldMapper) merged.getMapper("host.name"); - assertEquals("host.name", keywordFieldMapper.name()); - assertEquals("host.name", keywordFieldMapper.simpleName()); + assertEquals("host.name", keywordFieldMapper.fullPath()); + assertEquals("host.name", keywordFieldMapper.leafName()); } public void testMergedFieldNamesFieldWithDotsSubobjectsFalse() { @@ -156,8 +156,8 @@ public void testMergedFieldNamesFieldWithDotsSubobjectsFalse() { ObjectMapper foo = (ObjectMapper) merged.getMapper("foo"); ObjectMapper metrics = (ObjectMapper) foo.getMapper("metrics"); final KeywordFieldMapper keywordFieldMapper = (KeywordFieldMapper) metrics.getMapper("host.name"); - assertEquals("foo.metrics.host.name", keywordFieldMapper.name()); - assertEquals("host.name", keywordFieldMapper.simpleName()); + assertEquals("foo.metrics.host.name", keywordFieldMapper.fullPath()); + assertEquals("host.name", keywordFieldMapper.leafName()); } public void testMergedFieldNamesMultiFields() { @@ -169,11 +169,11 @@ public void testMergedFieldNamesMultiFields() { final ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); TextFieldMapper text = (TextFieldMapper) merged.getMapper("text"); - assertEquals("text", text.name()); - assertEquals("text", text.simpleName()); + assertEquals("text", text.fullPath()); + assertEquals("text", text.leafName()); KeywordFieldMapper keyword = (KeywordFieldMapper) text.multiFields().iterator().next(); - assertEquals("text.keyword", keyword.name()); - assertEquals("keyword", keyword.simpleName()); + assertEquals("text.keyword", keyword.fullPath()); + assertEquals("keyword", keyword.leafName()); } public void testMergedFieldNamesMultiFieldsWithinSubobjectsFalse() { @@ -189,11 +189,11 @@ public void testMergedFieldNamesMultiFieldsWithinSubobjectsFalse() { ObjectMapper foo = (ObjectMapper) merged.getMapper("foo"); ObjectMapper metrics = (ObjectMapper) foo.getMapper("metrics"); final TextFieldMapper textFieldMapper = (TextFieldMapper) metrics.getMapper("host.name"); - 
assertEquals("foo.metrics.host.name", textFieldMapper.name()); - assertEquals("host.name", textFieldMapper.simpleName()); + assertEquals("foo.metrics.host.name", textFieldMapper.fullPath()); + assertEquals("host.name", textFieldMapper.leafName()); FieldMapper fieldMapper = textFieldMapper.multiFields.iterator().next(); - assertEquals("foo.metrics.host.name.keyword", fieldMapper.name()); - assertEquals("keyword", fieldMapper.simpleName()); + assertEquals("foo.metrics.host.name.keyword", fieldMapper.fullPath()); + assertEquals("keyword", fieldMapper.leafName()); } public void testMergeWithLimit() { @@ -324,8 +324,8 @@ public void testMergeSubobjectsFalseWithObject() { private static RootObjectMapper createRootSubobjectFalseLeafWithDots() { FieldMapper.Builder fieldBuilder = new KeywordFieldMapper.Builder("host.name", IndexVersion.current()); FieldMapper fieldMapper = fieldBuilder.build(MapperBuilderContext.root(false, false)); - assertEquals("host.name", fieldMapper.simpleName()); - assertEquals("host.name", fieldMapper.name()); + assertEquals("host.name", fieldMapper.leafName()); + assertEquals("host.name", fieldMapper.fullPath()); return new RootObjectMapper.Builder("_doc", Explicit.EXPLICIT_FALSE).add(fieldBuilder) .build(MapperBuilderContext.root(false, false)); } @@ -339,11 +339,12 @@ private static ObjectMapper.Builder createObjectSubobjectsFalseLeafWithDots() { false, false, ObjectMapper.Defaults.DYNAMIC, - MapperService.MergeReason.MAPPING_UPDATE + MapperService.MergeReason.MAPPING_UPDATE, + false ) ); - assertEquals("host.name", fieldMapper.simpleName()); - assertEquals("foo.metrics.host.name", fieldMapper.name()); + assertEquals("host.name", fieldMapper.leafName()); + assertEquals("foo.metrics.host.name", fieldMapper.fullPath()); return new ObjectMapper.Builder("foo", ObjectMapper.Defaults.SUBOBJECTS).add( new ObjectMapper.Builder("metrics", Explicit.EXPLICIT_FALSE).add(fieldBuilder) ); @@ -358,14 +359,15 @@ private ObjectMapper.Builder createObjectSubobjectsFalseLeafWithMultiField() { false, false, ObjectMapper.Defaults.DYNAMIC, - MapperService.MergeReason.MAPPING_UPDATE + MapperService.MergeReason.MAPPING_UPDATE, + false ) ); - assertEquals("host.name", textKeywordMultiField.simpleName()); - assertEquals("foo.metrics.host.name", textKeywordMultiField.name()); + assertEquals("host.name", textKeywordMultiField.leafName()); + assertEquals("foo.metrics.host.name", textKeywordMultiField.fullPath()); FieldMapper fieldMapper = textKeywordMultiField.multiFields.iterator().next(); - assertEquals("keyword", fieldMapper.simpleName()); - assertEquals("foo.metrics.host.name.keyword", fieldMapper.name()); + assertEquals("keyword", fieldMapper.leafName()); + assertEquals("foo.metrics.host.name.keyword", fieldMapper.fullPath()); return new ObjectMapper.Builder("foo", ObjectMapper.Defaults.SUBOBJECTS).add( new ObjectMapper.Builder("metrics", Explicit.EXPLICIT_FALSE).add(fieldBuilder) ); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java index 505b89bf28601..6687a28883716 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java @@ -607,7 +607,7 @@ public void testFlatten() { new KeywordFieldMapper.Builder("keyword2", IndexVersion.current()) ) ).add(new KeywordFieldMapper.Builder("keyword1", IndexVersion.current())).build(rootContext); - List fields = 
objectMapper.asFlattenedFieldMappers(rootContext).stream().map(FieldMapper::name).toList(); + List fields = objectMapper.asFlattenedFieldMappers(rootContext).stream().map(FieldMapper::fullPath).toList(); assertThat(fields, containsInAnyOrder("parent.keyword1", "parent.child.keyword2")); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java index 308f775ec7b28..55761e5ec339d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java @@ -176,7 +176,7 @@ protected Parameter[] getParameters() { @Override public FieldMapper build(MapperBuilderContext context) { - return new TestMapper(name(), context.buildFullName(name()), multiFieldsBuilder.build(this, context), copyTo, this); + return new TestMapper(leafName(), context.buildFullName(leafName()), multiFieldsBuilder.build(this, context), copyTo, this); } } @@ -228,7 +228,7 @@ protected TestMapper( @Override public Builder getMergeBuilder() { - return new ParametrizedMapperTests.Builder(simpleName()).init(this); + return new ParametrizedMapperTests.Builder(leafName()).init(this); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java index 50d15be2256ed..4f7951c543909 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java @@ -294,7 +294,7 @@ public void testStoreParameterDefaults() throws IOException { var source = source(TimeSeriesRoutingHashFieldMapper.DUMMY_ENCODED_VALUE, b -> { b.field("field", "1234"); if (timeSeriesIndexMode) { - b.field("@timestamp", randomMillisUpToYear9999()); + b.field("@timestamp", "2000-10-10T23:40:53.384Z"); b.field("dimension", "dimension1"); } }, null); @@ -599,7 +599,7 @@ public void testFielddata() throws IOException { Exception e = expectThrows( IllegalArgumentException.class, () -> disabledMapper.fieldType("field") - .fielddataBuilder(new FieldDataContext("index", null, null, MappedFieldType.FielddataOperation.SEARCH)) + .fielddataBuilder(new FieldDataContext("index", null, null, null, MappedFieldType.FielddataOperation.SEARCH)) ); assertThat( e.getMessage(), @@ -1309,7 +1309,7 @@ public void testEmpty() throws Exception { } : SourceProvider.fromStoredFields(); SearchLookup searchLookup = new SearchLookup(null, null, sourceProvider); IndexFieldData sfd = ft.fielddataBuilder( - new FieldDataContext("", () -> searchLookup, Set::of, MappedFieldType.FielddataOperation.SCRIPT) + new FieldDataContext("", null, () -> searchLookup, Set::of, MappedFieldType.FielddataOperation.SCRIPT) ).build(null, null); LeafFieldData lfd = sfd.load(getOnlyLeafReader(searcher.getIndexReader()).getContext()); TextDocValuesField scriptDV = (TextDocValuesField) lfd.getScriptFieldFactory("field"); @@ -1336,12 +1336,7 @@ public void testBlockLoaderParentFromRowStrideReader() throws IOException { private void testBlockLoaderFromParent(boolean columnReader, boolean syntheticSource) throws IOException { boolean storeParent = randomBoolean(); - KeywordFieldSyntheticSourceSupport kwdSupport = new KeywordFieldSyntheticSourceSupport( - null, - storeParent, - null, - false == storeParent - ); + KeywordFieldSyntheticSourceSupport kwdSupport = new 
KeywordFieldSyntheticSourceSupport(null, storeParent, null, false); SyntheticSourceExample example = kwdSupport.example(5); CheckedConsumer buildFields = b -> { b.startObject("field"); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java index 87b107d5bd139..f05ec95fe84cb 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java @@ -67,7 +67,7 @@ private DocumentMapper createDocumentMapper(String routingPath, XContentBuilder .put(MapperService.INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING.getKey(), 200) // Allow tests that use many dimensions .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), routingPath) .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), "2021-04-28T00:00:00Z") - .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2021-04-29T00:00:00Z") + .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2021-10-29T00:00:00Z") .build(), mappings ).documentMapper(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapperTests.java index 5352bd446a80b..7ac17020f73cd 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapperTests.java @@ -44,7 +44,7 @@ private DocumentMapper createMapper(XContentBuilder mappings) throws IOException getIndexSettingsBuilder().put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES.name()) .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "routing path is required") .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), "2021-04-28T00:00:00Z") - .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2021-04-29T00:00:00Z") + .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2021-10-29T00:00:00Z") .build(), mappings ).documentMapper(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapperTests.java index 4f23c86f53cca..aba20ec5d81c8 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapperTests.java @@ -698,12 +698,12 @@ private static void randomMapExample(final TreeMap example, int for (int i = 0; i < randomIntBetween(2, 5); i++) { int j = depth >= maxDepth ? randomIntBetween(1, 2) : randomIntBetween(1, 3); switch (j) { - case 1 -> example.put(randomAlphaOfLength(10), randomAlphaOfLength(10)); + case 1 -> example.put(randomAlphaOfLength(10), randomAlphaOfLengthBetween(1, 10)); case 2 -> { int size = randomIntBetween(2, 10); final Set stringSet = new HashSet<>(); while (stringSet.size() < size) { - stringSet.add(String.valueOf(randomIntBetween(10_000, 20_000))); + stringSet.add(String.valueOf(randomIntBetween(10_000, 2_000_000))); } final List randomList = new ArrayList<>(stringSet); Collections.sort(randomList); @@ -720,10 +720,10 @@ private static void randomMapExample(final TreeMap example, int } private static class FlattenedFieldSyntheticSourceSupport implements SyntheticSourceSupport { + private final Integer ignoreAbove = randomBoolean() ? 
randomIntBetween(4, 10) : null; @Override public SyntheticSourceExample example(int maxValues) throws IOException { - // NOTE: values must be keywords and we use a TreeMap to preserve order (doc values are sorted and the result // is created with keys and nested keys in sorted order). final TreeMap map = new TreeMap<>(); @@ -743,6 +743,9 @@ public List invalidExample() throws IOException { private void mapping(XContentBuilder b) throws IOException { b.field("type", "flattened"); + if (ignoreAbove != null) { + b.field("ignore_above", ignoreAbove); + } } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/BinaryDenseVectorScriptDocValuesTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/BinaryDenseVectorScriptDocValuesTests.java index ff5baf8ba0877..1df42368041ac 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/BinaryDenseVectorScriptDocValuesTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/BinaryDenseVectorScriptDocValuesTests.java @@ -236,8 +236,8 @@ public long cost() { public static BytesRef mockEncodeDenseVector(float[] values, ElementType elementType, IndexVersion indexVersion) { int numBytes = indexVersion.onOrAfter(DenseVectorFieldMapper.MAGNITUDE_STORED_INDEX_VERSION) - ? elementType.elementBytes * values.length + DenseVectorFieldMapper.MAGNITUDE_BYTES - : elementType.elementBytes * values.length; + ? elementType.getNumBytes(values.length) + DenseVectorFieldMapper.MAGNITUDE_BYTES + : elementType.getNumBytes(values.length); double dotProduct = 0f; ByteBuffer byteBuffer = elementType.createByteBuffer(indexVersion, numBytes); for (float value : values) { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java index 6c3f2e19ad4b1..3dd4e31b9ca3f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.index.mapper.DocumentParsingException; import org.elasticsearch.index.mapper.LuceneDocument; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperTestCase; @@ -71,11 +72,13 @@ public class DenseVectorFieldMapperTests extends MapperTestCase { private final ElementType elementType; private final boolean indexed; private final boolean indexOptionsSet; + private final int dims; public DenseVectorFieldMapperTests() { - this.elementType = randomFrom(ElementType.BYTE, ElementType.FLOAT); + this.elementType = randomFrom(ElementType.BYTE, ElementType.FLOAT, ElementType.BIT); this.indexed = randomBoolean(); this.indexOptionsSet = this.indexed && randomBoolean(); + this.dims = ElementType.BIT == elementType ? 
4 * Byte.SIZE : 4; } @Override @@ -89,7 +92,7 @@ protected void minimalMapping(XContentBuilder b, IndexVersion indexVersion) thro } private void indexMapping(XContentBuilder b, IndexVersion indexVersion) throws IOException { - b.field("type", "dense_vector").field("dims", 4); + b.field("type", "dense_vector").field("dims", dims); if (elementType != ElementType.FLOAT) { b.field("element_type", elementType.toString()); } @@ -108,7 +111,7 @@ private void indexMapping(XContentBuilder b, IndexVersion indexVersion) throws I b.endObject(); } if (indexed) { - b.field("similarity", "dot_product"); + b.field("similarity", elementType == ElementType.BIT ? "l2_norm" : "dot_product"); if (indexOptionsSet) { b.startObject("index_options"); b.field("type", "hnsw"); @@ -121,52 +124,86 @@ private void indexMapping(XContentBuilder b, IndexVersion indexVersion) throws I @Override protected Object getSampleValueForDocument() { - return elementType == ElementType.BYTE ? List.of((byte) 1, (byte) 1, (byte) 1, (byte) 1) : List.of(0.5, 0.5, 0.5, 0.5); + return elementType == ElementType.FLOAT ? List.of(0.5, 0.5, 0.5, 0.5) : List.of((byte) 1, (byte) 1, (byte) 1, (byte) 1); } @Override protected void registerParameters(ParameterChecker checker) throws IOException { checker.registerConflictCheck( "dims", - fieldMapping(b -> b.field("type", "dense_vector").field("dims", 4)), - fieldMapping(b -> b.field("type", "dense_vector").field("dims", 5)) + fieldMapping(b -> b.field("type", "dense_vector").field("dims", dims)), + fieldMapping(b -> b.field("type", "dense_vector").field("dims", dims + 8)) ); checker.registerConflictCheck( "similarity", - fieldMapping(b -> b.field("type", "dense_vector").field("dims", 4).field("index", true).field("similarity", "dot_product")), - fieldMapping(b -> b.field("type", "dense_vector").field("dims", 4).field("index", true).field("similarity", "l2_norm")) + fieldMapping(b -> b.field("type", "dense_vector").field("dims", dims).field("index", true).field("similarity", "dot_product")), + fieldMapping(b -> b.field("type", "dense_vector").field("dims", dims).field("index", true).field("similarity", "l2_norm")) ); checker.registerConflictCheck( "index", - fieldMapping(b -> b.field("type", "dense_vector").field("dims", 4).field("index", true).field("similarity", "dot_product")), - fieldMapping(b -> b.field("type", "dense_vector").field("dims", 4).field("index", false)) + fieldMapping(b -> b.field("type", "dense_vector").field("dims", dims).field("index", true).field("similarity", "dot_product")), + fieldMapping(b -> b.field("type", "dense_vector").field("dims", dims).field("index", false)) ); checker.registerConflictCheck( "element_type", fieldMapping( b -> b.field("type", "dense_vector") - .field("dims", 4) + .field("dims", dims) .field("index", true) .field("similarity", "dot_product") .field("element_type", "byte") ), fieldMapping( b -> b.field("type", "dense_vector") - .field("dims", 4) + .field("dims", dims) .field("index", true) .field("similarity", "dot_product") .field("element_type", "float") ) ); + checker.registerConflictCheck( + "element_type", + fieldMapping( + b -> b.field("type", "dense_vector") + .field("dims", dims) + .field("index", true) + .field("similarity", "l2_norm") + .field("element_type", "float") + ), + fieldMapping( + b -> b.field("type", "dense_vector") + .field("dims", dims) + .field("index", true) + .field("similarity", "l2_norm") + .field("element_type", "bit") + ) + ); + checker.registerConflictCheck( + "element_type", + fieldMapping( + b -> b.field("type", 
"dense_vector") + .field("dims", dims) + .field("index", true) + .field("similarity", "l2_norm") + .field("element_type", "byte") + ), + fieldMapping( + b -> b.field("type", "dense_vector") + .field("dims", dims) + .field("index", true) + .field("similarity", "l2_norm") + .field("element_type", "bit") + ) + ); checker.registerUpdateCheck( b -> b.field("type", "dense_vector") - .field("dims", 4) + .field("dims", dims) .field("index", true) .startObject("index_options") .field("type", "flat") .endObject(), b -> b.field("type", "dense_vector") - .field("dims", 4) + .field("dims", dims) .field("index", true) .startObject("index_options") .field("type", "int8_flat") @@ -175,13 +212,13 @@ protected void registerParameters(ParameterChecker checker) throws IOException { ); checker.registerUpdateCheck( b -> b.field("type", "dense_vector") - .field("dims", 4) + .field("dims", dims) .field("index", true) .startObject("index_options") .field("type", "flat") .endObject(), b -> b.field("type", "dense_vector") - .field("dims", 4) + .field("dims", dims) .field("index", true) .startObject("index_options") .field("type", "hnsw") @@ -190,13 +227,13 @@ protected void registerParameters(ParameterChecker checker) throws IOException { ); checker.registerUpdateCheck( b -> b.field("type", "dense_vector") - .field("dims", 4) + .field("dims", dims) .field("index", true) .startObject("index_options") .field("type", "flat") .endObject(), b -> b.field("type", "dense_vector") - .field("dims", 4) + .field("dims", dims) .field("index", true) .startObject("index_options") .field("type", "int8_hnsw") @@ -205,13 +242,13 @@ protected void registerParameters(ParameterChecker checker) throws IOException { ); checker.registerUpdateCheck( b -> b.field("type", "dense_vector") - .field("dims", 4) + .field("dims", dims) .field("index", true) .startObject("index_options") .field("type", "int8_flat") .endObject(), b -> b.field("type", "dense_vector") - .field("dims", 4) + .field("dims", dims) .field("index", true) .startObject("index_options") .field("type", "hnsw") @@ -220,13 +257,13 @@ protected void registerParameters(ParameterChecker checker) throws IOException { ); checker.registerUpdateCheck( b -> b.field("type", "dense_vector") - .field("dims", 4) + .field("dims", dims) .field("index", true) .startObject("index_options") .field("type", "int8_flat") .endObject(), b -> b.field("type", "dense_vector") - .field("dims", 4) + .field("dims", dims) .field("index", true) .startObject("index_options") .field("type", "int8_hnsw") @@ -235,13 +272,13 @@ protected void registerParameters(ParameterChecker checker) throws IOException { ); checker.registerUpdateCheck( b -> b.field("type", "dense_vector") - .field("dims", 4) + .field("dims", dims) .field("index", true) .startObject("index_options") .field("type", "hnsw") .endObject(), b -> b.field("type", "dense_vector") - .field("dims", 4) + .field("dims", dims) .field("index", true) .startObject("index_options") .field("type", "int8_hnsw") @@ -252,7 +289,7 @@ protected void registerParameters(ParameterChecker checker) throws IOException { "index_options", fieldMapping( b -> b.field("type", "dense_vector") - .field("dims", 4) + .field("dims", dims) .field("index", true) .startObject("index_options") .field("type", "hnsw") @@ -260,7 +297,7 @@ protected void registerParameters(ParameterChecker checker) throws IOException { ), fieldMapping( b -> b.field("type", "dense_vector") - .field("dims", 4) + .field("dims", dims) .field("index", true) .startObject("index_options") .field("type", "flat") @@ 
-353,7 +390,7 @@ public void testMergeDims() throws IOException { mapping = mapping(b -> { b.startObject("field"); b.field("type", "dense_vector") - .field("dims", 4) + .field("dims", dims) .field("similarity", "cosine") .field("index", true) .startObject("index_options") @@ -648,7 +685,7 @@ public void testInvalidParameters() { () -> createDocumentMapper( fieldMapping( b -> b.field("type", "dense_vector") - .field("dims", 4) + .field("dims", dims) .field("element_type", "byte") .field("similarity", "l2_norm") .field("index", true) @@ -755,6 +792,20 @@ public void testAddDocumentsToIndexBefore_V_7_5_0() throws Exception { assertArrayEquals("Decoded dense vector values is not equal to the indexed one.", validVector, decodedValues, 0.001f); } + public void testValidateOnBuild() { + final MapperBuilderContext context = MapperBuilderContext.root(false, false); + + // Build a dense vector field mapper with float element type, which will trigger int8 HNSW index options + DenseVectorFieldMapper mapper = new DenseVectorFieldMapper.Builder("test", IndexVersion.current()).elementType(ElementType.FLOAT) + .build(context); + + // Change the element type to byte, which is incompatible with int8 HNSW index options + DenseVectorFieldMapper.Builder builder = (DenseVectorFieldMapper.Builder) mapper.getMergeBuilder(); + builder.elementType(ElementType.BYTE); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder.build(context)); + assertThat(e.getMessage(), containsString("[element_type] cannot be [byte] when using index type [int8_hnsw]")); + } + private static float[] decodeDenseVector(IndexVersion indexVersion, BytesRef encodedVector) { int dimCount = VectorEncoderDecoder.denseVectorLength(indexVersion, encodedVector); float[] vector = new float[dimCount]; @@ -1020,6 +1071,7 @@ protected Object generateRandomInputValue(MappedFieldType ft) { } yield floats; } + case BIT -> randomByteArrayOfLength(vectorFieldType.getVectorDimensions() / 8); }; } @@ -1089,7 +1141,7 @@ public void testByteVectorQueryBoundaries() throws IOException { Exception e = expectThrows( IllegalArgumentException.class, - () -> denseVectorFieldType.createKnnQuery(new float[] { 128, 0, 0 }, 3, null, null, null) + () -> denseVectorFieldType.createKnnQuery(new float[] { 128, 0, 0 }, 3, 3, null, null, null) ); assertThat( e.getMessage(), @@ -1098,7 +1150,7 @@ public void testByteVectorQueryBoundaries() throws IOException { e = expectThrows( IllegalArgumentException.class, - () -> denseVectorFieldType.createKnnQuery(new float[] { 0.0f, 0f, -129.0f }, 3, null, null, null) + () -> denseVectorFieldType.createKnnQuery(new float[] { 0.0f, 0f, -129.0f }, 3, 3, null, null, null) ); assertThat( e.getMessage(), @@ -1107,7 +1159,7 @@ public void testByteVectorQueryBoundaries() throws IOException { e = expectThrows( IllegalArgumentException.class, - () -> denseVectorFieldType.createKnnQuery(new float[] { 0.0f, 0.5f, 0.0f }, 3, null, null, null) + () -> denseVectorFieldType.createKnnQuery(new float[] { 0.0f, 0.5f, 0.0f }, 3, 3, null, null, null) ); assertThat( e.getMessage(), @@ -1116,7 +1168,7 @@ public void testByteVectorQueryBoundaries() throws IOException { e = expectThrows( IllegalArgumentException.class, - () -> denseVectorFieldType.createKnnQuery(new float[] { 0, 0.0f, -0.25f }, 3, null, null, null) + () -> denseVectorFieldType.createKnnQuery(new float[] { 0, 0.0f, -0.25f }, 3, 3, null, null, null) ); assertThat( e.getMessage(), @@ -1125,13 +1177,13 @@ public void testByteVectorQueryBoundaries() throws 
IOException { e = expectThrows( IllegalArgumentException.class, - () -> denseVectorFieldType.createKnnQuery(new float[] { Float.NaN, 0f, 0.0f }, 3, null, null, null) + () -> denseVectorFieldType.createKnnQuery(new float[] { Float.NaN, 0f, 0.0f }, 3, 3, null, null, null) ); assertThat(e.getMessage(), containsString("element_type [byte] vectors do not support NaN values but found [NaN] at dim [0];")); e = expectThrows( IllegalArgumentException.class, - () -> denseVectorFieldType.createKnnQuery(new float[] { Float.POSITIVE_INFINITY, 0f, 0.0f }, 3, null, null, null) + () -> denseVectorFieldType.createKnnQuery(new float[] { Float.POSITIVE_INFINITY, 0f, 0.0f }, 3, 3, null, null, null) ); assertThat( e.getMessage(), @@ -1140,7 +1192,7 @@ public void testByteVectorQueryBoundaries() throws IOException { e = expectThrows( IllegalArgumentException.class, - () -> denseVectorFieldType.createKnnQuery(new float[] { 0, Float.NEGATIVE_INFINITY, 0.0f }, 3, null, null, null) + () -> denseVectorFieldType.createKnnQuery(new float[] { 0, Float.NEGATIVE_INFINITY, 0.0f }, 3, 3, null, null, null) ); assertThat( e.getMessage(), @@ -1166,13 +1218,13 @@ public void testFloatVectorQueryBoundaries() throws IOException { Exception e = expectThrows( IllegalArgumentException.class, - () -> denseVectorFieldType.createKnnQuery(new float[] { Float.NaN, 0f, 0.0f }, 3, null, null, null) + () -> denseVectorFieldType.createKnnQuery(new float[] { Float.NaN, 0f, 0.0f }, 3, 3, null, null, null) ); assertThat(e.getMessage(), containsString("element_type [float] vectors do not support NaN values but found [NaN] at dim [0];")); e = expectThrows( IllegalArgumentException.class, - () -> denseVectorFieldType.createKnnQuery(new float[] { Float.POSITIVE_INFINITY, 0f, 0.0f }, 3, null, null, null) + () -> denseVectorFieldType.createKnnQuery(new float[] { Float.POSITIVE_INFINITY, 0f, 0.0f }, 3, 3, null, null, null) ); assertThat( e.getMessage(), @@ -1181,7 +1233,7 @@ public void testFloatVectorQueryBoundaries() throws IOException { e = expectThrows( IllegalArgumentException.class, - () -> denseVectorFieldType.createKnnQuery(new float[] { 0, Float.NEGATIVE_INFINITY, 0.0f }, 3, null, null, null) + () -> denseVectorFieldType.createKnnQuery(new float[] { 0, Float.NEGATIVE_INFINITY, 0.0f }, 3, 3, null, null, null) ); assertThat( e.getMessage(), @@ -1196,7 +1248,7 @@ public void testKnnVectorsFormat() throws IOException { boolean setEfConstruction = randomBoolean(); MapperService mapperService = createMapperService(fieldMapping(b -> { b.field("type", "dense_vector"); - b.field("dims", 4); + b.field("dims", dims); b.field("index", true); b.field("similarity", "dot_product"); b.startObject("index_options"); @@ -1234,7 +1286,7 @@ public void testKnnQuantizedFlatVectorsFormat() throws IOException { for (String quantizedFlatFormat : new String[] { "int8_flat", "int4_flat" }) { MapperService mapperService = createMapperService(fieldMapping(b -> { b.field("type", "dense_vector"); - b.field("dims", 4); + b.field("dims", dims); b.field("index", true); b.field("similarity", "dot_product"); b.startObject("index_options"); @@ -1275,7 +1327,7 @@ public void testKnnQuantizedHNSWVectorsFormat() throws IOException { float confidenceInterval = (float) randomDoubleBetween(0.90f, 1.0f, true); MapperService mapperService = createMapperService(fieldMapping(b -> { b.field("type", "dense_vector"); - b.field("dims", 4); + b.field("dims", dims); b.field("index", true); b.field("similarity", "dot_product"); b.startObject("index_options"); @@ -1316,7 +1368,7 @@ public void 
testKnnHalfByteQuantizedHNSWVectorsFormat() throws IOException { float confidenceInterval = (float) randomDoubleBetween(0.90f, 1.0f, true); MapperService mapperService = createMapperService(fieldMapping(b -> { b.field("type", "dense_vector"); - b.field("dims", 4); + b.field("dims", dims); b.field("index", true); b.field("similarity", "dot_product"); b.startObject("index_options"); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java index f178e66955fdc..371a01757c935 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java @@ -120,11 +120,11 @@ public void testIsAggregatable() { public void testFielddataBuilder() { DenseVectorFieldType fft = createFloatFieldType(); - FieldDataContext fdc = new FieldDataContext("test", () -> null, Set::of, MappedFieldType.FielddataOperation.SCRIPT); + FieldDataContext fdc = new FieldDataContext("test", null, () -> null, Set::of, MappedFieldType.FielddataOperation.SCRIPT); assertNotNull(fft.fielddataBuilder(fdc)); DenseVectorFieldType bft = createByteFieldType(); - FieldDataContext bdc = new FieldDataContext("test", () -> null, Set::of, MappedFieldType.FielddataOperation.SCRIPT); + FieldDataContext bdc = new FieldDataContext("test", null, () -> null, Set::of, MappedFieldType.FielddataOperation.SCRIPT); assertNotNull(bft.fielddataBuilder(bdc)); } @@ -165,7 +165,7 @@ public void testCreateNestedKnnQuery() { for (int i = 0; i < dims; i++) { queryVector[i] = randomFloat(); } - Query query = field.createKnnQuery(queryVector, 10, null, null, producer); + Query query = field.createKnnQuery(queryVector, 10, 10, null, null, producer); assertThat(query, instanceOf(DiversifyingChildrenFloatKnnVectorQuery.class)); } { @@ -185,10 +185,12 @@ public void testCreateNestedKnnQuery() { queryVector[i] = randomByte(); floatQueryVector[i] = queryVector[i]; } - Query query = field.createKnnQuery(queryVector, 10, null, null, producer); + VectorData vectorData = new VectorData(null, queryVector); + Query query = field.createKnnQuery(vectorData, 10, 10, null, null, producer); assertThat(query, instanceOf(DiversifyingChildrenByteKnnVectorQuery.class)); - query = field.createKnnQuery(floatQueryVector, 10, null, null, producer); + vectorData = new VectorData(floatQueryVector, null); + query = field.createKnnQuery(vectorData, 10, 10, null, null, producer); assertThat(query, instanceOf(DiversifyingChildrenByteKnnVectorQuery.class)); } } @@ -249,7 +251,7 @@ public void testFloatCreateKnnQuery() { ); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> unindexedField.createKnnQuery(new float[] { 0.3f, 0.1f, 1.0f, 0.0f }, 10, null, null, null) + () -> unindexedField.createKnnQuery(new float[] { 0.3f, 0.1f, 1.0f, 0.0f }, 10, 10, null, null, null) ); assertThat(e.getMessage(), containsString("to perform knn search on field [f], its mapping must have [index] set to [true]")); @@ -265,7 +267,7 @@ public void testFloatCreateKnnQuery() { ); e = expectThrows( IllegalArgumentException.class, - () -> dotProductField.createKnnQuery(new float[] { 0.3f, 0.1f, 1.0f, 0.0f }, 10, null, null, null) + () -> dotProductField.createKnnQuery(new float[] { 0.3f, 0.1f, 1.0f, 0.0f }, 10, 10, null, null, null) ); assertThat(e.getMessage(), containsString("The [dot_product] similarity can only be used 
with unit-length vectors.")); @@ -281,7 +283,7 @@ public void testFloatCreateKnnQuery() { ); e = expectThrows( IllegalArgumentException.class, - () -> cosineField.createKnnQuery(new float[] { 0.0f, 0.0f, 0.0f, 0.0f }, 10, null, null, null) + () -> cosineField.createKnnQuery(new float[] { 0.0f, 0.0f, 0.0f, 0.0f }, 10, 10, null, null, null) ); assertThat(e.getMessage(), containsString("The [cosine] similarity does not support vectors with zero magnitude.")); } @@ -302,7 +304,7 @@ public void testCreateKnnQueryMaxDims() { for (int i = 0; i < 4096; i++) { queryVector[i] = randomFloat(); } - Query query = fieldWith4096dims.createKnnQuery(queryVector, 10, null, null, null); + Query query = fieldWith4096dims.createKnnQuery(queryVector, 10, 10, null, null, null); assertThat(query, instanceOf(KnnFloatVectorQuery.class)); } @@ -321,7 +323,8 @@ public void testCreateKnnQueryMaxDims() { for (int i = 0; i < 4096; i++) { queryVector[i] = randomByte(); } - Query query = fieldWith4096dims.createKnnQuery(queryVector, 10, null, null, null); + VectorData vectorData = new VectorData(null, queryVector); + Query query = fieldWith4096dims.createKnnQuery(vectorData, 10, 10, null, null, null); assertThat(query, instanceOf(KnnByteVectorQuery.class)); } } @@ -339,7 +342,7 @@ public void testByteCreateKnnQuery() { ); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> unindexedField.createKnnQuery(new float[] { 0.3f, 0.1f, 1.0f }, 10, null, null, null) + () -> unindexedField.createKnnQuery(new float[] { 0.3f, 0.1f, 1.0f }, 10, 10, null, null, null) ); assertThat(e.getMessage(), containsString("to perform knn search on field [f], its mapping must have [index] set to [true]")); @@ -355,11 +358,14 @@ public void testByteCreateKnnQuery() { ); e = expectThrows( IllegalArgumentException.class, - () -> cosineField.createKnnQuery(new float[] { 0.0f, 0.0f, 0.0f }, 10, null, null, null) + () -> cosineField.createKnnQuery(new float[] { 0.0f, 0.0f, 0.0f }, 10, 10, null, null, null) ); assertThat(e.getMessage(), containsString("The [cosine] similarity does not support vectors with zero magnitude.")); - e = expectThrows(IllegalArgumentException.class, () -> cosineField.createKnnQuery(new byte[] { 0, 0, 0 }, 10, null, null, null)); + e = expectThrows( + IllegalArgumentException.class, + () -> cosineField.createKnnQuery(new VectorData(null, new byte[] { 0, 0, 0 }), 10, 10, null, null, null) + ); assertThat(e.getMessage(), containsString("The [cosine] similarity does not support vectors with zero magnitude.")); } } diff --git a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java index 137e0cb348a9c..b0a891dfbb1f7 100644 --- a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java @@ -267,6 +267,7 @@ public void testKnnRewriteForInnerHits() throws IOException { KnnVectorQueryBuilder innerQueryBuilder = new KnnVectorQueryBuilder( "nested1." 
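// Illustrative aside, not part of the change set above: the DenseVectorFieldTypeTests hunks now wrap
// query vectors in a VectorData value before calling createKnnQuery, passing either a float[] or a
// byte[]. The sketch below is a hypothetical stand-in modeled only on the constructor usage visible
// in the tests (new VectorData(floatVector, null) and new VectorData(null, byteVector)); it is not
// the production class.
record VectorDataSketch(float[] floatVector, byte[] byteVector) {

    VectorDataSketch {
        // exactly one representation is expected to be present
        if ((floatVector == null) == (byteVector == null)) {
            throw new IllegalArgumentException("expected exactly one of floatVector or byteVector");
        }
    }

    static VectorDataSketch fromFloats(float[] vector) {
        return new VectorDataSketch(vector, null);
    }

    static VectorDataSketch fromBytes(byte[] vector) {
        return new VectorDataSketch(null, vector);
    }

    int dims() {
        return floatVector != null ? floatVector.length : byteVector.length;
    }
}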
+ VECTOR_FIELD, new float[] { 1.0f, 2.0f, 3.0f }, + null, 1, null ); diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index ff6b27924404e..7d018c23597b7 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -272,11 +272,11 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception { assertThat(newReplica.recoveryState().getIndex().fileDetails(), empty()); assertThat( newReplica.recoveryState().getTranslog().totalLocal(), - equalTo(Math.toIntExact(globalCheckpointOnOldPrimary - safeCommitOnOldPrimary.get().localCheckpoint)) + equalTo(Math.toIntExact(globalCheckpointOnOldPrimary - safeCommitOnOldPrimary.get().localCheckpoint())) ); assertThat( newReplica.recoveryState().getTranslog().recoveredOperations(), - equalTo(Math.toIntExact(totalDocs - 1 - safeCommitOnOldPrimary.get().localCheckpoint)) + equalTo(Math.toIntExact(totalDocs - 1 - safeCommitOnOldPrimary.get().localCheckpoint())) ); } else { assertThat(newReplica.recoveryState().getIndex().fileDetails(), not(empty())); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index d272aaab1b231..29f39134d2bcf 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -659,7 +659,7 @@ public void testPrimaryFillsSeqNoGapsOnPromotion() throws Exception { public void testPrimaryPromotionRollsGeneration() throws Exception { final IndexShard indexShard = newStartedShard(false); - final long currentTranslogGeneration = getTranslog(indexShard).getGeneration().translogFileGeneration; + final long currentTranslogGeneration = getTranslog(indexShard).getGeneration().translogFileGeneration(); // promote the replica final ShardRouting replicaRouting = indexShard.routingEntry(); @@ -698,7 +698,7 @@ public void onFailure(Exception e) { }, threadPool.generic()); latch.await(); - assertThat(getTranslog(indexShard).getGeneration().translogFileGeneration, equalTo(currentTranslogGeneration + 1)); + assertThat(getTranslog(indexShard).getGeneration().translogFileGeneration(), equalTo(currentTranslogGeneration + 1)); assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(newPrimaryTerm)); closeShards(indexShard); @@ -995,7 +995,7 @@ public void testOperationPermitOnReplicaShards() throws Exception { } final long primaryTerm = indexShard.getPendingPrimaryTerm(); - final long translogGen = engineClosed ? -1 : getTranslog(indexShard).getGeneration().translogFileGeneration; + final long translogGen = engineClosed ? -1 : getTranslog(indexShard).getGeneration().translogFileGeneration(); final Releasable operation1; final Releasable operation2; @@ -1115,7 +1115,7 @@ private void finish() { assertTrue(onResponse.get()); assertNull(onFailure.get()); assertThat( - getTranslog(indexShard).getGeneration().translogFileGeneration, + getTranslog(indexShard).getGeneration().translogFileGeneration(), // if rollback happens we roll translog twice: one when we flush a commit before opening a read-only engine // and one after replaying translog (upto the global checkpoint); otherwise we roll translog once. 
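// Illustrative aside, not part of the change set above: many call sites in IndexShardTests and
// TranslogTests switch from field access (generation.translogFileGeneration) to method calls
// (generation.translogFileGeneration()). That pattern is what you get when a plain holder class is
// replaced by a Java record, whose components are exposed through generated accessors. A minimal,
// hypothetical sketch of the before/after shape (these types are stand-ins, not the production ones):
class TranslogGenerationBefore {
    public final String translogUUID;
    public final long translogFileGeneration;

    TranslogGenerationBefore(String translogUUID, long translogFileGeneration) {
        this.translogUUID = translogUUID;
        this.translogFileGeneration = translogFileGeneration;
    }
}

// After: a record exposes the same data via translogUUID() and translogFileGeneration() accessors,
// so callers add parentheses, exactly as the hunks above do.
record TranslogGenerationAfter(String translogUUID, long translogFileGeneration) { }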
either(equalTo(translogGen + 1)).or(equalTo(translogGen + 2)) @@ -4922,7 +4922,11 @@ public void testShardExposesWriteLoadStats() throws Exception { final var recoveryFinishedLatch = new CountDownLatch(1); final var recoveryListener = new PeerRecoveryTargetService.RecoveryListener() { @Override - public void onRecoveryDone(RecoveryState state, ShardLongFieldRange timestampMillisFieldRange) { + public void onRecoveryDone( + RecoveryState state, + ShardLongFieldRange timestampMillisFieldRange, + ShardLongFieldRange eventIngestedMillisFieldRange + ) { recoveryFinishedLatch.countDown(); } diff --git a/server/src/test/java/org/elasticsearch/index/store/FsDirectoryFactoryTests.java b/server/src/test/java/org/elasticsearch/index/store/FsDirectoryFactoryTests.java index 49de52357d0ba..b1222213a505d 100644 --- a/server/src/test/java/org/elasticsearch/index/store/FsDirectoryFactoryTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/FsDirectoryFactoryTests.java @@ -8,12 +8,9 @@ package org.elasticsearch.index.store; import org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.store.ByteBuffersDirectory; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.IOContext; -import org.apache.lucene.store.IndexInput; -import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.MMapDirectory; import org.apache.lucene.store.NIOFSDirectory; import org.apache.lucene.store.NoLockFactory; @@ -49,7 +46,7 @@ public void testPreload() throws IOException { .build(); try (Directory directory = newDirectory(build)) { assertTrue(FsDirectoryFactory.isHybridFs(directory)); - FsDirectoryFactory.HybridDirectory hybridDirectory = (FsDirectoryFactory.HybridDirectory) directory; + FsDirectoryFactory.HybridDirectory hybridDirectory = (FsDirectoryFactory.HybridDirectory) FilterDirectory.unwrap(directory); assertTrue(FsDirectoryFactory.HybridDirectory.useDelegate("foo.dvd", newIOContext(random()))); assertTrue(FsDirectoryFactory.HybridDirectory.useDelegate("foo.nvd", newIOContext(random()))); assertTrue(FsDirectoryFactory.HybridDirectory.useDelegate("foo.tim", newIOContext(random()))); @@ -69,29 +66,6 @@ public void testPreload() throws IOException { } } - public void testDisableRandomAdvice() throws IOException { - Directory dir = new FilterDirectory(new ByteBuffersDirectory()) { - @Override - public IndexInput openInput(String name, IOContext context) throws IOException { - assertFalse(context.randomAccess); - return super.openInput(name, context); - } - }; - Directory noRandomAccessDir = FsDirectoryFactory.disableRandomAdvice(dir); - try (IndexOutput out = noRandomAccessDir.createOutput("foo", IOContext.DEFAULT)) { - out.writeInt(42); - } - // Test the tester - expectThrows(AssertionError.class, () -> dir.openInput("foo", IOContext.RANDOM)); - - // The wrapped directory shouldn't fail regardless of the IOContext - for (IOContext context : Arrays.asList(IOContext.READ, IOContext.DEFAULT, IOContext.READONCE, IOContext.RANDOM)) { - try (IndexInput in = noRandomAccessDir.openInput("foo", context)) { - assertEquals(42, in.readInt()); - } - } - } - private Directory newDirectory(Settings settings) throws IOException { IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("foo", settings); Path tempDir = createTempDir().resolve(idxSettings.getUUID()).resolve("0"); @@ -109,15 +83,16 @@ private void doTestPreload(String... 
preload) throws IOException { try (Directory dir = directory) { assertSame(dir, directory); // prevent warnings assertFalse(directory instanceof SleepingLockWrapper); + var mmapDirectory = FilterDirectory.unwrap(directory); if (preload.length == 0) { - assertTrue(directory.toString(), directory instanceof MMapDirectory); - assertFalse(((MMapDirectory) directory).getPreload()); + assertTrue(directory.toString(), mmapDirectory instanceof MMapDirectory); + assertFalse(((MMapDirectory) mmapDirectory).getPreload()); } else if (Arrays.asList(preload).contains("*")) { - assertTrue(directory.toString(), directory instanceof MMapDirectory); - assertTrue(((MMapDirectory) directory).getPreload()); + assertTrue(directory.toString(), mmapDirectory instanceof MMapDirectory); + assertTrue(((MMapDirectory) mmapDirectory).getPreload()); } else { - assertTrue(directory.toString(), directory instanceof FsDirectoryFactory.PreLoadMMapDirectory); - FsDirectoryFactory.PreLoadMMapDirectory preLoadMMapDirectory = (FsDirectoryFactory.PreLoadMMapDirectory) directory; + assertTrue(directory.toString(), mmapDirectory instanceof FsDirectoryFactory.PreLoadMMapDirectory); + FsDirectoryFactory.PreLoadMMapDirectory preLoadMMapDirectory = (FsDirectoryFactory.PreLoadMMapDirectory) mmapDirectory; for (String ext : preload) { assertTrue("ext: " + ext, preLoadMMapDirectory.useDelegate("foo." + ext)); assertTrue("ext: " + ext, preLoadMMapDirectory.getDelegate().getPreload()); @@ -166,7 +141,10 @@ private void doTestStoreDirectory(Path tempDir, String typeSettingValue, IndexMo assertTrue(type + " " + directory.toString(), directory instanceof NIOFSDirectory); break; case MMAPFS: - assertTrue(type + " " + directory.toString(), directory instanceof MMapDirectory); + assertTrue( + type + " " + directory.getClass().getName() + " " + directory, + FilterDirectory.unwrap(directory) instanceof MMapDirectory + ); break; case FS: if (Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) { diff --git a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java index 3eb4675d37e97..5c1a45dba604d 100644 --- a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java @@ -744,7 +744,7 @@ public void testOnCloseCallback() throws IOException { assertEquals(shardId, theLock.getShardId()); assertEquals(lock, theLock); count.incrementAndGet(); - }); + }, false); assertEquals(count.get(), 0); final int iters = randomIntBetween(1, 10); diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 6aaeabdc175da..8a277e400ad6c 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -43,7 +43,6 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Assertions; import org.elasticsearch.core.IOUtils; @@ -1251,7 +1250,7 @@ public void testLocationComparison() throws IOException { max = max(max, location); } - assertEquals(max.generation, translog.currentFileGeneration()); + 
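// Illustrative aside, not part of the change set above: the FsDirectoryFactoryTests hunks stop
// casting the returned Directory directly and instead unwrap it first, which suggests the factory
// can now hand back a wrapped (filtered) directory. A short sketch of that check using Lucene's
// FilterDirectory.unwrap; the helper class below is hypothetical:
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.MMapDirectory;

final class DirectoryUnwrapExample {
    private DirectoryUnwrapExample() {}

    static boolean isMMapBacked(Directory directory) {
        // unwrap() peels off FilterDirectory layers and returns the innermost delegate,
        // so instanceof checks see the concrete implementation rather than the wrapper
        return FilterDirectory.unwrap(directory) instanceof MMapDirectory;
    }
}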
assertEquals(max.generation(), translog.currentFileGeneration()); try (Translog.Snapshot snap = new SortedSnapshot(translog.newSnapshot())) { Translog.Operation next; Translog.Operation maxOp = null; @@ -1656,17 +1655,17 @@ public void testTranslogOperationListener() throws IOException { try (Translog translog = createTranslog(config)) { Location location1 = translog.add(indexOp(randomAlphaOfLength(10), 0, primaryTerm.get())); Location location2 = translog.add(TranslogOperationsUtils.indexOp(randomAlphaOfLength(10), 1, primaryTerm.get())); - long firstGeneration = translog.getGeneration().translogFileGeneration; - assertThat(location1.generation, equalTo(firstGeneration)); - assertThat(location2.generation, equalTo(firstGeneration)); + long firstGeneration = translog.getGeneration().translogFileGeneration(); + assertThat(location1.generation(), equalTo(firstGeneration)); + assertThat(location2.generation(), equalTo(firstGeneration)); translog.rollGeneration(); Location location3 = translog.add(TranslogOperationsUtils.indexOp(randomAlphaOfLength(10), 3, primaryTerm.get())); Location location4 = translog.add(TranslogOperationsUtils.indexOp(randomAlphaOfLength(10), 2, primaryTerm.get())); - long secondGeneration = translog.getGeneration().translogFileGeneration; - assertThat(location3.generation, equalTo(secondGeneration)); - assertThat(location4.generation, equalTo(secondGeneration)); + long secondGeneration = translog.getGeneration().translogFileGeneration(); + assertThat(location3.generation(), equalTo(secondGeneration)); + assertThat(location4.generation(), equalTo(secondGeneration)); assertThat(seqNos, equalTo(List.of(0L, 1L, 3L, 2L))); assertThat(locations, equalTo(List.of(location1, location2, location3, location4))); @@ -1742,7 +1741,7 @@ public void testBasicRecovery() throws IOException { } else { translog = new Translog( config, - translogGeneration.translogUUID, + translogGeneration.translogUUID(), translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, @@ -1750,7 +1749,7 @@ public void testBasicRecovery() throws IOException { ); assertEquals( "lastCommitted must be 1 less than current", - translogGeneration.translogFileGeneration + 1, + translogGeneration.translogFileGeneration() + 1, translog.currentFileGeneration() ); assertFalse(translog.syncNeeded()); @@ -1759,7 +1758,7 @@ public void testBasicRecovery() throws IOException { assertEquals( "expected operation" + i + " to be in the previous translog but wasn't", translog.currentFileGeneration() - 1, - locations.get(i).generation + locations.get(i).generation() ); Translog.Operation next = snapshot.next(); assertNotNull("operation " + i + " must be non-null", next); @@ -1783,9 +1782,9 @@ public void testRecoveryUncommitted() throws IOException { assertEquals( "expected this to be the first roll (1 gen is on creation, 2 when opened)", 2L, - translogGeneration.translogFileGeneration + translogGeneration.translogFileGeneration() ); - assertNotNull(translogGeneration.translogUUID); + assertNotNull(translogGeneration.translogUUID()); } } if (sync) { @@ -1809,7 +1808,7 @@ public void testRecoveryUncommitted() throws IOException { assertNotNull(translogGeneration); assertEquals( "lastCommitted must be 2 less than current - we never finished the commit", - translogGeneration.translogFileGeneration + 2, + translogGeneration.translogFileGeneration() + 2, translog.currentFileGeneration() ); assertFalse(translog.syncNeeded()); @@ -1836,7 +1835,7 @@ public void testRecoveryUncommitted() throws IOException { 
assertNotNull(translogGeneration); assertEquals( "lastCommitted must be 3 less than current - we never finished the commit and run recovery twice", - translogGeneration.translogFileGeneration + 3, + translogGeneration.translogFileGeneration() + 3, translog.currentFileGeneration() ); assertFalse(translog.syncNeeded()); @@ -1870,9 +1869,9 @@ public void testRecoveryUncommittedFileExists() throws IOException { assertEquals( "expected this to be the first roll (1 gen is on creation, 2 when opened)", 2L, - translogGeneration.translogFileGeneration + translogGeneration.translogFileGeneration() ); - assertNotNull(translogGeneration.translogUUID); + assertNotNull(translogGeneration.translogUUID()); } } if (sync) { @@ -1900,7 +1899,7 @@ public void testRecoveryUncommittedFileExists() throws IOException { assertNotNull(translogGeneration); assertEquals( "lastCommitted must be 2 less than current - we never finished the commit", - translogGeneration.translogFileGeneration + 2, + translogGeneration.translogFileGeneration() + 2, translog.currentFileGeneration() ); assertFalse(translog.syncNeeded()); @@ -1928,7 +1927,7 @@ public void testRecoveryUncommittedFileExists() throws IOException { assertNotNull(translogGeneration); assertEquals( "lastCommitted must be 3 less than current - we never finished the commit and run recovery twice", - translogGeneration.translogFileGeneration + 3, + translogGeneration.translogFileGeneration() + 3, translog.currentFileGeneration() ); assertFalse(translog.syncNeeded()); @@ -1961,9 +1960,9 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException { assertEquals( "expected this to be the first roll (1 gen is on creation, 2 when opened)", 2L, - translogGeneration.translogFileGeneration + translogGeneration.translogFileGeneration() ); - assertNotNull(translogGeneration.translogUUID); + assertNotNull(translogGeneration.translogUUID()); } } translog.sync(); @@ -2016,7 +2015,7 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException { assertNotNull(translogGeneration); assertEquals( "lastCommitted must be 2 less than current - we never finished the commit", - translogGeneration.translogFileGeneration + 2, + translogGeneration.translogFileGeneration() + 2, translog.currentFileGeneration() ); assertFalse(translog.syncNeeded()); @@ -2285,7 +2284,7 @@ public void testOpenForeignTranslog() throws IOException { Translog.TranslogGeneration translogGeneration = translog.getGeneration(); translog.close(); - final String foreignTranslog = randomRealisticUnicodeOfCodepointLengthBetween(1, translogGeneration.translogUUID.length()); + final String foreignTranslog = randomRealisticUnicodeOfCodepointLengthBetween(1, translogGeneration.translogUUID().length()); try { new Translog( config, @@ -2508,7 +2507,7 @@ public void testFailFlush() throws IOException { ) { assertEquals( "lastCommitted must be 1 less than current", - translogGeneration.translogFileGeneration + 1, + translogGeneration.translogFileGeneration() + 1, tlog.currentFileGeneration() ); assertFalse(tlog.syncNeeded()); @@ -2519,7 +2518,7 @@ public void testFailFlush() throws IOException { assertEquals( "expected operation" + i + " to be in the previous translog but wasn't", tlog.currentFileGeneration() - 1, - locations.get(i).generation + locations.get(i).generation() ); Translog.Operation next = snapshot.next(); assertNotNull("operation " + i + " must be non-null", next); @@ -2541,7 +2540,7 @@ public void testTranslogOpsCountIsCorrect() throws IOException { assertEquals( "expected 
operation" + i + " to be in the current translog but wasn't", translog.currentFileGeneration(), - locations.get(i).generation + locations.get(i).generation() ); Translog.Operation next = snapshot.next(); assertNotNull("operation " + i + " must be non-null", next); @@ -2641,7 +2640,7 @@ protected void afterAdd() throws IOException { assertFalse(translog.isOpen()); final Checkpoint checkpoint = Checkpoint.read(config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME)); // drop all that haven't been synced - writtenOperations.removeIf(next -> checkpoint.offset < (next.location.translogLocation + next.location.size)); + writtenOperations.removeIf(next -> checkpoint.offset < (next.location.translogLocation() + next.location.size())); try ( Translog tlog = new Translog( config, @@ -2665,7 +2664,7 @@ protected void afterAdd() throws IOException { assertEquals( "expected operation" + i + " to be in the previous translog but wasn't", tlog.currentFileGeneration() - 1, - writtenOperations.get(i).location.generation + writtenOperations.get(i).location.generation() ); Translog.Operation next = snapshot.next(); assertNotNull("operation " + i + " must be non-null", next); @@ -2696,7 +2695,7 @@ public void testRecoveryFromAFutureGenerationCleansUp() throws IOException { translog.rollGeneration(); } } - long minRetainedGen = translog.getMinGenerationForSeqNo(localCheckpoint + 1).translogFileGeneration; + long minRetainedGen = translog.getMinGenerationForSeqNo(localCheckpoint + 1).translogFileGeneration(); // engine blows up, after committing the above generation translog.close(); TranslogConfig config = translog.getConfig(); @@ -2754,7 +2753,7 @@ public void testRecoveryFromFailureOnTrimming() throws IOException { } } deletionPolicy.setLocalCheckpointOfSafeCommit(localCheckpoint); - minGenForRecovery = translog.getMinGenerationForSeqNo(localCheckpoint + 1).translogFileGeneration; + minGenForRecovery = translog.getMinGenerationForSeqNo(localCheckpoint + 1).translogFileGeneration(); fail.failRandomly(); try { translog.trimUnreferencedReaders(); @@ -2778,7 +2777,7 @@ public void testRecoveryFromFailureOnTrimming() throws IOException { assertThat(translog.getMinFileGeneration(), greaterThanOrEqualTo(1L)); assertThat(translog.getMinFileGeneration(), lessThanOrEqualTo(minGenForRecovery)); assertFilePresences(translog); - minGenForRecovery = translog.getMinGenerationForSeqNo(localCheckpoint + 1).translogFileGeneration; + minGenForRecovery = translog.getMinGenerationForSeqNo(localCheckpoint + 1).translogFileGeneration(); translog.trimUnreferencedReaders(); assertThat(translog.getMinFileGeneration(), equalTo(minGenForRecovery)); assertFilePresences(translog); @@ -3468,12 +3467,15 @@ public void testRollGeneration() throws Exception { translog.add(new Translog.NoOp(seqNo++, primaryTerm.get(), "test")); totalOperations++; } - try (ReleasableLock ignored = translog.writeLock.acquire()) { + translog.writeLock.lock(); + try { if (randomBoolean()) { primaryTerm.incrementAndGet(); } translog.rollGeneration(); primaryTerms.add(primaryTerm.get()); + } finally { + translog.writeLock.unlock(); } assertThat(translog.currentFileGeneration(), equalTo(generation + i + 1)); assertThat(translog.getCurrent().getPrimaryTerm(), equalTo(primaryTerm.get())); @@ -3537,7 +3539,7 @@ public void testMinSeqNoBasedAPI() throws IOException { translog.rollGeneration(); for (long seqNo = 0; seqNo < operations; seqNo++) { final Set> seenSeqNos = new HashSet<>(); - final long generation = 
translog.getMinGenerationForSeqNo(seqNo).translogFileGeneration; + final long generation = translog.getMinGenerationForSeqNo(seqNo).translogFileGeneration(); int expectedSnapshotOps = 0; for (long g = generation; g < translog.currentFileGeneration(); g++) { if (seqNoPerGeneration.containsKey(g) == false) { @@ -3922,7 +3924,7 @@ public void testSyncConcurrently() throws Exception { assertThat("seq# " + op.seqNo() + " was not marked as persisted", persistedSeqNos, hasItem(op.seqNo())); } Checkpoint checkpoint = translog.getLastSyncedCheckpoint(); - assertThat(checkpoint.offset, greaterThanOrEqualTo(location.translogLocation)); + assertThat(checkpoint.offset, greaterThanOrEqualTo(location.translogLocation())); for (Translog.Operation op : ops) { assertThat(checkpoint.minSeqNo, lessThanOrEqualTo(op.seqNo())); assertThat(checkpoint.maxSeqNo, greaterThanOrEqualTo(op.seqNo())); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java index c173a22dcdf57..628ff4b99b133 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.index.mapper.IgnoredFieldMapper; import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper; import org.elasticsearch.index.mapper.IndexFieldMapper; +import org.elasticsearch.index.mapper.IndexModeFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; @@ -85,6 +86,7 @@ public Map getMetadataMappers() { TimeSeriesIdFieldMapper.NAME, TimeSeriesRoutingHashFieldMapper.NAME, IndexFieldMapper.NAME, + IndexModeFieldMapper.NAME, SourceFieldMapper.NAME, IgnoredSourceFieldMapper.NAME, NestedPathFieldMapper.NAME, diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index d5359d4510436..088caa5ef6589 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -40,6 +40,7 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.SlowLogFieldProvider; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; @@ -79,6 +80,7 @@ import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.createBackingIndex; +import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_INDEX_UUID; import static org.elasticsearch.cluster.metadata.IndexNameExpressionResolverTests.indexBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; @@ -416,7 +418,7 @@ public void testVerifyIfIndexContentDeleted() throws Exception { final ClusterService clusterService = getInstanceFromNode(ClusterService.class); final Settings idxSettings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .put(SETTING_INDEX_UUID, index.getUUID()) 
.build(); final IndexMetadata indexMetadata = new IndexMetadata.Builder(index.getName()).settings(idxSettings) .numberOfShards(1) @@ -454,7 +456,7 @@ public void testDanglingIndicesWithAliasConflict() throws Exception { final LocalAllocateDangledIndices dangling = getInstanceFromNode(LocalAllocateDangledIndices.class); final Settings idxSettings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) + .put(SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) .build(); final IndexMetadata indexMetadata = new IndexMetadata.Builder(alias).settings(idxSettings) .numberOfShards(1) @@ -485,7 +487,7 @@ public void testDanglingIndicesWithLaterVersion() throws Exception { final LocalAllocateDangledIndices dangling = getInstanceFromNode(LocalAllocateDangledIndices.class); final Settings idxSettingsLater = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.fromId(IndexVersion.current().id() + 10000)) - .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) + .put(SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) .build(); final IndexMetadata indexMetadataLater = new IndexMetadata.Builder(indexNameLater).settings(idxSettingsLater) .numberOfShards(1) @@ -513,7 +515,7 @@ public void testIndexAndTombstoneWithSameNameOnStartup() throws Exception { final IndicesService indicesService = getIndicesService(); final Settings idxSettings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .put(SETTING_INDEX_UUID, index.getUUID()) .build(); final IndexMetadata indexMetadata = new IndexMetadata.Builder(index.getName()).settings(idxSettings) .numberOfShards(1) @@ -537,7 +539,7 @@ public void testStandAloneMapperServiceWithPlugins() throws IOException { final IndicesService indicesService = getIndicesService(); final Settings idxSettings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .put(SETTING_INDEX_UUID, index.getUUID()) .put(IndexModule.SIMILARITY_SETTINGS_PREFIX + ".test.type", "fake-similarity") .build(); final IndexMetadata indexMetadata = new IndexMetadata.Builder(index.getName()).settings(idxSettings) @@ -615,7 +617,7 @@ public void testGetEngineFactory() throws IOException { final Index index = new Index(indexName, UUIDs.randomBase64UUID()); final Settings.Builder builder = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()); + .put(SETTING_INDEX_UUID, index.getUUID()); if (value != null) { builder.put(FooEnginePlugin.FOO_INDEX_SETTING.getKey(), value); } @@ -638,7 +640,7 @@ public void testConflictingEngineFactories() { final Index index = new Index(indexName, UUIDs.randomBase64UUID()); final Settings settings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .put(SETTING_INDEX_UUID, index.getUUID()) .put(FooEnginePlugin.FOO_INDEX_SETTING.getKey(), true) .put(BarEnginePlugin.BAR_INDEX_SETTING.getKey(), true) .build(); @@ -828,4 +830,19 @@ public void testLoadSlowLogFieldProvider() { assertEquals(Map.of(), fieldProvider.searchSlowLogFields()); assertEquals(Map.of(), fieldProvider.indexSlowLogFields()); } + + public void testWithTempIndexServiceHandlesExistingIndex() throws Exception { + // This 
test makes sure that we can run withTempIndexService even if the index already exists + IndicesService indicesService = getIndicesService(); + IndexMetadata indexMetadata = new IndexMetadata.Builder("test").settings( + indexSettings(randomIntBetween(1, 5), randomIntBetween(0, 5)).put("index.version.created", IndexVersions.V_8_10_0) + .put(SETTING_INDEX_UUID, randomUUID()) + ).build(); + IndexService createdIndexService = indicesService.createIndex(indexMetadata, List.of(), true); + indicesService.withTempIndexService(indexMetadata, indexService -> { + assertNotEquals(createdIndexService, indexService); + assertEquals(createdIndexService.index(), indexService.index()); + return null; + }); + } } diff --git a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java index 8d2255df9e7e8..ff2f55c791dd3 100644 --- a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java @@ -254,6 +254,8 @@ public void testBorrowingSiblingBreakerMemory() { assertThat(exception.getMessage(), containsString("request=157286400/150mb")); assertThat(exception.getDurability(), equalTo(CircuitBreaker.Durability.TRANSIENT)); } + + assertCircuitBreakerLimitWarning(); } public void testParentBreaksOnRealMemoryUsage() { @@ -325,6 +327,8 @@ long currentMemoryUsage() { memoryUsage.set(100); requestBreaker.addEstimateBytesAndMaybeBreak(reservationInBytes, "request"); assertEquals(0, requestBreaker.getTrippedCount()); + + assertCircuitBreakerLimitWarning(); } /** @@ -749,6 +753,7 @@ public void testTrippedCircuitBreakerDurability() { equalTo(expectedDurability) ); } + assertCircuitBreakerLimitWarning(); } public void testAllocationBucketsBreaker() { @@ -785,6 +790,8 @@ public void testAllocationBucketsBreaker() { assertThat(exception.getMessage(), containsString("[parent] Data too large, data for [allocated_buckets] would be")); assertThat(exception.getMessage(), containsString("which is larger than the limit of [100/100b]")); } + + assertCircuitBreakerLimitWarning(); } public void testRegisterCustomCircuitBreakers_WithDuplicates() { @@ -891,7 +898,7 @@ public void testApplySettingForUpdatingUseRealMemory() { service.getParentLimit() ); - // total.limit defaults to 70% of the JVM heap if use_real_memory set to true + // total.limit defaults to 95% of the JVM heap if use_real_memory set to true clusterSettings.applySettings(Settings.builder().put(useRealMemoryUsageSetting, true).build()); assertEquals( MemorySizeValue.parseBytesSizeValueOrHeapRatio("95%", totalCircuitBreakerLimitSetting).getBytes(), @@ -900,6 +907,15 @@ public void testApplySettingForUpdatingUseRealMemory() { } } + public void testSizeBelowMinimumWarning() { + ByteSizeValue sizeValue = MemorySizeValue.parseHeapRatioOrDeprecatedByteSizeValue( + "19%", + HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), + 20 + ); + assertWarnings("[indices.breaker.total.limit] setting of [19%] is below the recommended minimum of 20.0% of the heap"); + } + public void testBuildParentTripMessage() { class TestChildCircuitBreaker extends NoopCircuitBreaker { private final long used; @@ -972,4 +988,12 @@ public double getOverhead() { HierarchyCircuitBreakerService.permitNegativeValues = false; } } + + void assertCircuitBreakerLimitWarning() { + assertWarnings( + 
"[indices.breaker.total.limit] should be specified using a percentage of the heap. " + + "Absolute size settings will be forbidden in a future release" + ); + + } } diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index 60d73f873bbd4..0b67da4067fc9 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -489,6 +489,7 @@ public ClusterState applyStartedShards(ClusterState clusterState, Map safeCommit = shard.store().findSafeIndexCommit(globalCheckpoint); assertTrue(safeCommit.isPresent()); int expectedTotalLocal = 0; - if (safeCommit.get().localCheckpoint < globalCheckpoint) { - try (Translog.Snapshot snapshot = getTranslog(shard).newSnapshot(safeCommit.get().localCheckpoint + 1, globalCheckpoint)) { + if (safeCommit.get().localCheckpoint() < globalCheckpoint) { + try (Translog.Snapshot snapshot = getTranslog(shard).newSnapshot(safeCommit.get().localCheckpoint() + 1, globalCheckpoint)) { Translog.Operation op; while ((op = snapshot.next()) != null) { if (op.seqNo() <= globalCheckpoint) { @@ -276,7 +276,7 @@ public void testPrepareIndexForPeerRecovery() throws Exception { replica.markAsRecovering("for testing", new RecoveryState(replica.routingEntry(), localNode, localNode)); replica.prepareForIndexRecovery(); if (safeCommit.isPresent()) { - assertThat(recoverLocallyUpToGlobalCheckpoint(replica), equalTo(safeCommit.get().localCheckpoint + 1)); + assertThat(recoverLocallyUpToGlobalCheckpoint(replica), equalTo(safeCommit.get().localCheckpoint() + 1)); assertThat(replica.recoveryState().getTranslog().totalLocal(), equalTo(0)); } else { assertThat(recoverLocallyUpToGlobalCheckpoint(replica), equalTo(UNASSIGNED_SEQ_NO)); @@ -313,7 +313,7 @@ public void testClosedIndexSkipsLocalRecovery() throws Exception { ); replica.markAsRecovering("for testing", new RecoveryState(replica.routingEntry(), localNode, localNode)); replica.prepareForIndexRecovery(); - assertThat(recoverLocallyUpToGlobalCheckpoint(replica), equalTo(safeCommit.get().localCheckpoint + 1)); + assertThat(recoverLocallyUpToGlobalCheckpoint(replica), equalTo(safeCommit.get().localCheckpoint() + 1)); assertThat(replica.recoveryState().getTranslog().totalLocal(), equalTo(0)); assertThat(replica.recoveryState().getTranslog().recoveredOperations(), equalTo(0)); assertThat(replica.getLastKnownGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); @@ -350,7 +350,11 @@ public void testMarkDoneFailureIsPropagated() throws Exception { PlainActionFuture future = new PlainActionFuture<>(); RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, 0L, null, null, new PeerRecoveryTargetService.RecoveryListener() { @Override - public void onRecoveryDone(RecoveryState state, ShardLongFieldRange timestampMillisFieldRange) { + public void onRecoveryDone( + RecoveryState state, + ShardLongFieldRange timestampMillisFieldRange, + ShardLongFieldRange eventIngestedMillisFieldRange + ) { future.onResponse(null); } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index d5ac683569eba..47c9c5e85f7b9 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -252,7 +252,7 @@ public void 
testDifferentHistoryUUIDDisablesOPsRecovery() throws Exception { replica.getPendingPrimaryTerm() ); } else { - translogUUIDtoUse = translogGeneration.translogUUID; + translogUUIDtoUse = translogGeneration.translogUUID(); } try (IndexWriter writer = new IndexWriter(replica.store().directory(), iwc)) { userData.put(Engine.HISTORY_UUID_KEY, historyUUIDtoUse); @@ -444,7 +444,11 @@ public long addDocuments(Iterable> expectThrows(Exception.class, () -> group.recoverReplica(replica, (shard, sourceNode) -> { return new RecoveryTarget(shard, sourceNode, 0L, null, null, new PeerRecoveryTargetService.RecoveryListener() { @Override - public void onRecoveryDone(RecoveryState state, ShardLongFieldRange timestampMillisFieldRange) { + public void onRecoveryDone( + RecoveryState state, + ShardLongFieldRange timestampMillisFieldRange, + ShardLongFieldRange eventIngestedMillisFieldRange + ) { throw new AssertionError("recovery must fail"); } diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java index b2b19f14cfd4b..4cb98c8d3c06b 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java @@ -1175,7 +1175,7 @@ public void testExecuteBulkRequestCallsDocumentSizeObserver() { AtomicInteger parsedValueWasUsed = new AtomicInteger(0); DocumentParsingProvider documentParsingProvider = new DocumentParsingProvider() { @Override - public DocumentSizeObserver newDocumentSizeObserver() { + public DocumentSizeObserver newDocumentSizeObserver(DocWriteRequest request) { return new DocumentSizeObserver() { @Override public XContentParser wrapParser(XContentParser xContentParser) { @@ -1188,6 +1188,7 @@ public long normalisedBytesParsed() { parsedValueWasUsed.incrementAndGet(); return 0; } + }; } }; diff --git a/server/src/test/java/org/elasticsearch/plugins/UberModuleClassLoaderTests.java b/server/src/test/java/org/elasticsearch/plugins/UberModuleClassLoaderTests.java index e3cd11c8f3b68..ecc2f458cdd60 100644 --- a/server/src/test/java/org/elasticsearch/plugins/UberModuleClassLoaderTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/UberModuleClassLoaderTests.java @@ -427,12 +427,12 @@ public String getTestString() { package p; import java.util.ServiceLoader; - import java.util.random.RandomGenerator; + import java.nio.file.spi.FileSystemProvider; public class ServiceCaller { public static String demo() { // check no error if we load a service from the jdk - ServiceLoader randomLoader = ServiceLoader.load(RandomGenerator.class); + ServiceLoader fileSystemLoader = ServiceLoader.load(FileSystemProvider.class); ServiceLoader loader = ServiceLoader.load(MyService.class, ServiceCaller.class.getClassLoader()); return loader.findFirst().get().getTestString(); diff --git a/server/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java b/server/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java index 1540d3223ae72..fb159f8fb208d 100644 --- a/server/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java +++ b/server/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java @@ -31,7 +31,11 @@ public class RecoveriesCollectionTests extends ESIndexLevelReplicationTestCase { static final PeerRecoveryTargetService.RecoveryListener listener = new PeerRecoveryTargetService.RecoveryListener() { @Override - public void onRecoveryDone(RecoveryState state, 
ShardLongFieldRange timestampMillisFieldRange) { + public void onRecoveryDone( + RecoveryState state, + ShardLongFieldRange timestampMillisFieldRange, + ShardLongFieldRange eventIngestedMillisFieldRange + ) { } @@ -69,7 +73,11 @@ public void testRecoveryTimeout() throws Exception { shards.addReplica(), new PeerRecoveryTargetService.RecoveryListener() { @Override - public void onRecoveryDone(RecoveryState state, ShardLongFieldRange timestampMillisFieldRange) { + public void onRecoveryDone( + RecoveryState state, + ShardLongFieldRange timestampMillisFieldRange, + ShardLongFieldRange eventIngestedMillisFieldRange + ) { latch.countDown(); } diff --git a/server/src/test/java/org/elasticsearch/reservedstate/ReservedClusterStateHandlerTests.java b/server/src/test/java/org/elasticsearch/reservedstate/ReservedClusterStateHandlerTests.java index c3b35dc429ebc..096d40ff5b979 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/ReservedClusterStateHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/ReservedClusterStateHandlerTests.java @@ -16,6 +16,8 @@ import java.io.IOException; +import static org.hamcrest.Matchers.is; + public class ReservedClusterStateHandlerTests extends ESTestCase { public void testValidation() { ReservedClusterStateHandler handler = new ReservedClusterStateHandler<>() { @@ -36,9 +38,9 @@ public ValidRequest fromXContent(XContentParser parser) throws IOException { }; handler.validate(new ValidRequest()); - assertEquals( - "Validation error", - expectThrows(IllegalStateException.class, () -> handler.validate(new InvalidRequest())).getMessage() + assertThat( + expectThrows(IllegalStateException.class, () -> handler.validate(new InvalidRequest())).getMessage(), + is("Validation error") ); } diff --git a/server/src/test/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsActionTests.java b/server/src/test/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsActionTests.java index 08e8e46d4b95a..8e7c70ebef896 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsActionTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsActionTests.java @@ -26,7 +26,8 @@ import static org.elasticsearch.common.settings.Setting.Property.Dynamic; import static org.elasticsearch.common.settings.Setting.Property.NodeScope; import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.is; public class ReservedClusterSettingsActionTests extends ESTestCase { @@ -53,9 +54,9 @@ public void testValidation() throws Exception { "indices.recovery.min_bytes_per_sec": "50mb" }"""; - assertEquals( - "persistent setting [indices.recovery.min_bytes_per_sec], not recognized", - expectThrows(IllegalArgumentException.class, () -> processJSON(action, prevState, badPolicyJSON)).getMessage() + assertThat( + expectThrows(IllegalArgumentException.class, () -> processJSON(action, prevState, badPolicyJSON)).getMessage(), + is("persistent setting [indices.recovery.min_bytes_per_sec], not recognized") ); } @@ -69,7 +70,7 @@ public void testSetUnsetSettings() throws Exception { String emptyJSON = ""; TransformState updatedState = processJSON(action, prevState, emptyJSON); - assertEquals(0, updatedState.keys().size()); + assertThat(updatedState.keys(), empty()); assertEquals(prevState.state(), updatedState.state()); String settingsJSON = """ @@ -89,8 +90,8 @@ 
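// Illustrative aside, not part of the change set above: the ReservedClusterSettingsActionTests hunks
// replace assertEquals with Hamcrest's assertThat(actual, is(expected)), which keeps the actual value
// in the first position and yields more descriptive failure messages. A minimal sketch (the values
// below are made up for illustration):
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertEquals;

class AssertionStyleExample {
    void bothStyles() {
        String actual = "50mb";
        assertEquals("50mb", actual);   // JUnit style: expected first, then actual
        assertThat(actual, is("50mb")); // Hamcrest style: actual first, then a matcher
    }
}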
public void testSetUnsetSettings() throws Exception { prevState = updatedState; updatedState = processJSON(action, prevState, settingsJSON); assertThat(updatedState.keys(), containsInAnyOrder("indices.recovery.max_bytes_per_sec", "cluster.remote.cluster_one.seeds")); - assertEquals("50mb", updatedState.state().metadata().persistentSettings().get("indices.recovery.max_bytes_per_sec")); - assertEquals("[127.0.0.1:9300]", updatedState.state().metadata().persistentSettings().get("cluster.remote.cluster_one.seeds")); + assertThat(updatedState.state().metadata().persistentSettings().get("indices.recovery.max_bytes_per_sec"), is("50mb")); + assertThat(updatedState.state().metadata().persistentSettings().get("cluster.remote.cluster_one.seeds"), is("[127.0.0.1:9300]")); String oneSettingJSON = """ { @@ -100,12 +101,12 @@ public void testSetUnsetSettings() throws Exception { prevState = updatedState; updatedState = processJSON(action, prevState, oneSettingJSON); assertThat(updatedState.keys(), containsInAnyOrder("indices.recovery.max_bytes_per_sec")); - assertEquals("25mb", updatedState.state().metadata().persistentSettings().get("indices.recovery.max_bytes_per_sec")); + assertThat(updatedState.state().metadata().persistentSettings().get("indices.recovery.max_bytes_per_sec"), is("25mb")); assertNull(updatedState.state().metadata().persistentSettings().get("cluster.remote.cluster_one.seeds")); prevState = updatedState; updatedState = processJSON(action, prevState, emptyJSON); - assertEquals(0, updatedState.keys().size()); + assertThat(updatedState.keys(), empty()); assertNull(updatedState.state().metadata().persistentSettings().get("indices.recovery.max_bytes_per_sec")); } @@ -130,8 +131,8 @@ public void testSettingNameNormalization() throws Exception { TransformState newState = processJSON(testAction, prevState, json); assertThat(newState.keys(), containsInAnyOrder("dummy.setting1", "dummy.setting2")); - assertThat(newState.state().metadata().persistentSettings().get("dummy.setting1"), equalTo("value1")); - assertThat(newState.state().metadata().persistentSettings().get("dummy.setting2"), equalTo("value2")); + assertThat(newState.state().metadata().persistentSettings().get("dummy.setting1"), is("value1")); + assertThat(newState.state().metadata().persistentSettings().get("dummy.setting2"), is("value2")); String jsonRemoval = """ { @@ -142,6 +143,6 @@ public void testSettingNameNormalization() throws Exception { """; TransformState newState2 = processJSON(testAction, prevState, jsonRemoval); assertThat(newState2.keys(), containsInAnyOrder("dummy.setting2")); - assertThat(newState2.state().metadata().persistentSettings().get("dummy.setting2"), equalTo("value2")); + assertThat(newState2.state().metadata().persistentSettings().get("dummy.setting2"), is("value2")); } } diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java index aca5d2cbee2c9..01c3e37a9ae77 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java @@ -14,11 +14,14 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NodeConnectionsService; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import 
org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RerouteService; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; @@ -35,12 +38,11 @@ import org.mockito.stubbing.Answer; import java.io.IOException; -import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; -import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -48,6 +50,8 @@ import java.util.function.Consumer; import static org.elasticsearch.node.Node.NODE_NAME_SETTING; +import static org.hamcrest.Matchers.anEmptyMap; +import static org.hamcrest.Matchers.hasEntry; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -58,9 +62,9 @@ public class FileSettingsServiceTests extends ESTestCase { private Environment env; private ClusterService clusterService; - private FileSettingsService fileSettingsService; private ReservedClusterStateService controller; private ThreadPool threadpool; + private FileSettingsService fileSettingsService; @Before public void setUp() throws Exception { @@ -68,20 +72,17 @@ public void setUp() throws Exception { threadpool = new TestThreadPool("file_settings_service_tests"); - clusterService = spy( - new ClusterService( - Settings.builder().put(NODE_NAME_SETTING.getKey(), "test").build(), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadpool, - new TaskManager(Settings.EMPTY, threadpool, Set.of()) - ) + clusterService = new ClusterService( + Settings.builder().put(NODE_NAME_SETTING.getKey(), "test").build(), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadpool, + new TaskManager(Settings.EMPTY, threadpool, Set.of()) ); - final DiscoveryNode localNode = DiscoveryNodeUtils.create("node"); - final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + DiscoveryNode localNode = DiscoveryNodeUtils.create("node"); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId())) .build(); - doAnswer((Answer) invocation -> clusterState).when(clusterService).state(); clusterService.setNodeConnectionsService(mock(NodeConnectionsService.class)); clusterService.getClusterApplierService().setInitialState(clusterState); @@ -100,16 +101,25 @@ public void setUp() throws Exception { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - controller = new ReservedClusterStateService( - clusterService, - mock(RerouteService.class), - List.of(new ReservedClusterSettingsAction(clusterSettings)) + controller = spy( + new ReservedClusterStateService( + clusterService, + mock(RerouteService.class), + List.of(new ReservedClusterSettingsAction(clusterSettings)) + ) ); fileSettingsService = spy(new FileSettingsService(clusterService, controller, env)); } @After public void tearDown() throws Exception { + if 
(fileSettingsService.lifecycleState() == Lifecycle.State.STARTED) { + fileSettingsService.stop(); + } + if (fileSettingsService.lifecycleState() == Lifecycle.State.STOPPED) { + fileSettingsService.close(); + } + super.tearDown(); clusterService.close(); threadpool.shutdownNow(); @@ -122,7 +132,6 @@ public void testStartStop() { assertTrue(fileSettingsService.watching()); fileSettingsService.stop(); assertFalse(fileSettingsService.watching()); - fileSettingsService.close(); } public void testOperatorDirName() { @@ -137,85 +146,66 @@ public void testOperatorDirName() { @SuppressWarnings("unchecked") public void testInitialFileError() throws Exception { - ReservedClusterStateService stateService = mock(ReservedClusterStateService.class); - doAnswer((Answer) invocation -> { ((Consumer) invocation.getArgument(2)).accept(new IllegalStateException("Some exception")); return null; - }).when(stateService).process(any(), (XContentParser) any(), any()); + }).when(controller).process(any(), any(XContentParser.class), any()); AtomicBoolean settingsChanged = new AtomicBoolean(false); CountDownLatch latch = new CountDownLatch(1); - final FileSettingsService service = spy(new FileSettingsService(clusterService, stateService, env)); - - service.addFileChangedListener(() -> settingsChanged.set(true)); + fileSettingsService.addFileChangedListener(() -> settingsChanged.set(true)); - doAnswer((Answer) invocation -> { + doAnswer((Answer) invocation -> { try { - invocation.callRealMethod(); + return invocation.callRealMethod(); } finally { latch.countDown(); } - return null; - }).when(service).processFileChanges(); + }).when(fileSettingsService).processFileChanges(); - Files.createDirectories(service.watchedFileDir()); + Files.createDirectories(fileSettingsService.watchedFileDir()); // contents of the JSON don't matter, we just need a file to exist - writeTestFile(service.watchedFile(), "{}"); + writeTestFile(fileSettingsService.watchedFile(), "{}"); - service.start(); - service.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE)); + fileSettingsService.start(); + fileSettingsService.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE)); // wait until the watcher thread has started, and it has discovered the file assertTrue(latch.await(20, TimeUnit.SECONDS)); - verify(service, times(1)).processFileChanges(); + verify(fileSettingsService, times(1)).processFileChanges(); // assert we never notified any listeners of successful application of file based settings assertFalse(settingsChanged.get()); - - service.stop(); - service.close(); } @SuppressWarnings("unchecked") public void testInitialFileWorks() throws Exception { - ReservedClusterStateService stateService = mock(ReservedClusterStateService.class); - // Let's check that if we didn't throw an error that everything works doAnswer((Answer) invocation -> { ((Consumer) invocation.getArgument(2)).accept(null); return null; - }).when(stateService).process(any(), (XContentParser) any(), any()); + }).when(controller).process(any(), any(XContentParser.class), any()); CountDownLatch latch = new CountDownLatch(1); - final FileSettingsService service = spy(new FileSettingsService(clusterService, stateService, env)); - - service.addFileChangedListener(latch::countDown); + fileSettingsService.addFileChangedListener(latch::countDown); - Files.createDirectories(service.watchedFileDir()); + Files.createDirectories(fileSettingsService.watchedFileDir()); // contents of the JSON don't matter, 
we just need a file to exist - writeTestFile(service.watchedFile(), "{}"); + writeTestFile(fileSettingsService.watchedFile(), "{}"); - service.start(); - service.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE)); + fileSettingsService.start(); + fileSettingsService.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE)); // wait for listener to be called assertTrue(latch.await(20, TimeUnit.SECONDS)); - verify(service, times(1)).processFileChanges(); - - service.stop(); - service.close(); + verify(fileSettingsService, times(1)).processFileChanges(); } @SuppressWarnings("unchecked") public void testStopWorksInMiddleOfProcessing() throws Exception { - var spiedController = spy(controller); - var fsService = new FileSettingsService(clusterService, spiedController, env); - FileSettingsService service = spy(fsService); - CountDownLatch processFileLatch = new CountDownLatch(1); CountDownLatch deadThreadLatch = new CountDownLatch(1); @@ -230,41 +220,84 @@ public void testStopWorksInMiddleOfProcessing() throws Exception { throw new RuntimeException(e); } }).start(); - return new ReservedStateChunk(Collections.emptyMap(), new ReservedStateVersion(1L, Version.CURRENT)); - }).when(spiedController).parse(any(String.class), any()); + return new ReservedStateChunk(Map.of(), new ReservedStateVersion(1L, Version.CURRENT)); + }).when(controller).parse(any(String.class), any()); doAnswer((Answer) invocation -> { var completionListener = invocation.getArgument(1, ActionListener.class); completionListener.onResponse(null); return null; - }).when(spiedController).initEmpty(any(String.class), any()); + }).when(controller).initEmpty(any(String.class), any()); - service.start(); - service.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE)); - assertTrue(service.watching()); + fileSettingsService.start(); + fileSettingsService.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE)); + assertTrue(fileSettingsService.watching()); - Files.createDirectories(service.watchedFileDir()); + Files.createDirectories(fileSettingsService.watchedFileDir()); // Make some fake settings file to cause the file settings service to process it - writeTestFile(service.watchedFile(), "{}"); + writeTestFile(fileSettingsService.watchedFile(), "{}"); // we need to wait a bit, on MacOS it may take up to 10 seconds for the Java watcher service to notice the file, // on Linux is instantaneous. Windows is instantaneous too. 
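An aside on the latch choreography used in testStopWorksInMiddleOfProcessing above: the stubbed parse() call parks a worker thread on one latch and signals a second latch, so the test can make assertions while processing is deliberately stuck. A minimal, self-contained sketch of that pattern with Mockito — the Parser interface and class names here are illustrative stand-ins, not the production types:

    import static org.mockito.Mockito.anyString;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;

    interface Parser {
        Object parse(String content);
    }

    public class LatchPatternSketch {
        public static void main(String[] args) throws Exception {
            CountDownLatch entered = new CountDownLatch(1);  // signals "worker is inside parse()"
            CountDownLatch release = new CountDownLatch(1);  // holds the worker until the test is done asserting

            Parser parser = mock(Parser.class);
            when(parser.parse(anyString())).thenAnswer(invocation -> {
                entered.countDown();   // let the test know processing has started
                release.await();       // park here until the test releases us
                return new Object();
            });

            Thread worker = new Thread(() -> parser.parse("{}"));
            worker.start();

            // the test can now assert state while the worker is stuck mid-processing
            if (entered.await(30, TimeUnit.SECONDS) == false) {
                throw new AssertionError("worker never reached parse()");
            }

            release.countDown();  // unblock the worker so the test exits cleanly
            worker.join();
        }
    }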
assertTrue(processFileLatch.await(30, TimeUnit.SECONDS)); // Stopping the service should interrupt the watcher thread, we should be able to stop - service.stop(); - assertFalse(service.watching()); - service.close(); + fileSettingsService.stop(); + assertFalse(fileSettingsService.watching()); + fileSettingsService.close(); // let the deadlocked thread end, so we can cleanly exit the test deadThreadLatch.countDown(); } + public void testHandleSnapshotRestoreClearsMetadata() throws Exception { + ClusterState state = ClusterState.builder(clusterService.state()) + .metadata( + Metadata.builder(clusterService.state().metadata()) + .put(new ReservedStateMetadata(FileSettingsService.NAMESPACE, 1L, Map.of(), null)) + .build() + ) + .build(); + + Metadata.Builder metadata = Metadata.builder(state.metadata()); + fileSettingsService.handleSnapshotRestore(state, metadata); + + assertThat(metadata.build().reservedStateMetadata(), anEmptyMap()); + } + + public void testHandleSnapshotRestoreResetsMetadata() throws Exception { + fileSettingsService.start(); + fileSettingsService.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE)); + + Files.createDirectories(fileSettingsService.watchedFileDir()); + // contents of the JSON don't matter, we just need a file to exist + writeTestFile(fileSettingsService.watchedFile(), "{}"); + assertTrue(fileSettingsService.watching()); + + ClusterState state = ClusterState.builder(clusterService.state()) + .metadata( + Metadata.builder(clusterService.state().metadata()) + .put(new ReservedStateMetadata(FileSettingsService.NAMESPACE, 1L, Map.of(), null)) + .build() + ) + .build(); + + Metadata.Builder metadata = Metadata.builder(); + fileSettingsService.handleSnapshotRestore(state, metadata); + + assertThat( + metadata.build().reservedStateMetadata(), + hasEntry( + FileSettingsService.NAMESPACE, + new ReservedStateMetadata(FileSettingsService.NAMESPACE, ReservedStateMetadata.RESTORED_VERSION, Map.of(), null) + ) + ); + } + // helpers private void writeTestFile(Path path, String contents) throws IOException { Path tempFilePath = createTempFile(); - - Files.write(tempFilePath, contents.getBytes(StandardCharsets.UTF_8)); + Files.writeString(tempFilePath, contents); Files.move(tempFilePath, path, StandardCopyOption.ATOMIC_MOVE); } } diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java index fe9401284b9f5..9167a97c4b5c1 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateAckListener; @@ -26,7 +25,6 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Releasable; -import org.elasticsearch.reservedstate.NonStateTransformResult; import org.elasticsearch.reservedstate.ReservedClusterStateHandler; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; @@ -34,12 +32,12 @@ import 
org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; +import org.junit.Assert; +import org.mockito.ArgumentCaptor; +import org.mockito.ArgumentMatchers; import java.io.IOException; -import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; @@ -47,24 +45,27 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; -import java.util.stream.Collectors; -import static org.elasticsearch.reservedstate.service.ReservedStateUpdateTask.checkMetadataVersion; -import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.hamcrest.Matchers.startsWith; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.when; public class ReservedClusterStateServiceTests extends ESTestCase { @@ -74,6 +75,65 @@ private static MasterServiceTaskQueue mo return (MasterServiceTaskQueue) mock(MasterServiceTaskQueue.class); } + private static class TestTaskContext implements ClusterStateTaskExecutor.TaskContext { + private final T task; + + private TestTaskContext(T task) { + this.task = task; + } + + @Override + public T getTask() { + return task; + } + + @Override + public void success(Runnable onPublicationSuccess) { + onPublicationSuccess.run(); + } + + @Override + public void success(Consumer publishedStateConsumer) {} + + @Override + public void success(Runnable onPublicationSuccess, ClusterStateAckListener clusterStateAckListener) {} + + @Override + public void success(Consumer publishedStateConsumer, ClusterStateAckListener clusterStateAckListener) {} + + @Override + public void onFailure(Exception failure) {} + + @Override + public Releasable captureResponseHeaders() { + return null; + } + } + + private static class TestStateHandler implements ReservedClusterStateHandler> { + private final String name; + + private TestStateHandler(String name) { + this.name = name; + } + + @Override + public String name() { + return name; + } + + @Override + public TransformState transform(Object source, TransformState prevState) throws Exception { + ClusterState newState = new ClusterState.Builder(prevState.state()).build(); + return new TransformState(newState, prevState.keys()); + } + + @Override + public Map fromXContent(XContentParser parser) throws IOException { + return parser.map(); + } + } + public void testOperatorController() throws IOException { ClusterSettings clusterSettings = new 
ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ClusterService clusterService = mock(ClusterService.class); @@ -106,9 +166,9 @@ public void testOperatorController() throws IOException { AtomicReference x = new AtomicReference<>(); try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, testJSON)) { - controller.process("operator", parser, (e) -> x.set(e)); + controller.process("operator", parser, x::set); - assertTrue(x.get() instanceof IllegalStateException); + assertThat(x.get(), instanceOf(IllegalStateException.class)); assertThat(x.get().getMessage(), containsString("Error processing state change request for operator")); } @@ -136,16 +196,36 @@ public void testOperatorController() throws IOException { """; try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, testJSON)) { - controller.process("operator", parser, (e) -> { - if (e != null) { - fail("Should not fail"); - } - }); + controller.process("operator", parser, Assert::assertNull); } } - public void testUpdateStateTasks() throws Exception { + public void testInitEmptyTask() { ClusterService clusterService = mock(ClusterService.class); + + ArgumentCaptor updateTask = ArgumentCaptor.captor(); + + // grab the update task when it gets given to us + when(clusterService.createTaskQueue(ArgumentMatchers.contains("reserved state update"), any(), any())).thenAnswer(i -> { + MasterServiceTaskQueue queue = mockTaskQueue(); + doNothing().when(queue).submitTask(any(), updateTask.capture(), any()); + return queue; + }); + + ReservedClusterStateService service = new ReservedClusterStateService(clusterService, mock(RerouteService.class), List.of()); + service.initEmpty("namespace", ActionListener.noop()); + + assertThat(updateTask.getValue(), notNullValue()); + ClusterState state = ClusterState.builder(new ClusterName("test")).build(); + ClusterState updatedState = updateTask.getValue().execute(state); + + assertThat( + updatedState.metadata().reservedStateMetadata(), + equalTo(Map.of("namespace", new ReservedStateMetadata("namespace", ReservedStateMetadata.EMPTY_VERSION, Map.of(), null))) + ); + } + + public void testUpdateStateTasks() throws Exception { RerouteService rerouteService = mock(RerouteService.class); ClusterState state = ClusterState.builder(new ClusterName("test")).build(); @@ -155,53 +235,17 @@ public void testUpdateStateTasks() throws Exception { AtomicBoolean successCalled = new AtomicBoolean(false); ReservedStateUpdateTask task = spy( - new ReservedStateUpdateTask( - "test", - null, - List.of(), - Collections.emptyMap(), - Collections.emptySet(), - errorState -> {}, - new ActionListener<>() { - @Override - public void onResponse(ActionResponse.Empty empty) {} - - @Override - public void onFailure(Exception e) {} - } - ) + new ReservedStateUpdateTask("test", null, Map.of(), Set.of(), errorState -> {}, ActionListener.noop()) ); doReturn(state).when(task).execute(any()); - ClusterStateTaskExecutor.TaskContext taskContext = new ClusterStateTaskExecutor.TaskContext<>() { - @Override - public ReservedStateUpdateTask getTask() { - return task; - } - + ClusterStateTaskExecutor.TaskContext taskContext = new TestTaskContext<>(task) { @Override public void success(Runnable onPublicationSuccess) { - onPublicationSuccess.run(); + super.success(onPublicationSuccess); successCalled.set(true); } - - @Override - public void success(Consumer publishedStateConsumer) {} - - @Override - public void success(Runnable 
onPublicationSuccess, ClusterStateAckListener clusterStateAckListener) {} - - @Override - public void success(Consumer publishedStateConsumer, ClusterStateAckListener clusterStateAckListener) {} - - @Override - public void onFailure(Exception failure) {} - - @Override - public Releasable captureResponseHeaders() { - return null; - } }; ClusterState newState = taskExecutor.execute( @@ -215,60 +259,61 @@ public Releasable captureResponseHeaders() { verify(rerouteService, times(1)).reroute(anyString(), any(), any()); } - public void testErrorStateTask() throws Exception { + public void testUpdateErrorState() { + ClusterService clusterService = mock(ClusterService.class); ClusterState state = ClusterState.builder(new ClusterName("test")).build(); - final var listenerCompleted = new AtomicBoolean(false); + ArgumentCaptor updateTask = ArgumentCaptor.captor(); + MasterServiceTaskQueue errorQueue = mockTaskQueue(); + doNothing().when(errorQueue).submitTask(any(), updateTask.capture(), any()); - ReservedStateErrorTask task = spy( - new ReservedStateErrorTask( - new ErrorState("test", 1L, List.of("some parse error", "some io error"), ReservedStateErrorMetadata.ErrorKind.PARSING), - new ActionListener<>() { - @Override - public void onResponse(ActionResponse.Empty empty) { - listenerCompleted.set(true); - } - - @Override - public void onFailure(Exception e) {} - } - ) - ); + // grab the update task when it gets given to us + when(clusterService.createTaskQueue(ArgumentMatchers.contains("reserved state error"), any(), any())) + .thenReturn(errorQueue); + when(clusterService.state()).thenReturn(state); - ReservedStateErrorTaskExecutor.TaskContext taskContext = - new ReservedStateErrorTaskExecutor.TaskContext<>() { - @Override - public ReservedStateErrorTask getTask() { - return task; - } + ReservedClusterStateService service = new ReservedClusterStateService(clusterService, mock(RerouteService.class), List.of()); - @Override - public void success(Runnable onPublicationSuccess) { - onPublicationSuccess.run(); - } + ErrorState error = new ErrorState("namespace", 2L, List.of("error"), ReservedStateErrorMetadata.ErrorKind.TRANSIENT); + service.updateErrorState(error); - @Override - public void success(Consumer publishedStateConsumer) {} + assertThat(updateTask.getValue(), notNullValue()); + verify(errorQueue).submitTask(any(), any(), any()); - @Override - public void success(Runnable onPublicationSuccess, ClusterStateAckListener clusterStateAckListener) {} + ClusterState updatedState = updateTask.getValue().execute(state); + assertThat( + updatedState.metadata().reservedStateMetadata().get("namespace"), + equalTo( + new ReservedStateMetadata( + "namespace", + ReservedStateMetadata.NO_VERSION, + Map.of(), + new ReservedStateErrorMetadata(2L, ReservedStateErrorMetadata.ErrorKind.TRANSIENT, List.of("error")) + ) + ) + ); - @Override - public void success(Consumer publishedStateConsumer, ClusterStateAckListener clusterStateAckListener) {} + // it should not update if the error version is less than the current version + when(clusterService.state()).thenReturn(updatedState); + ErrorState oldError = new ErrorState("namespace", 1L, List.of("old error"), ReservedStateErrorMetadata.ErrorKind.TRANSIENT); + service.updateErrorState(oldError); + verifyNoMoreInteractions(errorQueue); + } - @Override - public void onFailure(Exception failure) {} + public void testErrorStateTask() throws Exception { + ClusterState state = ClusterState.builder(new ClusterName("test")).build(); - @Override - public Releasable 
captureResponseHeaders() { - return null; - } - }; + final var listenerCompleted = new AtomicBoolean(false); - ReservedStateErrorTaskExecutor executor = new ReservedStateErrorTaskExecutor(); + ReservedStateErrorTask task = spy( + new ReservedStateErrorTask( + new ErrorState("test", 1L, List.of("some parse error", "some io error"), ReservedStateErrorMetadata.ErrorKind.PARSING), + ActionListener.running(() -> listenerCompleted.set(true)) + ) + ); - ClusterState newState = executor.execute( - new ClusterStateTaskExecutor.BatchExecutionContext<>(state, List.of(taskContext), () -> null) + ClusterState newState = new ReservedStateErrorTaskExecutor().execute( + new ClusterStateTaskExecutor.BatchExecutionContext<>(state, List.of(new TestTaskContext<>(task)), () -> null) ); verify(task, times(1)).execute(any()); @@ -276,46 +321,19 @@ public Releasable captureResponseHeaders() { ReservedStateMetadata operatorMetadata = newState.metadata().reservedStateMetadata().get("test"); assertNotNull(operatorMetadata); assertNotNull(operatorMetadata.errorMetadata()); - assertEquals(1L, (long) operatorMetadata.errorMetadata().version()); - assertEquals(ReservedStateErrorMetadata.ErrorKind.PARSING, operatorMetadata.errorMetadata().errorKind()); + assertThat(operatorMetadata.errorMetadata().version(), is(1L)); + assertThat(operatorMetadata.errorMetadata().errorKind(), is(ReservedStateErrorMetadata.ErrorKind.PARSING)); assertThat(operatorMetadata.errorMetadata().errors(), contains("some parse error", "some io error")); assertTrue(listenerCompleted.get()); } public void testUpdateTaskDuplicateError() { - ReservedClusterStateHandler> newStateMaker = new ReservedClusterStateHandler<>() { - @Override - public String name() { - return "maker"; - } - - @Override - public TransformState transform(Object source, TransformState prevState) throws Exception { - ClusterState newState = new ClusterState.Builder(prevState.state()).build(); - return new TransformState(newState, prevState.keys()); - } - - @Override - public Map fromXContent(XContentParser parser) throws IOException { - return parser.map(); - } - }; - - ReservedClusterStateHandler> exceptionThrower = new ReservedClusterStateHandler<>() { - @Override - public String name() { - return "one"; - } - + ReservedClusterStateHandler> newStateMaker = new TestStateHandler("maker"); + ReservedClusterStateHandler> exceptionThrower = new TestStateHandler("one") { @Override public TransformState transform(Object source, TransformState prevState) throws Exception { throw new Exception("anything"); } - - @Override - public Map fromXContent(XContentParser parser) throws IOException { - return parser.map(); - } }; ReservedStateHandlerMetadata hmOne = new ReservedStateHandlerMetadata("one", Set.of("a", "b")); @@ -348,17 +366,10 @@ public Map fromXContent(XContentParser parser) throws IOExceptio ReservedStateUpdateTask task = new ReservedStateUpdateTask( "namespace_one", chunk, - List.of(), Map.of(exceptionThrower.name(), exceptionThrower, newStateMaker.name(), newStateMaker), orderedHandlers, errorState -> assertFalse(ReservedStateErrorTask.isNewError(operatorMetadata, errorState.version())), - new ActionListener<>() { - @Override - public void onResponse(ActionResponse.Empty empty) {} - - @Override - public void onFailure(Exception e) {} - } + ActionListener.noop() ); ClusterService clusterService = mock(ClusterService.class); @@ -366,10 +377,8 @@ public void onFailure(Exception e) {} new ReservedClusterStateService(clusterService, mock(RerouteService.class), List.of(newStateMaker, 
exceptionThrower)) ); - var trialRunResult = controller.trialRun("namespace_one", state, chunk, new LinkedHashSet<>(orderedHandlers)); - assertEquals(0, trialRunResult.nonStateTransforms().size()); - assertEquals(1, trialRunResult.errors().size()); - assertTrue(trialRunResult.errors().get(0).contains("Error processing one state change:")); + var trialRunErrors = controller.trialRun("namespace_one", state, chunk, new LinkedHashSet<>(orderedHandlers)); + assertThat(trialRunErrors, contains(containsString("Error processing one state change:"))); // We exit on duplicate errors before we update the cluster state error metadata assertThat( @@ -403,22 +412,40 @@ public void onFailure(Exception e) {} public void testCheckMetadataVersion() { ReservedStateMetadata operatorMetadata = ReservedStateMetadata.builder("test").version(123L).build(); - assertTrue(checkMetadataVersion("operator", operatorMetadata, new ReservedStateVersion(124L, Version.CURRENT))); + ClusterState state = ClusterState.builder(new ClusterName("test")).metadata(Metadata.builder().put(operatorMetadata)).build(); + ReservedStateUpdateTask task = new ReservedStateUpdateTask( + "test", + new ReservedStateChunk(Map.of(), new ReservedStateVersion(124L, Version.CURRENT)), + Map.of(), + List.of(), + e -> {}, + ActionListener.noop() + ); + assertThat("Cluster state should be modified", task.execute(state), not(sameInstance(state))); - assertFalse(checkMetadataVersion("operator", operatorMetadata, new ReservedStateVersion(123L, Version.CURRENT))); + task = new ReservedStateUpdateTask( + "test", + new ReservedStateChunk(Map.of(), new ReservedStateVersion(123L, Version.CURRENT)), + Map.of(), + List.of(), + e -> {}, + ActionListener.noop() + ); + assertThat("Cluster state should not be modified", task.execute(state), sameInstance(state)); - assertFalse( - checkMetadataVersion("operator", operatorMetadata, new ReservedStateVersion(124L, Version.fromId(Version.CURRENT.id + 1))) + task = new ReservedStateUpdateTask( + "test", + new ReservedStateChunk(Map.of(), new ReservedStateVersion(124L, Version.fromId(Version.CURRENT.id + 1))), + Map.of(), + List.of(), + e -> {}, + ActionListener.noop() ); + assertThat("Cluster state should not be modified", task.execute(state), sameInstance(state)); } - private ReservedClusterStateHandler> makeHandlerHelper(final String name, final List deps) { - return new ReservedClusterStateHandler<>() { - @Override - public String name() { - return name; - } - + private ReservedClusterStateHandler> makeHandlerHelper(String name, List deps) { + return new TestStateHandler(name) { @Override public TransformState transform(Object source, TransformState prevState) throws Exception { return null; @@ -428,17 +455,12 @@ public TransformState transform(Object source, TransformState prevState) throws public Collection dependencies() { return deps; } - - @Override - public Map fromXContent(XContentParser parser) throws IOException { - return parser.map(); - } }; } public void testHandlerOrdering() { ReservedClusterStateHandler> oh1 = makeHandlerHelper("one", List.of("two", "three")); - ReservedClusterStateHandler> oh2 = makeHandlerHelper("two", Collections.emptyList()); + ReservedClusterStateHandler> oh2 = makeHandlerHelper("two", List.of()); ReservedClusterStateHandler> oh3 = makeHandlerHelper("three", List.of("two")); ClusterService clusterService = mock(ClusterService.class); @@ -447,16 +469,16 @@ public void testHandlerOrdering() { assertThat(ordered, contains("two", "three", "one")); // assure that we bail on unknown 
handler - assertEquals( - "Unknown handler type: four", + assertThat( expectThrows(IllegalStateException.class, () -> controller.orderedStateHandlers(Set.of("one", "two", "three", "four"))) - .getMessage() + .getMessage(), + is("Unknown handler type: four") ); // assure that we bail on missing dependency link - assertEquals( - "Missing handler dependency definition: one -> three", - expectThrows(IllegalStateException.class, () -> controller.orderedStateHandlers(Set.of("one", "two"))).getMessage() + assertThat( + expectThrows(IllegalStateException.class, () -> controller.orderedStateHandlers(Set.of("one", "two"))).getMessage(), + is("Missing handler dependency definition: one -> three") ); // Change the second handler so that we create cycle @@ -481,15 +503,21 @@ public void testDuplicateHandlerNames() { ClusterState state = ClusterState.builder(clusterName).build(); when(clusterService.state()).thenReturn(state); - assertTrue( + assertThat( expectThrows( IllegalStateException.class, () -> new ReservedClusterStateService( clusterService, mock(RerouteService.class), - List.of(new ReservedClusterSettingsAction(clusterSettings), new TestHandler()) + List.of(new ReservedClusterSettingsAction(clusterSettings), new TestStateHandler(ReservedClusterSettingsAction.NAME) { + @Override + public TransformState transform(Object source, TransformState prevState) throws Exception { + return prevState; + } + }) ) - ).getMessage().startsWith("Duplicate key cluster_settings") + ).getMessage(), + startsWith("Duplicate key cluster_settings") ); } @@ -506,48 +534,17 @@ public void testCheckAndReportError() { var version = new ReservedStateVersion(2L, Version.CURRENT); var error = controller.checkAndReportError("test", List.of("test error"), version); - assertThat(error, allOf(notNullValue(), instanceOf(IllegalStateException.class))); - assertEquals("Error processing state change request for test, errors: test error", error.getMessage()); + assertThat(error, instanceOf(IllegalStateException.class)); + assertThat(error.getMessage(), is("Error processing state change request for test, errors: test error")); verify(controller, times(1)).updateErrorState(any()); } public void testTrialRunExtractsNonStateActions() { - ReservedClusterStateHandler> newStateMaker = new ReservedClusterStateHandler<>() { - @Override - public String name() { - return "maker"; - } - - @Override - public TransformState transform(Object source, TransformState prevState) throws Exception { - ClusterState newState = new ClusterState.Builder(prevState.state()).build(); - return new TransformState(newState, prevState.keys()); - } - - @Override - public Map fromXContent(XContentParser parser) throws IOException { - return parser.map(); - } - }; - - ReservedClusterStateHandler> exceptionThrower = new ReservedClusterStateHandler<>() { - @Override - public String name() { - return "non-state"; - } - + ReservedClusterStateHandler> newStateMaker = new TestStateHandler("maker"); + ReservedClusterStateHandler> exceptionThrower = new TestStateHandler("non-state") { @Override public TransformState transform(Object source, TransformState prevState) { - return new TransformState(prevState.state(), prevState.keys(), (l) -> internalKeys(l)); - } - - private void internalKeys(ActionListener listener) { - listener.onResponse(new NonStateTransformResult(name(), Set.of("key non-state"))); - } - - @Override - public Map fromXContent(XContentParser parser) throws IOException { - return parser.map(); + return new TransformState(prevState.state(), prevState.keys()); } 
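For the captor-based tests above (testInitEmptyTask, testUpdateErrorState), the underlying trick is to capture whatever task the service hands to its mocked queue and then execute that captured task directly against a cluster state. A rough sketch of the same technique with made-up types (TaskQueue, Scheduler), not the Elasticsearch classes:

    import static org.mockito.Mockito.doNothing;
    import static org.mockito.Mockito.mock;

    import org.mockito.ArgumentCaptor;

    interface TaskQueue {
        void submit(Runnable task);
    }

    class Scheduler {
        private final TaskQueue queue;

        Scheduler(TaskQueue queue) {
            this.queue = queue;
        }

        void scheduleGreeting(StringBuilder out) {
            queue.submit(() -> out.append("hello"));
        }
    }

    public class CaptorSketch {
        public static void main(String[] args) {
            TaskQueue queue = mock(TaskQueue.class);
            ArgumentCaptor<Runnable> captor = ArgumentCaptor.forClass(Runnable.class);
            doNothing().when(queue).submit(captor.capture());

            StringBuilder out = new StringBuilder();
            new Scheduler(queue).scheduleGreeting(out);

            captor.getValue().run();  // run the captured task directly, no real queue involved
            if (out.toString().equals("hello") == false) {
                throw new AssertionError("captured task did not run as expected");
            }
        }
    }

The appeal of this shape is that the assertion exercises the task's real logic synchronously, without standing up a master service or waiting on asynchronous publication.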
}; @@ -575,119 +572,7 @@ public Map fromXContent(XContentParser parser) throws IOExceptio new ReservedClusterStateService(clusterService, mock(RerouteService.class), List.of(newStateMaker, exceptionThrower)) ); - var trialRunResult = controller.trialRun("namespace_one", state, chunk, new LinkedHashSet<>(orderedHandlers)); - - assertEquals(1, trialRunResult.nonStateTransforms().size()); - assertEquals(0, trialRunResult.errors().size()); - trialRunResult.nonStateTransforms().get(0).accept(new ActionListener<>() { - @Override - public void onResponse(NonStateTransformResult nonStateTransformResult) { - assertThat(nonStateTransformResult.updatedKeys(), containsInAnyOrder("key non-state")); - assertEquals("non-state", nonStateTransformResult.handlerName()); - } - - @Override - public void onFailure(Exception e) { - fail("Should not reach here"); - } - }); - } - - public void testExecuteNonStateTransformationSteps() { - int count = randomInt(10); - var handlers = new ArrayList>(); - var i = 0; - var builder = ReservedStateMetadata.builder("namespace_one").version(1L); - var chunkMap = new HashMap(); - - while (i < count) { - final var key = i++; - var handler = new ReservedClusterStateHandler<>() { - @Override - public String name() { - return "non-state:" + key; - } - - @Override - public TransformState transform(Object source, TransformState prevState) { - return new TransformState(prevState.state(), prevState.keys(), (l) -> internalKeys(l)); - } - - private void internalKeys(ActionListener listener) { - listener.onResponse(new NonStateTransformResult(name(), Set.of("key non-state:" + key))); - } - - @Override - public Map fromXContent(XContentParser parser) throws IOException { - return parser.map(); - } - }; - - builder.putHandler(new ReservedStateHandlerMetadata(handler.name(), Set.of("a", "b"))); - handlers.add(handler); - chunkMap.put(handler.name(), i); - } - - final ReservedStateMetadata operatorMetadata = ReservedStateMetadata.builder("namespace_one").version(1L).build(); - - Metadata metadata = Metadata.builder().put(operatorMetadata).build(); - ClusterState state = ClusterState.builder(new ClusterName("test")).metadata(metadata).build(); - - var chunk = new ReservedStateChunk(chunkMap, new ReservedStateVersion(2L, Version.CURRENT)); - - ClusterService clusterService = mock(ClusterService.class); - final var controller = spy(new ReservedClusterStateService(clusterService, mock(RerouteService.class), handlers)); - - var trialRunResult = controller.trialRun( - "namespace_one", - state, - chunk, - new LinkedHashSet<>(handlers.stream().map(h -> h.name()).toList()) - ); - - assertEquals(count, trialRunResult.nonStateTransforms().size()); - ReservedClusterStateService.executeNonStateTransformationSteps(trialRunResult.nonStateTransforms(), new ActionListener<>() { - @Override - public void onResponse(Collection nonStateTransformResults) { - assertEquals(count, nonStateTransformResults.size()); - var expectedHandlers = new ArrayList(); - var expectedValues = new ArrayList(); - for (int i = 0; i < count; i++) { - expectedHandlers.add("non-state:" + i); - expectedValues.add("key non-state:" + i); - } - assertThat( - nonStateTransformResults.stream().map(n -> n.handlerName()).collect(Collectors.toSet()), - containsInAnyOrder(expectedHandlers.toArray(new String[0])) - ); - assertThat( - nonStateTransformResults.stream().map(n -> n.updatedKeys()).flatMap(Set::stream).collect(Collectors.toSet()), - containsInAnyOrder(expectedValues.toArray(new String[0])) - ); - } - - @Override - public void 
onFailure(Exception e) { - fail("Shouldn't reach here"); - } - }); - } - - class TestHandler implements ReservedClusterStateHandler> { - - @Override - public String name() { - return ReservedClusterSettingsAction.NAME; - } - - @Override - public TransformState transform(Object source, TransformState prevState) { - return prevState; - } - - @Override - public Map fromXContent(XContentParser parser) throws IOException { - return parser.map(); - } + var trialRunErrors = controller.trialRun("namespace_one", state, chunk, new LinkedHashSet<>(orderedHandlers)); + assertThat(trialRunErrors, empty()); } } diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTaskTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTaskTests.java index d887d7edb19f2..72d2310a098cf 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTaskTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTaskTests.java @@ -22,7 +22,7 @@ public class ReservedStateUpdateTaskTests extends ESTestCase { public void testBlockedClusterState() { - var task = new ReservedStateUpdateTask("dummy", null, List.of(), Map.of(), List.of(), e -> {}, ActionListener.noop()); + var task = new ReservedStateUpdateTask("dummy", null, Map.of(), List.of(), e -> {}, ActionListener.noop()); ClusterState notRecoveredClusterState = ClusterState.builder(ClusterName.DEFAULT) .blocks(ClusterBlocks.builder().addGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) .build(); diff --git a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java index 10ea83e59c0ad..67f42e6cf1808 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java @@ -36,6 +36,9 @@ import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.rest.RestHandler.Route; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.telemetry.TelemetryProvider; +import org.elasticsearch.telemetry.metric.LongCounter; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpNodeClient; @@ -66,8 +69,12 @@ import static org.elasticsearch.rest.RestController.ELASTIC_PRODUCT_HTTP_HEADER; import static org.elasticsearch.rest.RestController.ELASTIC_PRODUCT_HTTP_HEADER_VALUE; +import static org.elasticsearch.rest.RestController.HANDLER_NAME_KEY; +import static org.elasticsearch.rest.RestController.REQUEST_METHOD_KEY; +import static org.elasticsearch.rest.RestController.STATUS_CODE_KEY; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.OPTIONS; +import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; @@ -92,6 +99,8 @@ public class RestControllerTests extends ESTestCase { private TestThreadPool threadPool; private NodeClient client; private Tracer tracer; + private LongCounter requestsCounter; + private TelemetryProvider telemetryProvider; private List methodList; @Before @@ -114,7 +123,16 @@ public void setup() { threadPool = createThreadPool(); client = new 
NoOpNodeClient(threadPool); tracer = mock(Tracer.class); - restController = new RestController(null, client, circuitBreakerService, usageService, tracer); + requestsCounter = mock(LongCounter.class); + telemetryProvider = mock(TelemetryProvider.class); + var mockMeterRegister = mock(MeterRegistry.class); + when(telemetryProvider.getTracer()).thenReturn(tracer); + when(telemetryProvider.getMeterRegistry()).thenReturn(mockMeterRegister); + when(mockMeterRegister.registerLongCounter(eq(RestController.METRIC_REQUESTS_TOTAL), anyString(), anyString())).thenReturn( + requestsCounter + ); + + restController = new RestController(null, client, circuitBreakerService, usageService, telemetryProvider); restController.registerHandler( new Route(GET, "/"), (request, channel, client) -> channel.sendResponse( @@ -136,7 +154,7 @@ public void teardown() throws IOException { public void testApplyProductSpecificResponseHeaders() { final ThreadContext threadContext = client.threadPool().getThreadContext(); - final RestController restController = new RestController(null, null, circuitBreakerService, usageService, tracer); + final RestController restController = new RestController(null, null, circuitBreakerService, usageService, telemetryProvider); RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).build(); AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.BAD_REQUEST); restController.dispatchRequest(fakeRequest, channel, threadContext); @@ -152,7 +170,7 @@ public void testRequestWithDisallowedMultiValuedHeader() { Set headers = new HashSet<>( Arrays.asList(new RestHeaderDefinition("header.1", true), new RestHeaderDefinition("header.2", false)) ); - final RestController restController = new RestController(null, null, circuitBreakerService, usageService, tracer); + final RestController restController = new RestController(null, null, circuitBreakerService, usageService, telemetryProvider); Map> restHeaders = new HashMap<>(); restHeaders.put("header.1", Collections.singletonList("boo")); restHeaders.put("header.2", List.of("foo", "bar")); @@ -162,12 +180,122 @@ public void testRequestWithDisallowedMultiValuedHeader() { assertTrue(channel.getSendResponseCalled()); } + public void testDispatchWithNamedHandlerEmitsMetricWithName() { + final ThreadContext threadContext = client.threadPool().getThreadContext(); + final RestController restController = new RestController(null, null, circuitBreakerService, usageService, telemetryProvider); + RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).build(); + final RestController spyRestController = spy(restController); + when(spyRestController.getAllHandlers(any(), eq(fakeRequest.rawPath()))).thenReturn(new Iterator<>() { + @Override + public boolean hasNext() { + return true; + } + + @Override + public MethodHandlers next() { + return new MethodHandlers("/").addMethod(GET, RestApiVersion.current(), new RestHandler() { + @Override + public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) { + channel.sendResponse(new RestResponse(RestStatus.OK, "Test")); + } + + @Override + public String getName() { + return "test_handler_name"; + } + }); + } + }); + AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.OK); + spyRestController.dispatchRequest(fakeRequest, channel, threadContext); + verify(requestsCounter).incrementBy( + eq(1L), + eq(Map.of(STATUS_CODE_KEY, 200, HANDLER_NAME_KEY, "test_handler_name", REQUEST_METHOD_KEY, fakeRequest.method().name())) + 
); + } + + public void testDispatchWithoutANamedHandlerEmitsMetricWithNoName() { + final ThreadContext threadContext = client.threadPool().getThreadContext(); + final RestController restController = new RestController(null, null, circuitBreakerService, usageService, telemetryProvider); + RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).build(); + final RestController spyRestController = spy(restController); + when(spyRestController.getAllHandlers(any(), eq(fakeRequest.rawPath()))).thenReturn(new Iterator<>() { + @Override + public boolean hasNext() { + return false; + } + + @Override + public MethodHandlers next() { + return null; + } + }); + AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.BAD_REQUEST); + spyRestController.dispatchRequest(fakeRequest, channel, threadContext); + verify(requestsCounter).incrementBy(eq(1L), eq(Map.of(STATUS_CODE_KEY, 400))); + } + + public void testDispatchThrowsEmitsMetric() { + final ThreadContext threadContext = client.threadPool().getThreadContext(); + final RestController restController = new RestController(null, null, circuitBreakerService, usageService, telemetryProvider); + RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).build(); + final RestController spyRestController = spy(restController); + when(spyRestController.getAllHandlers(any(), eq(fakeRequest.rawPath()))).thenReturn(new Iterator<>() { + @Override + public boolean hasNext() { + return true; + } + + @Override + public MethodHandlers next() { + throw new IllegalArgumentException(); + } + }); + + AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.BAD_REQUEST); + spyRestController.dispatchRequest(fakeRequest, channel, threadContext); + verify(requestsCounter).incrementBy(eq(1L), eq(Map.of(STATUS_CODE_KEY, 400))); + } + + public void testDispatchNoHandlerEmitsMetric() { + final ThreadContext threadContext = client.threadPool().getThreadContext(); + final RestController restController = new RestController(null, null, circuitBreakerService, usageService, telemetryProvider); + RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).build(); + final RestController spyRestController = spy(restController); + var handlers = List.of(new MethodHandlers("/").addMethod(POST, RestApiVersion.current(), new RestHandler() { + @Override + public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) { + channel.sendResponse(new RestResponse(RestStatus.OK, "Test")); + } + + @Override + public String getName() { + return "test_handler_name"; + } + })); + when(spyRestController.getAllHandlers(any(), eq(fakeRequest.rawPath()))).thenAnswer(x -> handlers.iterator()); + + AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.METHOD_NOT_ALLOWED); + spyRestController.dispatchRequest(fakeRequest, channel, threadContext); + verify(requestsCounter).incrementBy(eq(1L), eq(Map.of(STATUS_CODE_KEY, 405))); + } + + public void testDispatchBadRequestEmitsMetric() { + final ThreadContext threadContext = client.threadPool().getThreadContext(); + final RestController restController = new RestController(null, null, circuitBreakerService, usageService, telemetryProvider); + RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).build(); + + AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.BAD_REQUEST); + restController.dispatchBadRequest(channel, threadContext, new Exception()); + 
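The metric assertions in these RestController tests reduce to: stub the registry so it hands back a mocked counter, exercise the dispatch path, then verify incrementBy was called once with the expected attribute map. A stripped-down sketch of that verification pattern, using hypothetical Counter and RequestHandler types rather than the real telemetry API:

    import static org.mockito.ArgumentMatchers.eq;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.verify;

    import java.util.Map;

    interface Counter {
        void incrementBy(long value, Map<String, Object> attributes);
    }

    class RequestHandler {
        private final Counter requestsCounter;

        RequestHandler(Counter requestsCounter) {
            this.requestsCounter = requestsCounter;
        }

        int handle(String method) {
            int status = "GET".equals(method) ? 200 : 405;
            requestsCounter.incrementBy(1L, Map.of("status_code", status, "method", method));
            return status;
        }
    }

    public class MetricsVerificationSketch {
        public static void main(String[] args) {
            Counter requestsCounter = mock(Counter.class);
            new RequestHandler(requestsCounter).handle("GET");
            verify(requestsCounter).incrementBy(eq(1L), eq(Map.of("status_code", 200, "method", "GET")));
        }
    }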
verify(requestsCounter).incrementBy(eq(1L), eq(Map.of(STATUS_CODE_KEY, 400))); + } + /** * Check that dispatching a request causes a trace span to be started. */ public void testDispatchStartsTrace() { final ThreadContext threadContext = client.threadPool().getThreadContext(); - final RestController restController = new RestController(null, null, circuitBreakerService, usageService, tracer); + final RestController restController = new RestController(null, null, circuitBreakerService, usageService, telemetryProvider); RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).build(); final RestController spyRestController = spy(restController); when(spyRestController.getAllHandlers(null, fakeRequest.rawPath())).thenReturn(new Iterator<>() { @@ -196,7 +324,7 @@ public void testRequestWithDisallowedMultiValuedHeaderButSameValues() { Set headers = new HashSet<>( Arrays.asList(new RestHeaderDefinition("header.1", true), new RestHeaderDefinition("header.2", false)) ); - final RestController restController = new RestController(null, client, circuitBreakerService, usageService, tracer); + final RestController restController = new RestController(null, client, circuitBreakerService, usageService, telemetryProvider); Map> restHeaders = new HashMap<>(); restHeaders.put("header.1", Collections.singletonList("boo")); restHeaders.put("header.2", List.of("foo", "foo")); @@ -267,7 +395,7 @@ public void testRegisterAsReplacedHandler() { } public void testRegisterSecondMethodWithDifferentNamedWildcard() { - final RestController restController = new RestController(null, null, circuitBreakerService, usageService, tracer); + final RestController restController = new RestController(null, null, circuitBreakerService, usageService, telemetryProvider); RestRequest.Method firstMethod = randomFrom(methodList); RestRequest.Method secondMethod = randomFrom(methodList.stream().filter(m -> m != firstMethod).toList()); @@ -297,7 +425,13 @@ public void testRestInterceptor() throws Exception { wrapperCalled.set(true); listener.onResponse(callHandler); }; - final RestController restController = new RestController(interceptor, client, circuitBreakerService, usageService, tracer); + final RestController restController = new RestController( + interceptor, + client, + circuitBreakerService, + usageService, + telemetryProvider + ); restController.registerHandler(new Route(GET, "/wrapped"), handler); RestRequest request = testRestRequest("/wrapped", "{}", XContentType.JSON); AssertingChannel channel = new AssertingChannel(request, true, RestStatus.BAD_REQUEST); @@ -384,7 +518,7 @@ public void testDispatchRequiresContentTypeForRequestsWithContent() { String content = randomAlphaOfLength((int) Math.round(BREAKER_LIMIT.getBytes() / inFlightRequestsBreaker.getOverhead())); RestRequest request = testRestRequest("/", content, null); AssertingChannel channel = new AssertingChannel(request, true, RestStatus.NOT_ACCEPTABLE); - restController = new RestController(null, null, circuitBreakerService, usageService, tracer); + restController = new RestController(null, null, circuitBreakerService, usageService, telemetryProvider); restController.registerHandler( new Route(GET, "/"), (r, c, client) -> c.sendResponse(new RestResponse(RestStatus.OK, RestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY)) @@ -779,7 +913,7 @@ public Method method() { public void testDispatchCompatibleHandler() { - RestController restController = new RestController(null, client, circuitBreakerService, usageService, tracer); + RestController restController = 
new RestController(null, client, circuitBreakerService, usageService, telemetryProvider); final RestApiVersion version = RestApiVersion.minimumSupported(); @@ -803,7 +937,7 @@ public void testDispatchCompatibleHandler() { public void testDispatchCompatibleRequestToNewlyAddedHandler() { - RestController restController = new RestController(null, client, circuitBreakerService, usageService, tracer); + RestController restController = new RestController(null, client, circuitBreakerService, usageService, telemetryProvider); final RestApiVersion version = RestApiVersion.minimumSupported(); @@ -846,7 +980,7 @@ private FakeRestRequest requestWithContent(String mediaType) { } public void testCurrentVersionVNDMediaTypeIsNotUsingCompatibility() { - RestController restController = new RestController(null, client, circuitBreakerService, usageService, tracer); + RestController restController = new RestController(null, client, circuitBreakerService, usageService, telemetryProvider); final RestApiVersion version = RestApiVersion.current(); @@ -871,7 +1005,7 @@ public void testCurrentVersionVNDMediaTypeIsNotUsingCompatibility() { } public void testCustomMediaTypeValidation() { - RestController restController = new RestController(null, client, circuitBreakerService, usageService, tracer); + RestController restController = new RestController(null, client, circuitBreakerService, usageService, telemetryProvider); final String mediaType = "application/x-protobuf"; FakeRestRequest fakeRestRequest = requestWithContent(mediaType); @@ -897,7 +1031,7 @@ public void handleRequest(RestRequest request, RestChannel channel, NodeClient c } public void testBrowserSafelistedContentTypesAreRejected() { - RestController restController = new RestController(null, client, circuitBreakerService, usageService, tracer); + RestController restController = new RestController(null, client, circuitBreakerService, usageService, telemetryProvider); final String mediaType = randomFrom(RestController.SAFELISTED_MEDIA_TYPES); FakeRestRequest fakeRestRequest = requestWithContent(mediaType); @@ -918,7 +1052,7 @@ public void handleRequest(RestRequest request, RestChannel channel, NodeClient c } public void testRegisterWithReservedPath() { - final RestController restController = new RestController(null, client, circuitBreakerService, usageService, tracer); + final RestController restController = new RestController(null, client, circuitBreakerService, usageService, telemetryProvider); for (String path : RestController.RESERVED_PATHS) { IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> { restController.registerHandler( @@ -936,7 +1070,13 @@ public void testRegisterWithReservedPath() { * Test that when serverless is disabled, all endpoints are available regardless of ServerlessScope annotations. */ public void testApiProtectionWithServerlessDisabled() { - final RestController restController = new RestController(null, client, circuitBreakerService, new UsageService(), tracer); + final RestController restController = new RestController( + null, + client, + circuitBreakerService, + new UsageService(), + telemetryProvider + ); restController.registerHandler(new PublicRestHandler()); restController.registerHandler(new InternalRestHandler()); restController.registerHandler(new HiddenRestHandler()); @@ -952,7 +1092,13 @@ public void testApiProtectionWithServerlessDisabled() { * Test that when serverless is enabled, a normal user can not access endpoints without a ServerlessScope annotation. 
*/ public void testApiProtectionWithServerlessEnabledAsEndUser() { - final RestController restController = new RestController(null, client, circuitBreakerService, new UsageService(), tracer); + final RestController restController = new RestController( + null, + client, + circuitBreakerService, + new UsageService(), + telemetryProvider + ); restController.registerHandler(new PublicRestHandler()); restController.registerHandler(new InternalRestHandler()); restController.registerHandler(new HiddenRestHandler()); diff --git a/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java b/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java index 9c38cd2615355..acb1485740238 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.rest.RestHandler.Route; -import org.elasticsearch.telemetry.tracing.Tracer; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestChannel; import org.elasticsearch.test.rest.FakeRestRequest; @@ -80,7 +80,7 @@ public void testUnsupportedMethodResponseHttpHeader() throws Exception { ); UsageService usageService = new UsageService(); - RestController restController = new RestController(null, null, circuitBreakerService, usageService, Tracer.NOOP); + RestController restController = new RestController(null, null, circuitBreakerService, usageService, TelemetryProvider.NOOP); // A basic RestHandler handles requests to the endpoint RestHandler restHandler = (request, channel, client) -> channel.sendResponse(new RestResponse(RestStatus.OK, "")); diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java index 761d2b454b134..59ab7ec719cf4 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java @@ -26,7 +26,7 @@ import org.elasticsearch.search.AbstractSearchTestCase; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; -import org.elasticsearch.telemetry.tracing.Tracer; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.test.rest.FakeRestChannel; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.TestThreadPool; @@ -53,7 +53,13 @@ public class RestValidateQueryActionTests extends AbstractSearchTestCase { private NodeClient client = new NodeClient(Settings.EMPTY, threadPool); private UsageService usageService = new UsageService(); - private RestController controller = new RestController(null, client, new NoneCircuitBreakerService(), usageService, Tracer.NOOP); + private RestController controller = new RestController( + null, + client, + new NoneCircuitBreakerService(), + usageService, + TelemetryProvider.NOOP + ); private RestValidateQueryAction action = new RestValidateQueryAction(); /** diff --git a/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java b/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java 
index 80c93e05b8bd5..8bd53047b2dc7 100644 --- a/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java @@ -114,10 +114,10 @@ public void testFloatVectorClassBindings() throws IOException { ); e = expectThrows(IllegalArgumentException.class, () -> new Hamming(scoreScript, queryVector, fieldName)); - assertThat(e.getMessage(), containsString("hamming distance is only supported for byte vectors")); + assertThat(e.getMessage(), containsString("hamming distance is only supported for byte or bit vectors")); e = expectThrows(IllegalArgumentException.class, () -> new Hamming(scoreScript, invalidQueryVector, fieldName)); - assertThat(e.getMessage(), containsString("hamming distance is only supported for byte vectors")); + assertThat(e.getMessage(), containsString("hamming distance is only supported for byte or bit vectors")); // Check scripting infrastructure integration DotProduct dotProduct = new DotProduct(scoreScript, queryVector, fieldName); diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 7aa894f0e8aed..7ddcc88facb2a 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -47,7 +47,10 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; @@ -151,6 +154,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import java.util.function.Function; +import java.util.function.IntConsumer; import java.util.function.Supplier; import static java.util.Collections.emptyList; @@ -596,7 +600,7 @@ public RankShardResult buildRankFeatureShardResult(SearchHits hits, int shardId) // execute fetch phase and perform any validations once we retrieve the response // the difference in how we do assertions here is needed because once the transport service sends back the response // it decrements the reference to the FetchSearchResult (through the ActionListener#respondAndRelease) and sets hits to null - service.executeFetchPhase(fetchRequest, searchTask, new ActionListener<>() { + PlainActionFuture fetchListener = new PlainActionFuture<>() { @Override public void onResponse(FetchSearchResult fetchSearchResult) { assertNotNull(fetchSearchResult); @@ -610,13 +614,17 @@ public void onResponse(FetchSearchResult fetchSearchResult) { assertNotNull(hit.getFields().get(fetchFieldName)); assertEquals(hit.getFields().get(fetchFieldName).getValue(), fetchFieldValue + "_" + hit.docId()); } + super.onResponse(fetchSearchResult); } @Override public void onFailure(Exception e) { + super.onFailure(e); throw new AssertionError("No failure should have been raised", e); } - }); + }; + service.executeFetchPhase(fetchRequest, searchTask, fetchListener); + fetchListener.get(); } catch (Exception ex) { if (queryResult != null) { if (queryResult.hasReferences()) { @@ -1981,6 +1989,38 @@ 
public void testCreateReduceContext() { } } + public void testMultiBucketConsumerServiceCB() { + MultiBucketConsumerService service = new MultiBucketConsumerService( + getInstanceFromNode(ClusterService.class), + Settings.EMPTY, + new NoopCircuitBreaker("test") { + + @Override + public void addEstimateBytesAndMaybeBreak(long bytes, String label) throws CircuitBreakingException { + throw new CircuitBreakingException("tripped", getDurability()); + } + } + ); + // for partial + { + IntConsumer consumer = service.createForPartial(); + for (int i = 0; i < 1023; i++) { + consumer.accept(0); + } + CircuitBreakingException ex = expectThrows(CircuitBreakingException.class, () -> consumer.accept(0)); + assertThat(ex.getMessage(), equalTo("tripped")); + } + // for final + { + IntConsumer consumer = service.createForFinal(); + for (int i = 0; i < 1023; i++) { + consumer.accept(0); + } + CircuitBreakingException ex = expectThrows(CircuitBreakingException.class, () -> consumer.accept(0)); + assertThat(ex.getMessage(), equalTo("tripped")); + } + } + public void testCreateSearchContext() throws IOException { String index = randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT); IndexService indexService = createIndex(index); diff --git a/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchResponseCountTelemetryTests.java b/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchResponseCountTelemetryTests.java new file mode 100644 index 0000000000000..af2137c046235 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchResponseCountTelemetryTests.java @@ -0,0 +1,241 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.TelemetryMetrics; + +import org.elasticsearch.action.search.ClearScrollResponse; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.rest.action.search.SearchResponseMetrics; +import org.elasticsearch.search.query.ThrowingQueryBuilder; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.TestTelemetryPlugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.junit.After; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; + +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.index.query.QueryBuilders.simpleQueryStringQuery; +import static org.elasticsearch.rest.action.search.SearchResponseMetrics.RESPONSE_COUNT_TOTAL_COUNTER_NAME; +import static org.elasticsearch.rest.action.search.SearchResponseMetrics.RESPONSE_COUNT_TOTAL_STATUS_ATTRIBUTE_NAME; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertScrollResponsesAndHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class SearchResponseCountTelemetryTests extends ESSingleNodeTestCase { + + private static final String indexName = "test_search_response_count_metrics"; + + private TestTelemetryPlugin getTestTelemetryPlugin() { + return getInstanceFromNode(PluginsService.class).filterPlugins(TestTelemetryPlugin.class).toList().get(0); + } + + @After + private void resetMeter() { + getTestTelemetryPlugin().resetMeter(); + } + + @Override + protected boolean resetNodeAfterTest() { + return true; + } + + @Before + public void setUpIndex() throws Exception { + var numPrimaries = randomIntBetween(3, 5); + createIndex( + indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numPrimaries) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + ensureGreen(indexName); + + prepareIndex(indexName).setId("1").setSource("body", "red").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(indexName).setId("2").setSource("body", "green").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(indexName).setId("3").setSource("body", "blue").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(indexName).setId("4").setSource("body", "blue").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(indexName).setId("5").setSource("body", "pink").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(indexName).setId("6").setSource("body", "brown").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(indexName).setId("7").setSource("body", "red").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(indexName).setId("8").setSource("body", "purple").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(indexName).setId("9").setSource("body", "black").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(indexName).setId("10").setSource("body", "green").setRefreshPolicy(IMMEDIATE).get(); + } 
+ + @Override + protected Collection<Class<? extends Plugin>> getPlugins() { + return pluginList(TestTelemetryPlugin.class, TestQueryBuilderPlugin.class); + } + + public static class TestQueryBuilderPlugin extends Plugin implements SearchPlugin { + public TestQueryBuilderPlugin() {} + + @Override + public List<QuerySpec<?>> getQueries() { + QuerySpec<?> throwingSpec = new QuerySpec<>(ThrowingQueryBuilder.NAME, ThrowingQueryBuilder::new, p -> { + throw new IllegalStateException("not implemented"); + }); + + return List.of(throwingSpec); + } + } + + public void testSimpleQuery() throws Exception { + assertSearchHitsWithoutFailures(client().prepareSearch(indexName).setQuery(simpleQueryStringQuery("green")), "2", "10"); + assertBusy(() -> { + List<Measurement> measurements = getTestTelemetryPlugin().getLongCounterMeasurement(RESPONSE_COUNT_TOTAL_COUNTER_NAME); + assertThat(measurements.size(), equalTo(1)); + assertThat(measurements.get(0).getLong(), equalTo(1L)); + assertThat( + measurements.get(0).attributes().get(RESPONSE_COUNT_TOTAL_STATUS_ATTRIBUTE_NAME), + equalTo(SearchResponseMetrics.ResponseCountTotalStatus.SUCCESS.getDisplayName()) + ); + }); + } + + public void testSearchWithSingleShardFailure() throws Exception { + ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder(randomLong(), new IllegalStateException("something bad"), 0); + SearchResponse searchResponse = client().prepareSearch(indexName).setQuery(queryBuilder).get(); + try { + assertThat(searchResponse.getFailedShards(), equalTo(1)); + assertBusy(() -> { + List<Measurement> measurements = getTestTelemetryPlugin().getLongCounterMeasurement(RESPONSE_COUNT_TOTAL_COUNTER_NAME); + assertThat(measurements.size(), equalTo(1)); + assertThat(measurements.get(0).getLong(), equalTo(1L)); + assertThat( + measurements.get(0).attributes().get(RESPONSE_COUNT_TOTAL_STATUS_ATTRIBUTE_NAME), + equalTo(SearchResponseMetrics.ResponseCountTotalStatus.PARTIAL_FAILURE.getDisplayName()) + ); + }); + } finally { + searchResponse.decRef(); + } + } + + public void testSearchWithAllShardsFail() throws Exception { + ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder(randomLong(), new IllegalStateException("something bad"), indexName); + SearchPhaseExecutionException exception = expectThrows( + SearchPhaseExecutionException.class, + client().prepareSearch(indexName).setQuery(queryBuilder) + ); + assertThat(exception.getCause().getMessage(), containsString("something bad")); + assertBusy(() -> { + List<Measurement> measurements = getTestTelemetryPlugin().getLongCounterMeasurement(RESPONSE_COUNT_TOTAL_COUNTER_NAME); + assertThat(measurements.size(), equalTo(1)); + assertThat(measurements.get(0).getLong(), equalTo(1L)); + assertThat( + measurements.get(0).attributes().get(RESPONSE_COUNT_TOTAL_STATUS_ATTRIBUTE_NAME), + equalTo(SearchResponseMetrics.ResponseCountTotalStatus.FAILURE.getDisplayName()) + ); + }); + } + + public void testScroll() { + assertScrollResponsesAndHitCount( + client(), + TimeValue.timeValueSeconds(60), + client().prepareSearch(indexName).setSize(1).setQuery(simpleQueryStringQuery("green")), + 2, + (respNum, response) -> { + if (respNum <= 2) { + try { + assertBusy(() -> { + List<Measurement> measurements = getTestTelemetryPlugin().getLongCounterMeasurement( + RESPONSE_COUNT_TOTAL_COUNTER_NAME + ); + assertThat(measurements.size(), equalTo(1)); + assertThat(measurements.get(0).getLong(), equalTo(1L)); + assertThat( + measurements.get(0).attributes().get(RESPONSE_COUNT_TOTAL_STATUS_ATTRIBUTE_NAME), + equalTo(SearchResponseMetrics.ResponseCountTotalStatus.SUCCESS.getDisplayName()) + ); + }); + } catch (Exception
e) { + throw new RuntimeException(e); + } + } + resetMeter(); + } + ); + } + + public void testScrollWithSingleShardFailure() throws Exception { + ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder(randomLong(), new IllegalStateException("something bad"), 0); + SearchRequestBuilder searchRequestBuilder = client().prepareSearch(indexName).setSize(1).setQuery(queryBuilder); + TimeValue keepAlive = TimeValue.timeValueSeconds(60); + searchRequestBuilder.setScroll(keepAlive); + List<SearchResponse> responses = new ArrayList<>(); + var scrollResponse = searchRequestBuilder.get(); + responses.add(scrollResponse); + try { + assertBusy(() -> { + List<Measurement> measurements = getTestTelemetryPlugin().getLongCounterMeasurement(RESPONSE_COUNT_TOTAL_COUNTER_NAME); + assertThat(measurements.size(), equalTo(1)); + assertThat(measurements.get(0).getLong(), equalTo(1L)); + assertThat( + measurements.get(0).attributes().get(RESPONSE_COUNT_TOTAL_STATUS_ATTRIBUTE_NAME), + equalTo(SearchResponseMetrics.ResponseCountTotalStatus.PARTIAL_FAILURE.getDisplayName()) + ); + }); + int numResponses = 1; + while (scrollResponse.getHits().getHits().length > 0) { + scrollResponse = client().prepareSearchScroll(scrollResponse.getScrollId()).setScroll(keepAlive).get(); + int expectedNumMeasurements = ++numResponses; + responses.add(scrollResponse); + assertBusy(() -> { + List<Measurement> measurements = getTestTelemetryPlugin().getLongCounterMeasurement(RESPONSE_COUNT_TOTAL_COUNTER_NAME); + // verify that one additional measurement was recorded (in TransportScrollSearchAction) + assertThat(measurements.size(), equalTo(expectedNumMeasurements)); + // verify that zero shards failed in secondary scroll search rounds + assertThat(measurements.get(expectedNumMeasurements - 1).getLong(), equalTo(1L)); + assertThat( + measurements.get(expectedNumMeasurements - 1).attributes().get(RESPONSE_COUNT_TOTAL_STATUS_ATTRIBUTE_NAME), + equalTo(SearchResponseMetrics.ResponseCountTotalStatus.SUCCESS.getDisplayName()) + ); + }); + } + } finally { + ClearScrollResponse clear = client().prepareClearScroll().setScrollIds(Arrays.asList(scrollResponse.getScrollId())).get(); + responses.forEach(SearchResponse::decRef); + assertThat(clear.isSucceeded(), equalTo(true)); + } + } + + public void testScrollWithAllShardsFail() throws Exception { + ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder(randomLong(), new IllegalStateException("something bad"), indexName); + SearchPhaseExecutionException exception = expectThrows( + SearchPhaseExecutionException.class, + client().prepareSearch(indexName).setSize(1).setQuery(queryBuilder).setScroll(TimeValue.timeValueSeconds(60)) + ); + assertThat(exception.getCause().getMessage(), containsString("something bad")); + assertBusy(() -> { + List<Measurement> measurements = getTestTelemetryPlugin().getLongCounterMeasurement(RESPONSE_COUNT_TOTAL_COUNTER_NAME); + assertThat(measurements.size(), equalTo(1)); + assertThat(measurements.get(0).getLong(), equalTo(1L)); + assertThat( + measurements.get(0).attributes().get(RESPONSE_COUNT_TOTAL_STATUS_ATTRIBUTE_NAME), + equalTo(SearchResponseMetrics.ResponseCountTotalStatus.FAILURE.getDisplayName()) + ); + }); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java index d431a3a156957..7243db95bf826 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java @@
-211,8 +211,8 @@ public boolean supportsParallelCollection(ToLongFunction fieldCardinalit { TermsAggregationBuilder terms = new TermsAggregationBuilder("terms"); terms.shardSize(10); - assertTrue(terms.supportsParallelCollection(field -> randomIntBetween(1, 10))); - assertFalse(terms.supportsParallelCollection(field -> randomIntBetween(11, 100))); + assertTrue(terms.supportsParallelCollection(field -> randomIntBetween(1, 9))); + assertFalse(terms.supportsParallelCollection(field -> randomIntBetween(10, 100))); } { TermsAggregationBuilder terms = new TermsAggregationBuilder("terms"); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java index 4ec2e5ab49cd3..91078c9babe3d 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java @@ -766,8 +766,8 @@ public void testNestedWithPipeline() throws IOException { assertNotNull(terms); for (LongTerms.Bucket bucket : terms.getBuckets()) { - Max max = (Max) bucket.getAggregations().asMap().get(MAX_AGG_NAME); - InternalSimpleValue bucketScript = (InternalSimpleValue) bucket.getAggregations().asMap().get("bucketscript"); + Max max = (Max) bucket.getAggregations().get(MAX_AGG_NAME); + InternalSimpleValue bucketScript = (InternalSimpleValue) bucket.getAggregations().get("bucketscript"); assertNotNull(max); assertNotNull(bucketScript); assertEquals(max.value(), -bucketScript.getValue(), Double.MIN_VALUE); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java index 6efcb6c2b99e2..aea87b3394525 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java @@ -768,7 +768,7 @@ public void testSingleValuedFieldGlobalAggregation() throws IOException { assertEquals("global", global.getName()); assertEquals(numDocs * 2, global.getDocCount()); assertNotNull(global.getAggregations()); - assertEquals(1, global.getAggregations().asMap().size()); + assertEquals(1, global.getAggregations().asList().size()); final Cardinality cardinality = global.getAggregations().get("cardinality"); assertNotNull(cardinality); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalCardinalityTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalCardinalityTests.java index 0e615da36d7e4..74340ab5f1bf9 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalCardinalityTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalCardinalityTests.java @@ -57,7 +57,7 @@ private InternalCardinality createTestInstance(String name, Map 1 ); algos.add(hllpp); - int values = between(0, 1000); + int values = between(20, 1000); for (int i = 0; i < values; i++) { hllpp.collect(0, BitMixer.mix64(randomInt())); } @@ -99,7 +99,8 @@ protected InternalCardinality mutateInstance(InternalCardinality instance) { new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()), 0 ); - for (int i = 0; i < 10; i++) { + int values = 
between(0, 10); + for (int i = 0; i < values; i++) { newState.collect(0, BitMixer.mix64(randomIntBetween(500, 10000))); } algos.add(newState); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java index 50e653d7e5216..5cf7b2f82b940 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java @@ -440,7 +440,7 @@ public void testSingleValuedFieldGetProperty() throws IOException { assertEquals("global", global.getName()); assertEquals(10L, global.getDocCount()); assertNotNull(global.getAggregations()); - assertEquals(1, global.getAggregations().asMap().size()); + assertEquals(1, global.getAggregations().asList().size()); Max max = global.getAggregations().get("max"); assertNotNull(max); @@ -651,7 +651,7 @@ public void testEmptyAggregation() throws Exception { assertEquals("global", global.getName()); assertEquals(0L, global.getDocCount()); assertNotNull(global.getAggregations()); - assertEquals(1, global.getAggregations().asMap().size()); + assertEquals(1, global.getAggregations().asList().size()); Max max = global.getAggregations().get("max"); assertNotNull(max); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinAggregatorTests.java index def58da97c7ca..79ccdceb00613 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinAggregatorTests.java @@ -297,18 +297,18 @@ public void testEmptyBucket() throws IOException { }, (Consumer) histo -> { assertThat(histo.getBuckets().size(), equalTo(3)); - assertNotNull(histo.getBuckets().get(0).getAggregations().asMap().get("min")); - Min min = (Min) histo.getBuckets().get(0).getAggregations().asMap().get("min"); + assertNotNull(histo.getBuckets().get(0).getAggregations().get("min")); + Min min = (Min) histo.getBuckets().get(0).getAggregations().get("min"); assertEquals(1.0, min.value(), 0); assertTrue(AggregationInspectionHelper.hasValue(min)); - assertNotNull(histo.getBuckets().get(1).getAggregations().asMap().get("min")); - min = (Min) histo.getBuckets().get(1).getAggregations().asMap().get("min"); + assertNotNull(histo.getBuckets().get(1).getAggregations().get("min")); + min = (Min) histo.getBuckets().get(1).getAggregations().get("min"); assertEquals(Double.POSITIVE_INFINITY, min.value(), 0); assertFalse(AggregationInspectionHelper.hasValue(min)); - assertNotNull(histo.getBuckets().get(2).getAggregations().asMap().get("min")); - min = (Min) histo.getBuckets().get(2).getAggregations().asMap().get("min"); + assertNotNull(histo.getBuckets().get(2).getAggregations().get("min")); + min = (Min) histo.getBuckets().get(2).getAggregations().get("min"); assertEquals(3.0, min.value(), 0); assertTrue(AggregationInspectionHelper.hasValue(min)); @@ -343,9 +343,9 @@ public void testGetProperty() throws IOException { }, (Consumer) global -> { assertEquals(2, global.getDocCount()); assertTrue(AggregationInspectionHelper.hasValue(global)); - assertNotNull(global.getAggregations().asMap().get("min")); + assertNotNull(global.getAggregations().get("min")); - Min min = (Min) global.getAggregations().asMap().get("min"); + Min min = (Min) 
global.getAggregations().get("min"); assertEquals(1.0, min.value(), 0); assertThat(global.getProperty("min"), equalTo(min)); assertThat(global.getProperty("min.value"), equalTo(1.0)); diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoPointShapeQueryTests.java b/server/src/test/java/org/elasticsearch/search/geo/GeoPointShapeQueryTests.java index 779e0ad28433a..af408299c4150 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/GeoPointShapeQueryTests.java +++ b/server/src/test/java/org/elasticsearch/search/geo/GeoPointShapeQueryTests.java @@ -37,7 +37,7 @@ protected SpatialQueryBuilders queryBuilder() { @Override protected String fieldTypeName() { - return "geo_shape"; + return "keyword"; } @Override diff --git a/server/src/test/java/org/elasticsearch/search/profile/AbstractProfileBreakdownTests.java b/server/src/test/java/org/elasticsearch/search/profile/AbstractProfileBreakdownTests.java index b8b12357b085e..e988599fccc3b 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/AbstractProfileBreakdownTests.java +++ b/server/src/test/java/org/elasticsearch/search/profile/AbstractProfileBreakdownTests.java @@ -11,7 +11,6 @@ import org.elasticsearch.test.ESTestCase; import java.util.Map; -import java.util.concurrent.CountDownLatch; import static org.hamcrest.Matchers.equalTo; @@ -107,35 +106,21 @@ public void testGetBreakdownAndNodeTime() { public void testMultiThreaded() throws InterruptedException { TestProfileBreakdown testBreakdown = new TestProfileBreakdown(); - Thread[] threads = new Thread[200]; - final CountDownLatch latch = new CountDownLatch(1); + final int threads = 200; int startsPerThread = between(1, 5); - for (int t = 0; t < threads.length; t++) { - final TestTimingTypes timingType = randomFrom(TestTimingTypes.values()); - threads[t] = new Thread(() -> { - try { - latch.await(); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - Timer timer = testBreakdown.getNewTimer(timingType); - for (int runs = 0; runs < startsPerThread; runs++) { - timer.start(); - timer.stop(); - } - }); - threads[t].start(); - } // starting all threads simultaneously increases the likelihood of failure in case we don't synchronize timer access properly - latch.countDown(); - for (Thread t : threads) { - t.join(); - } + startInParallel(threads, t -> { + final TestTimingTypes timingType = randomFrom(TestTimingTypes.values()); + Timer timer = testBreakdown.getNewTimer(timingType); + for (int runs = 0; runs < startsPerThread; runs++) { + timer.start(); + timer.stop(); + } + }); Map breakdownMap = testBreakdown.toBreakdownMap(); long totalCounter = breakdownMap.get(TestTimingTypes.ONE + "_count") + breakdownMap.get(TestTimingTypes.TWO + "_count") + breakdownMap.get(TestTimingTypes.THREE + "_count"); - assertEquals(threads.length * startsPerThread, totalCounter); - + assertEquals(threads * startsPerThread, totalCounter); } private void runTimerNTimes(Timer t, int n) { diff --git a/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java b/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java index cbbbe7d86f4e2..de35d765a1551 100644 --- a/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java +++ b/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java @@ -10,9 +10,14 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.settings.Settings; +import 
org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.MatchNoneQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.RandomQueryBuilder; +import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.AbstractXContentTestCase; import org.elasticsearch.usage.SearchUsage; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -23,6 +28,10 @@ import java.util.List; import static org.elasticsearch.search.vectors.KnnSearchBuilderTests.randomVector; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; public class KnnRetrieverBuilderParsingTests extends AbstractXContentTestCase { @@ -34,7 +43,7 @@ public class KnnRetrieverBuilderParsingTests extends AbstractXContentTestCase ssb.parseXContent(parser, true, nf -> true)); - assertEquals("cannot specify [retriever] and [query]", iae.getMessage()); + ssb.parseXContent(parser, true, nf -> true); + ActionRequestValidationException iae = ssb.validate(null, false); + assertNotNull(iae); + assertThat(iae.getMessage(), containsString("cannot specify [retriever] and [query]")); } try ( @@ -44,26 +49,35 @@ public void testRetrieverExtractionErrors() throws IOException { ) ) { SearchSourceBuilder ssb = new SearchSourceBuilder(); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); - assertEquals("cannot specify [retriever] and [knn]", iae.getMessage()); + ssb.parseXContent(parser, true, nf -> true); + ActionRequestValidationException iae = ssb.validate(null, false); + assertNotNull(iae); + assertThat(iae.getMessage(), containsString("cannot specify [retriever] and [knn]")); } try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"search_after\": [1], \"retriever\":{\"standard\":{}}}")) { SearchSourceBuilder ssb = new SearchSourceBuilder(); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); - assertEquals("cannot specify [retriever] and [search_after]", iae.getMessage()); + ssb.parseXContent(parser, true, nf -> true); + ActionRequestValidationException iae = ssb.validate(null, false); + assertNotNull(iae); + assertThat(iae.getMessage(), containsString("cannot specify [retriever] and [search_after]")); + } try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"terminate_after\": 1, \"retriever\":{\"standard\":{}}}")) { SearchSourceBuilder ssb = new SearchSourceBuilder(); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); - assertEquals("cannot specify [retriever] and [terminate_after]", iae.getMessage()); + ssb.parseXContent(parser, true, nf -> true); + ActionRequestValidationException iae = ssb.validate(null, false); + assertNotNull(iae); + assertThat(iae.getMessage(), containsString("cannot specify [retriever] and [terminate_after]")); } try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"sort\": [\"field\"], \"retriever\":{\"standard\":{}}}")) { SearchSourceBuilder ssb = new SearchSourceBuilder(); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> 
ssb.parseXContent(parser, true, nf -> true)); - assertEquals("cannot specify [retriever] and [sort]", iae.getMessage()); + ssb.parseXContent(parser, true, nf -> true); + ActionRequestValidationException iae = ssb.validate(null, false); + assertNotNull(iae); + assertThat(iae.getMessage(), containsString("cannot specify [retriever] and [sort]")); } try ( @@ -73,14 +87,18 @@ public void testRetrieverExtractionErrors() throws IOException { ) ) { SearchSourceBuilder ssb = new SearchSourceBuilder(); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); - assertEquals("cannot specify [retriever] and [rescore]", iae.getMessage()); + ssb.parseXContent(parser, true, nf -> true); + ActionRequestValidationException iae = ssb.validate(null, false); + assertNotNull(iae); + assertThat(iae.getMessage(), containsString("cannot specify [retriever] and [rescore]")); } try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"min_score\": 2, \"retriever\":{\"standard\":{}}}")) { SearchSourceBuilder ssb = new SearchSourceBuilder(); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); - assertEquals("cannot specify [retriever] and [min_score]", iae.getMessage()); + ssb.parseXContent(parser, true, nf -> true); + ActionRequestValidationException iae = ssb.validate(null, false); + assertNotNull(iae); + assertThat(iae.getMessage(), containsString("cannot specify [retriever] and [min_score]")); } try ( @@ -90,8 +108,10 @@ public void testRetrieverExtractionErrors() throws IOException { ) ) { SearchSourceBuilder ssb = new SearchSourceBuilder(); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); - assertEquals("cannot specify [retriever] and [query, terminate_after, min_score]", iae.getMessage()); + ssb.parseXContent(parser, true, nf -> true); + ActionRequestValidationException iae = ssb.validate(null, false); + assertNotNull(iae); + assertThat(iae.getMessage(), containsString("cannot specify [retriever] and [query, terminate_after, min_score]")); } } diff --git a/server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java b/server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java index bec534d89cc03..cd0d8f8d50c1e 100644 --- a/server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java +++ b/server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java @@ -11,8 +11,15 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.MatchNoneQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.RandomQueryBuilder; +import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.collapse.CollapseBuilderTests; import org.elasticsearch.search.searchafter.SearchAfterBuilderTests; import org.elasticsearch.search.sort.SortBuilderTests; @@ -27,6 +34,11 @@ 
import java.util.List; import java.util.function.BiFunction; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; + public class StandardRetrieverBuilderParsingTests extends AbstractXContentTestCase { /** @@ -59,7 +71,7 @@ public static StandardRetrieverBuilder createRandomStandardRetrieverBuilder( } if (randomBoolean()) { - standardRetrieverBuilder.sortBuilders = SortBuilderTests.randomSortBuilderList(); + standardRetrieverBuilder.sortBuilders = SortBuilderTests.randomSortBuilderList(false); } if (randomBoolean()) { @@ -109,4 +121,52 @@ protected String[] getShuffleFieldsExceptions() { protected NamedXContentRegistry xContentRegistry() { return new NamedXContentRegistry(new SearchModule(Settings.EMPTY, List.of()).getNamedXContents()); } + + public void testRewrite() throws IOException { + for (int i = 0; i < 10; i++) { + StandardRetrieverBuilder standardRetriever = createTestInstance(); + SearchSourceBuilder source = new SearchSourceBuilder().retriever(standardRetriever); + QueryRewriteContext queryRewriteContext = mock(QueryRewriteContext.class); + source = Rewriteable.rewrite(source, queryRewriteContext); + assertNull(source.retriever()); + assertTrue(source.knnSearch().isEmpty()); + if (standardRetriever.queryBuilder != null) { + assertNotNull(source.query()); + if (standardRetriever.preFilterQueryBuilders.size() > 0) { + if (source.query() instanceof MatchAllQueryBuilder == false + && source.query() instanceof MatchNoneQueryBuilder == false) { + assertThat(source.query(), instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder bq = (BoolQueryBuilder) source.query(); + assertFalse(bq.must().isEmpty()); + assertThat(bq.must().size(), equalTo(1)); + assertThat(bq.must().get(0), equalTo(standardRetriever.queryBuilder)); + for (int j = 0; j < bq.filter().size(); j++) { + assertEqualQueryOrMatchAllNone(bq.filter().get(j), standardRetriever.preFilterQueryBuilders.get(j)); + } + } + } else { + assertEqualQueryOrMatchAllNone(source.query(), standardRetriever.queryBuilder); + } + } else if (standardRetriever.preFilterQueryBuilders.size() > 0) { + if (source.query() instanceof MatchAllQueryBuilder == false && source.query() instanceof MatchNoneQueryBuilder == false) { + assertNotNull(source.query()); + assertThat(source.query(), instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder bq = (BoolQueryBuilder) source.query(); + assertTrue(bq.must().isEmpty()); + for (int j = 0; j < bq.filter().size(); j++) { + assertEqualQueryOrMatchAllNone(bq.filter().get(j), standardRetriever.preFilterQueryBuilders.get(j)); + } + } + } else { + assertNull(source.query()); + } + if (standardRetriever.sortBuilders != null) { + assertThat(source.sorts().size(), equalTo(standardRetriever.sortBuilders.size())); + } + } + } + + private static void assertEqualQueryOrMatchAllNone(QueryBuilder actual, QueryBuilder expected) { + assertThat(actual, anyOf(instanceOf(MatchAllQueryBuilder.class), instanceOf(MatchNoneQueryBuilder.class), equalTo(expected))); + } } diff --git a/server/src/test/java/org/elasticsearch/search/sort/SortBuilderTests.java b/server/src/test/java/org/elasticsearch/search/sort/SortBuilderTests.java index eee98297c7a13..84f87b3f01881 100644 --- a/server/src/test/java/org/elasticsearch/search/sort/SortBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/sort/SortBuilderTests.java @@ -119,7 +119,7 @@ public void testSingleFieldSort() throws IOException { public void 
testRandomSortBuilders() throws IOException { for (int runs = 0; runs < NUMBER_OF_RUNS; runs++) { Set<String> expectedWarningHeaders = new HashSet<>(); - List<SortBuilder<?>> testBuilders = randomSortBuilderList(); + List<SortBuilder<?>> testBuilders = randomSortBuilderList(randomBoolean()); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder(); xContentBuilder.startObject(); if (testBuilders.size() > 1) { @@ -171,7 +171,7 @@ public void testRandomSortBuilders() throws IOException { } } - public static List<SortBuilder<?>> randomSortBuilderList() { + public static List<SortBuilder<?>> randomSortBuilderList(boolean hasPIT) { int size = randomIntBetween(1, 5); List<SortBuilder<?>> list = new ArrayList<>(size); for (int i = 0; i < size; i++) { @@ -181,7 +181,7 @@ public static List<SortBuilder<?>> randomSortBuilderList() { case 2 -> SortBuilders.fieldSort(FieldSortBuilder.DOC_FIELD_NAME); case 3 -> GeoDistanceSortBuilderTests.randomGeoDistanceSortBuilder(); case 4 -> ScriptSortBuilderTests.randomScriptSortBuilder(); - case 5 -> SortBuilders.pitTiebreaker(); + case 5 -> hasPIT ? SortBuilders.pitTiebreaker() : ScriptSortBuilderTests.randomScriptSortBuilder(); default -> throw new IllegalStateException("unexpected randomization in tests"); }); } diff --git a/server/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java b/server/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java index 06fdee30968b9..76b6097d7012a 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java @@ -112,7 +112,7 @@ public void testIndexingWithNoContexts() throws Exception { XContentType.JSON ) ); - List<IndexableField> fields = parsedDocument.rootDoc().getFields(fieldMapper.name()); + List<IndexableField> fields = parsedDocument.rootDoc().getFields(fieldMapper.fullPath()); assertContextSuggestFields(fields, 7); } @@ -156,7 +156,7 @@ public void testIndexingWithSimpleContexts() throws Exception { XContentType.JSON ) ); - List<IndexableField> fields = parsedDocument.rootDoc().getFields(fieldMapper.name()); + List<IndexableField> fields = parsedDocument.rootDoc().getFields(fieldMapper.fullPath()); assertContextSuggestFields(fields, 3); } @@ -200,7 +200,7 @@ public void testIndexingWithSimpleNumberContexts() throws Exception { XContentType.JSON ) ); - List<IndexableField> fields = parsedDocument.rootDoc().getFields(fieldMapper.name()); + List<IndexableField> fields = parsedDocument.rootDoc().getFields(fieldMapper.fullPath()); assertContextSuggestFields(fields, 3); } @@ -244,7 +244,7 @@ public void testIndexingWithSimpleBooleanContexts() throws Exception { XContentType.JSON ) ); - List<IndexableField> fields = parsedDocument.rootDoc().getFields(fieldMapper.name()); + List<IndexableField> fields = parsedDocument.rootDoc().getFields(fieldMapper.fullPath()); assertContextSuggestFields(fields, 3); } @@ -328,7 +328,7 @@ public void testIndexingWithContextList() throws Exception { XContentType.JSON ) ); - List<IndexableField> fields = parsedDocument.rootDoc().getFields(fieldMapper.name()); + List<IndexableField> fields = parsedDocument.rootDoc().getFields(fieldMapper.fullPath()); assertContextSuggestFields(fields, 3); } @@ -370,7 +370,7 @@ public void testIndexingWithMixedTypeContextList() throws Exception { XContentType.JSON ) ); - List<IndexableField> fields = parsedDocument.rootDoc().getFields(fieldMapper.name()); + List<IndexableField> fields = parsedDocument.rootDoc().getFields(fieldMapper.fullPath()); assertContextSuggestFields(fields, 3); } @@ -449,7 +449,7 @@ public void testIndexingWithMultipleContexts() throws Exception { .endArray() .endObject(); ParsedDocument
parsedDocument = defaultMapper.parse(new SourceToParse("1", BytesReference.bytes(builder), XContentType.JSON)); - List fields = parsedDocument.rootDoc().getFields(fieldMapper.name()); + List fields = parsedDocument.rootDoc().getFields(fieldMapper.fullPath()); assertContextSuggestFields(fields, 3); } diff --git a/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java b/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java index b760262cd1ea6..f0899384dbc5e 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; @@ -25,6 +26,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.query.TermQueryBuilder; @@ -52,7 +54,7 @@ abstract class AbstractKnnVectorQueryBuilderTestCase extends AbstractQueryTestCa abstract DenseVectorFieldMapper.ElementType elementType(); - abstract KnnVectorQueryBuilder createKnnVectorQueryBuilder(String fieldName, int numCands, Float similarity); + abstract KnnVectorQueryBuilder createKnnVectorQueryBuilder(String fieldName, Integer k, int numCands, Float similarity); @Override protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { @@ -82,8 +84,9 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws @Override protected KnnVectorQueryBuilder doCreateTestQueryBuilder() { String fieldName = randomBoolean() ? VECTOR_FIELD : VECTOR_ALIAS_FIELD; - int numCands = randomIntBetween(DEFAULT_SIZE, 1000); - KnnVectorQueryBuilder queryBuilder = createKnnVectorQueryBuilder(fieldName, numCands, randomBoolean() ? null : randomFloat()); + Integer k = randomBoolean() ? null : randomIntBetween(1, 100); + int numCands = randomIntBetween(k == null ? DEFAULT_SIZE : k + 20, 1000); + KnnVectorQueryBuilder queryBuilder = createKnnVectorQueryBuilder(fieldName, k, numCands, randomFloat()); if (randomBoolean()) { List filters = new ArrayList<>(); @@ -122,15 +125,17 @@ protected void doAssertLuceneQuery(KnnVectorQueryBuilder queryBuilder, Query que Query filterQuery = booleanQuery.clauses().isEmpty() ? 
null : booleanQuery; // The field should always be resolved to the concrete field Query knnVectorQueryBuilt = switch (elementType()) { - case BYTE -> new ESKnnByteVectorQuery( + case BYTE, BIT -> new ESKnnByteVectorQuery( VECTOR_FIELD, queryBuilder.queryVector().asByteVector(), + queryBuilder.k(), queryBuilder.numCands(), filterQuery ); case FLOAT -> new ESKnnFloatVectorQuery( VECTOR_FIELD, queryBuilder.queryVector().asFloatVector(), + queryBuilder.k(), queryBuilder.numCands(), filterQuery ); @@ -143,16 +148,27 @@ protected void doAssertLuceneQuery(KnnVectorQueryBuilder queryBuilder, Query que public void testWrongDimension() { SearchExecutionContext context = createSearchExecutionContext(); - KnnVectorQueryBuilder query = new KnnVectorQueryBuilder(VECTOR_FIELD, new float[] { 1.0f, 2.0f }, 10, null); + KnnVectorQueryBuilder query = new KnnVectorQueryBuilder(VECTOR_FIELD, new float[] { 1.0f, 2.0f }, 5, 10, null); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> query.doToQuery(context)); - assertThat(e.getMessage(), containsString("the query vector has a different dimension [2] than the index vectors [3]")); + assertThat( + e.getMessage(), + containsString("The query vector has a different number of dimensions [2] than the document vectors [3]") + ); } public void testNonexistentField() { SearchExecutionContext context = createSearchExecutionContext(); - KnnVectorQueryBuilder query = new KnnVectorQueryBuilder("nonexistent", new float[] { 1.0f, 1.0f, 1.0f }, 10, null); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> query.doToQuery(context)); - assertThat(e.getMessage(), containsString("field [nonexistent] does not exist in the mapping")); + KnnVectorQueryBuilder query = new KnnVectorQueryBuilder("nonexistent", new float[] { 1.0f, 1.0f, 1.0f }, 5, 10, null); + context.setAllowUnmappedFields(false); + QueryShardException e = expectThrows(QueryShardException.class, () -> query.doToQuery(context)); + assertThat(e.getMessage(), containsString("No field mapping can be found for the field with name [nonexistent]")); + } + + public void testNonexistentFieldReturnEmpty() throws IOException { + SearchExecutionContext context = createSearchExecutionContext(); + KnnVectorQueryBuilder query = new KnnVectorQueryBuilder("nonexistent", new float[] { 1.0f, 1.0f, 1.0f }, 5, 10, null); + Query queryNone = query.doToQuery(context); + assertThat(queryNone, instanceOf(MatchNoDocsQuery.class)); } public void testWrongFieldType() { @@ -160,6 +176,7 @@ public void testWrongFieldType() { KnnVectorQueryBuilder query = new KnnVectorQueryBuilder( AbstractBuilderTestCase.KEYWORD_FIELD_NAME, new float[] { 1.0f, 1.0f, 1.0f }, + 5, 10, null ); @@ -167,9 +184,19 @@ public void testWrongFieldType() { assertThat(e.getMessage(), containsString("[knn] queries are only supported on [dense_vector] fields")); } + public void testNumCandsLessThanK() { + int k = 5; + int numCands = 3; + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> new KnnVectorQueryBuilder(VECTOR_FIELD, new float[] { 1.0f, 1.0f, 1.0f }, k, numCands, null) + ); + assertThat(e.getMessage(), containsString("[num_candidates] cannot be less than [k]")); + } + @Override public void testValidOutput() { - KnnVectorQueryBuilder query = new KnnVectorQueryBuilder(VECTOR_FIELD, new float[] { 1.0f, 2.0f, 3.0f }, 10, null); + KnnVectorQueryBuilder query = new KnnVectorQueryBuilder(VECTOR_FIELD, new float[] { 1.0f, 2.0f, 3.0f }, null, 10, null); String expected = """ { "knn" : 
{ @@ -183,6 +210,22 @@ public void testValidOutput() { } }"""; assertEquals(expected, query.toString()); + + KnnVectorQueryBuilder query2 = new KnnVectorQueryBuilder(VECTOR_FIELD, new float[] { 1.0f, 2.0f, 3.0f }, 5, 10, null); + String expected2 = """ + { + "knn" : { + "field" : "vector", + "query_vector" : [ + 1.0, + 2.0, + 3.0 + ], + "k" : 5, + "num_candidates" : 10 + } + }"""; + assertEquals(expected2, query2.toString()); } @Override @@ -190,7 +233,13 @@ public void testMustRewrite() throws IOException { SearchExecutionContext context = createSearchExecutionContext(); context.setAllowUnmappedFields(true); TermQueryBuilder termQuery = new TermQueryBuilder("unmapped_field", 42); - KnnVectorQueryBuilder query = new KnnVectorQueryBuilder(VECTOR_FIELD, new float[] { 1.0f, 2.0f, 3.0f }, VECTOR_DIMENSION, null); + KnnVectorQueryBuilder query = new KnnVectorQueryBuilder( + VECTOR_FIELD, + new float[] { 1.0f, 2.0f, 3.0f }, + VECTOR_DIMENSION, + null, + null + ); query.addFilterQuery(termQuery); IllegalStateException e = expectThrows(IllegalStateException.class, () -> query.toQuery(context)); @@ -203,7 +252,7 @@ public void testMustRewrite() throws IOException { public void testBWCVersionSerializationFilters() throws IOException { KnnVectorQueryBuilder query = createTestQueryBuilder(); VectorData vectorData = VectorData.fromFloats(query.queryVector().asFloatVector()); - KnnVectorQueryBuilder queryNoFilters = new KnnVectorQueryBuilder(query.getFieldName(), vectorData, query.numCands(), null) + KnnVectorQueryBuilder queryNoFilters = new KnnVectorQueryBuilder(query.getFieldName(), vectorData, null, query.numCands(), null) .queryName(query.queryName()) .boost(query.boost()); TransportVersion beforeFilterVersion = TransportVersionUtils.randomVersionBetween( @@ -217,7 +266,7 @@ public void testBWCVersionSerializationFilters() throws IOException { public void testBWCVersionSerializationSimilarity() throws IOException { KnnVectorQueryBuilder query = createTestQueryBuilder(); VectorData vectorData = VectorData.fromFloats(query.queryVector().asFloatVector()); - KnnVectorQueryBuilder queryNoSimilarity = new KnnVectorQueryBuilder(query.getFieldName(), vectorData, query.numCands(), null) + KnnVectorQueryBuilder queryNoSimilarity = new KnnVectorQueryBuilder(query.getFieldName(), vectorData, null, query.numCands(), null) .queryName(query.queryName()) .boost(query.boost()) .addFilterQueries(query.filterQueries()); @@ -233,10 +282,13 @@ public void testBWCVersionSerializationQuery() throws IOException { ); Float similarity = differentQueryVersion.before(TransportVersions.V_8_8_0) ? 
null : query.getVectorSimilarity(); VectorData vectorData = VectorData.fromFloats(query.queryVector().asFloatVector()); - KnnVectorQueryBuilder queryOlderVersion = new KnnVectorQueryBuilder(query.getFieldName(), vectorData, query.numCands(), similarity) - .queryName(query.queryName()) - .boost(query.boost()) - .addFilterQueries(query.filterQueries()); + KnnVectorQueryBuilder queryOlderVersion = new KnnVectorQueryBuilder( + query.getFieldName(), + vectorData, + null, + query.numCands(), + similarity + ).queryName(query.queryName()).boost(query.boost()).addFilterQueries(query.filterQueries()); assertBWCSerialization(query, queryOlderVersion, differentQueryVersion); } @@ -263,6 +315,7 @@ public void testRewriteWithQueryVectorBuilder() throws Exception { KnnVectorQueryBuilder knnVectorQueryBuilder = new KnnVectorQueryBuilder( "field", new TestQueryVectorBuilderPlugin.TestQueryVectorBuilder(expectedArray), + null, 5, 1f ); diff --git a/server/src/test/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilderTests.java index 1e77e35b60a4c..5f4fb61718a7e 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilderTests.java @@ -14,17 +14,14 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import org.elasticsearch.index.query.SearchExecutionContext; -import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.AbstractQueryTestCase; -import org.elasticsearch.test.TestGeoShapeFieldMapperPlugin; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import java.io.IOException; import java.util.Arrays; -import java.util.Collection; -import java.util.List; public class ExactKnnQueryBuilderTests extends AbstractQueryTestCase { @@ -50,11 +47,6 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws ); } - @Override - protected Collection> getPlugins() { - return List.of(TestGeoShapeFieldMapperPlugin.class); - } - @Override protected ExactKnnQueryBuilder doCreateTestQueryBuilder() { float[] query = new float[VECTOR_DIMENSION]; @@ -87,7 +79,9 @@ protected void doAssertLuceneQuery(ExactKnnQueryBuilder queryBuilder, Query quer DenseVectorQuery.Floats denseVectorQuery = (DenseVectorQuery.Floats) query; assertEquals(VECTOR_FIELD, denseVectorQuery.field); float[] expected = Arrays.copyOf(queryBuilder.getQuery().asFloatVector(), queryBuilder.getQuery().asFloatVector().length); - if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(IndexVersions.NORMALIZED_VECTOR_COSINE)) { + float magnitude = VectorUtil.dotProduct(expected, expected); + if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(IndexVersions.NORMALIZED_VECTOR_COSINE) + && DenseVectorFieldMapper.isNotUnitVector(magnitude)) { VectorUtil.l2normalize(expected); assertArrayEquals(expected, denseVectorQuery.getQuery(), 0.0f); } else { diff --git a/server/src/test/java/org/elasticsearch/search/vectors/KnnByteVectorQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/search/vectors/KnnByteVectorQueryBuilderTests.java index 6c83700d0b29a..8399fed58494c 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/KnnByteVectorQueryBuilderTests.java +++ 
b/server/src/test/java/org/elasticsearch/search/vectors/KnnByteVectorQueryBuilderTests.java @@ -17,11 +17,11 @@ DenseVectorFieldMapper.ElementType elementType() { } @Override - protected KnnVectorQueryBuilder createKnnVectorQueryBuilder(String fieldName, int numCands, Float similarity) { + protected KnnVectorQueryBuilder createKnnVectorQueryBuilder(String fieldName, Integer k, int numCands, Float similarity) { byte[] vector = new byte[VECTOR_DIMENSION]; for (int i = 0; i < vector.length; i++) { vector[i] = randomByte(); } - return new KnnVectorQueryBuilder(fieldName, vector, numCands, similarity); + return new KnnVectorQueryBuilder(fieldName, vector, k, numCands, similarity); } } diff --git a/server/src/test/java/org/elasticsearch/search/vectors/KnnFloatVectorQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/search/vectors/KnnFloatVectorQueryBuilderTests.java index eeb5244d57943..744256bff4ee3 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/KnnFloatVectorQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/KnnFloatVectorQueryBuilderTests.java @@ -17,11 +17,11 @@ DenseVectorFieldMapper.ElementType elementType() { } @Override - KnnVectorQueryBuilder createKnnVectorQueryBuilder(String fieldName, int numCands, Float similarity) { + KnnVectorQueryBuilder createKnnVectorQueryBuilder(String fieldName, Integer k, int numCands, Float similarity) { float[] vector = new float[VECTOR_DIMENSION]; for (int i = 0; i < vector.length; i++) { vector[i] = randomFloat(); } - return new KnnVectorQueryBuilder(fieldName, vector, numCands, similarity); + return new KnnVectorQueryBuilder(fieldName, vector, k, numCands, similarity); } } diff --git a/server/src/test/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilderTests.java index 67bc6bde9c1af..d2a5859ae981f 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilderTests.java @@ -28,14 +28,11 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.SearchExecutionContext; -import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.AbstractQueryTestCase; -import org.elasticsearch.test.TestGeoShapeFieldMapperPlugin; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Comparator; import java.util.List; @@ -49,11 +46,6 @@ public class KnnScoreDocQueryBuilderTests extends AbstractQueryTestCase<KnnScoreDocQueryBuilder> { - @Override - protected Collection<Class<? extends Plugin>> getPlugins() { - return List.of(TestGeoShapeFieldMapperPlugin.class); - } - @Override protected KnnScoreDocQueryBuilder doCreateTestQueryBuilder() { List<ScoreDoc> scoreDocs = new ArrayList<>(); diff --git a/server/src/test/java/org/elasticsearch/search/vectors/KnnSearchBuilderTests.java b/server/src/test/java/org/elasticsearch/search/vectors/KnnSearchBuilderTests.java index 564c8b9d0db11..616b87972faaa 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/KnnSearchBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/KnnSearchBuilderTests.java @@ -166,7 +166,8 @@ public void testToQueryBuilder() { builder.addFilterQuery(filter); } - QueryBuilder expected = new KnnVectorQueryBuilder(field, vector, numCands,
similarity).addFilterQueries(filterQueries).boost(boost); + QueryBuilder expected = new KnnVectorQueryBuilder(field, vector, null, numCands, similarity).addFilterQueries(filterQueries) + .boost(boost); assertEquals(expected, builder.toQueryBuilder()); } diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index b40e33c4baba8..8c9cd8cd54500 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -212,6 +212,7 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -258,7 +259,7 @@ public void createServices() { } @After - public void verifyReposThenStopServices() { + public void verifyReposThenStopServices() throws ExecutionException { try { clearDisruptionsAndAwaitSync(); diff --git a/server/src/test/java/org/elasticsearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java b/server/src/test/java/org/elasticsearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java index 93dd7bc618756..49314c5c2deab 100644 --- a/server/src/test/java/org/elasticsearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java +++ b/server/src/test/java/org/elasticsearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java @@ -9,7 +9,7 @@ package org.elasticsearch.test.search.aggregations.bucket; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.terms.SignificantTerms; import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; import org.elasticsearch.search.aggregations.bucket.terms.Terms; @@ -20,7 +20,6 @@ import java.util.ArrayList; import java.util.List; -import java.util.Map; import java.util.concurrent.ExecutionException; import static org.elasticsearch.search.aggregations.AggregationBuilders.significantTerms; @@ -55,9 +54,9 @@ private static void checkSignificantTermsAggregationCorrect(ESIntegTestCase test StringTerms classes = response.getAggregations().get("class"); Assert.assertThat(classes.getBuckets().size(), equalTo(2)); for (Terms.Bucket classBucket : classes.getBuckets()) { - Map aggs = classBucket.getAggregations().asMap(); - Assert.assertTrue(aggs.containsKey("sig_terms")); - SignificantTerms agg = (SignificantTerms) aggs.get("sig_terms"); + InternalAggregations aggs = classBucket.getAggregations(); + Assert.assertNotNull(aggs.get("sig_terms")); + SignificantTerms agg = aggs.get("sig_terms"); Assert.assertThat(agg.getBuckets().size(), equalTo(1)); SignificantTerms.Bucket sigBucket = agg.iterator().next(); String term = sigBucket.getKeyAsString(); diff --git a/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java b/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java index 761d369d6fc39..c5034f51d1e26 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java @@ -46,6 +46,7 @@ import static 
java.util.Collections.emptySet; import static org.elasticsearch.transport.AbstractSimpleTransportTestCase.IGNORE_DESERIALIZATION_ERRORS_SETTING; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; @@ -306,7 +307,18 @@ public void testNodeConnectWithDifferentNodeId() { ConnectTransportException.class, () -> AbstractSimpleTransportTestCase.connectToNode(transportServiceA, discoveryNode, TestProfiles.LIGHT_PROFILE) ); - assertThat(ex.getMessage(), containsString("unexpected remote node")); + assertThat( + ex.getMessage(), + allOf( + containsString("Connecting to [" + discoveryNode.getAddress() + "] failed"), + containsString("expected to connect to [" + discoveryNode.descriptionWithoutAttributes() + "]"), + containsString("found [" + transportServiceB.getLocalNode().descriptionWithoutAttributes() + "] instead"), + containsString("Ensure that each node has its own distinct publish address"), + containsString("routed to the correct node"), + containsString("https://www.elastic.co/guide/en/elasticsearch/reference/"), + containsString("modules-network.html") + ) + ); assertFalse(transportServiceA.nodeConnected(discoveryNode)); } diff --git a/test/external-modules/jvm-crash/src/javaRestTest/java/org/elasticsearch/test/jvm_crash/JvmCrashIT.java b/test/external-modules/jvm-crash/src/javaRestTest/java/org/elasticsearch/test/jvm_crash/JvmCrashIT.java index 3e73310ee824f..517cb5b65a529 100644 --- a/test/external-modules/jvm-crash/src/javaRestTest/java/org/elasticsearch/test/jvm_crash/JvmCrashIT.java +++ b/test/external-modules/jvm-crash/src/javaRestTest/java/org/elasticsearch/test/jvm_crash/JvmCrashIT.java @@ -22,9 +22,11 @@ import org.elasticsearch.test.cluster.local.distribution.LocalDistributionResolver; import org.elasticsearch.test.cluster.local.distribution.ReleasedDistributionResolver; import org.elasticsearch.test.cluster.local.distribution.SnapshotDistributionResolver; +import org.elasticsearch.test.cluster.util.OS; import org.elasticsearch.test.rest.ESRestTestCase; import org.hamcrest.Matcher; import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.ClassRule; import java.io.BufferedReader; @@ -46,6 +48,11 @@ public class JvmCrashIT extends ESRestTestCase { + @BeforeClass + public static void dontRunWindows() { + assumeFalse("JVM crash log doesn't go to stdout on windows", OS.current() == OS.WINDOWS); + } + private static class StdOutCatchingClusterBuilder extends AbstractLocalClusterSpecBuilder { private StdOutCatchingClusterBuilder() { diff --git a/test/framework/build.gradle b/test/framework/build.gradle index 7906a52479b29..4be630eab5012 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -16,7 +16,6 @@ dependencies { api project(':libs:elasticsearch-ssl-config') api project(":server") api project(":libs:elasticsearch-cli") - api project(':libs:elasticsearch-preallocate') api "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" api "junit:junit:${versions.junit}" api "org.hamcrest:hamcrest:${versions.hamcrest}" diff --git a/test/framework/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequestHelper.java b/test/framework/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequestHelper.java new file mode 100644 index 0000000000000..217b7addeb2de --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequestHelper.java @@ -0,0 +1,19 @@ +/* + * 
Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.support.master; + +import org.elasticsearch.transport.TransportRequest; + +public class MasterNodeRequestHelper { + public static TransportRequest unwrapTermOverride(TransportRequest transportRequest) { + return transportRequest instanceof TermOverridingMasterNodeRequest termOverridingMasterNodeRequest + ? termOverridingMasterNodeRequest.request + : transportRequest; + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java b/test/framework/src/main/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java index 0f60ba9731966..5b656598451a3 100644 --- a/test/framework/src/main/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.support.replication; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -141,13 +142,17 @@ public static ClusterState state( discoBuilder.localNodeId(newNode(0).getId()); discoBuilder.masterNodeId(newNode(1).getId()); // we need a non-local master to test shard failures final int primaryTerm = 1 + randomInt(200); + IndexLongFieldRange timeFieldRange = primaryState == ShardRoutingState.STARTED || primaryState == ShardRoutingState.RELOCATING + ? IndexLongFieldRange.UNKNOWN + : IndexLongFieldRange.NO_SHARDS; + IndexMetadata indexMetadata = IndexMetadata.builder(index) .settings(indexSettings(IndexVersion.current(), 1, numberOfReplicas).put(SETTING_CREATION_DATE, System.currentTimeMillis())) .primaryTerm(0, primaryTerm) - .timestampRange( - primaryState == ShardRoutingState.STARTED || primaryState == ShardRoutingState.RELOCATING - ? IndexLongFieldRange.UNKNOWN - : IndexLongFieldRange.NO_SHARDS + .timestampRange(timeFieldRange) + .eventIngestedRange( + timeFieldRange, + timeFieldRange == IndexLongFieldRange.UNKNOWN ? 
null : TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE ) .build(); @@ -281,6 +286,10 @@ public static ClusterState state(final int numberOfNodes, final String[] indices .settings( indexSettings(IndexVersion.current(), numberOfPrimaries, 0).put(SETTING_CREATION_DATE, System.currentTimeMillis()) ) + .eventIngestedRange( + IndexLongFieldRange.UNKNOWN, + randomFrom(TransportVersions.V_8_0_0, TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE) + ) .build(); IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(indexMetadata.getIndex()); @@ -386,6 +395,7 @@ public static ClusterState stateWithAssignedPrimariesAndReplicas( ) ) .timestampRange(IndexLongFieldRange.UNKNOWN) + .eventIngestedRange(IndexLongFieldRange.UNKNOWN, null) .build(); metadataBuilder.put(indexMetadata, false).generateClusterUuidIfNeeded(); IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(indexMetadata.getIndex()); diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java index 8ef80c08517de..c231502f9692c 100644 --- a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java +++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java @@ -14,7 +14,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.filesystem.FileSystemNatives; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.network.IfConfig; import org.elasticsearch.common.settings.Settings; @@ -91,9 +90,6 @@ public class BootstrapForTesting { final boolean systemCallFilter = Booleans.parseBoolean(System.getProperty("tests.system_call_filter", "true")); Elasticsearch.initializeNatives(javaTmpDir, memoryLock, systemCallFilter, true); - // init filesystem natives - FileSystemNatives.init(); - // initialize probes Elasticsearch.initializeProbes(); @@ -222,7 +218,6 @@ static Map getCodebases() { addClassCodebase(codebases, "elasticsearch-rest-client", "org.elasticsearch.client.RestClient"); addClassCodebase(codebases, "elasticsearch-core", "org.elasticsearch.core.Booleans"); addClassCodebase(codebases, "elasticsearch-cli", "org.elasticsearch.cli.Command"); - addClassCodebase(codebases, "elasticsearch-preallocate", "org.elasticsearch.preallocate.Preallocate"); addClassCodebase(codebases, "elasticsearch-simdvec", "org.elasticsearch.simdvec.VectorScorerFactory"); addClassCodebase(codebases, "framework", "org.elasticsearch.test.ESTestCase"); return codebases; diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java index f3fac694f9980..751d3bce2fb33 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java @@ -331,7 +331,7 @@ public static ClusterState startShardsAndReroute( public static ClusterState reroute(AllocationService allocationService, ClusterState clusterState) { final var listener = new PlainActionFuture(); final var result = allocationService.reroute(clusterState, "test reroute", listener); - listener.result(); // ensures it completed successfully + safeGet(listener::result); // ensures it completed successfully return result; } diff --git 
a/test/framework/src/main/java/org/elasticsearch/geo/GeometryPointCountVisitor.java b/test/framework/src/main/java/org/elasticsearch/geo/GeometryPointCountVisitor.java new file mode 100644 index 0000000000000..06b468250bd8f --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/geo/GeometryPointCountVisitor.java @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.geo; + +import org.elasticsearch.geometry.Circle; +import org.elasticsearch.geometry.Geometry; +import org.elasticsearch.geometry.GeometryCollection; +import org.elasticsearch.geometry.GeometryVisitor; +import org.elasticsearch.geometry.Line; +import org.elasticsearch.geometry.LinearRing; +import org.elasticsearch.geometry.MultiLine; +import org.elasticsearch.geometry.MultiPoint; +import org.elasticsearch.geometry.MultiPolygon; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.Polygon; +import org.elasticsearch.geometry.Rectangle; + +public class GeometryPointCountVisitor implements GeometryVisitor { + + @Override + public Integer visit(Circle circle) throws RuntimeException { + return 2; + } + + @Override + public Integer visit(GeometryCollection collection) throws RuntimeException { + int size = 0; + for (Geometry geometry : collection) { + size += geometry.visit(this); + } + return size; + } + + @Override + public Integer visit(Line line) throws RuntimeException { + return line.length(); + } + + @Override + public Integer visit(LinearRing ring) throws RuntimeException { + return ring.length(); + } + + @Override + public Integer visit(MultiLine multiLine) throws RuntimeException { + return visit((GeometryCollection) multiLine); + } + + @Override + public Integer visit(MultiPoint multiPoint) throws RuntimeException { + return multiPoint.size(); + } + + @Override + public Integer visit(MultiPolygon multiPolygon) throws RuntimeException { + return visit((GeometryCollection) multiPolygon); + } + + @Override + public Integer visit(Point point) throws RuntimeException { + return 1; + } + + @Override + public Integer visit(Polygon polygon) throws RuntimeException { + int size = polygon.getPolygon().length(); + for (int i = 0; i < polygon.getNumberOfHoles(); i++) { + size += polygon.getHole(i).length(); + } + return size; + } + + @Override + public Integer visit(Rectangle rectangle) throws RuntimeException { + return 4; + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/geo/GeometryTestUtils.java b/test/framework/src/main/java/org/elasticsearch/geo/GeometryTestUtils.java index ab15204a6095c..1c017a9da18ee 100644 --- a/test/framework/src/main/java/org/elasticsearch/geo/GeometryTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/geo/GeometryTestUtils.java @@ -205,6 +205,11 @@ public static Geometry randomGeometry(boolean hasAlt) { return randomGeometry(0, hasAlt); } + public static Geometry randomGeometry(boolean hasAlt, int maxPoints) { + var pointCounter = new GeometryPointCountVisitor(); + return randomValueOtherThanMany(g -> g.visit(pointCounter) > maxPoints, () -> randomGeometry(0, hasAlt)); + } + protected static Geometry randomGeometry(int level, boolean hasAlt) { @SuppressWarnings("unchecked") Function geometry = 
ESTestCase.randomFrom( diff --git a/test/framework/src/main/java/org/elasticsearch/geo/ShapeTestUtils.java b/test/framework/src/main/java/org/elasticsearch/geo/ShapeTestUtils.java index 1e21ad1acfd08..4918bd92fdfee 100644 --- a/test/framework/src/main/java/org/elasticsearch/geo/ShapeTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/geo/ShapeTestUtils.java @@ -184,6 +184,11 @@ public static Geometry randomGeometry(boolean hasAlt) { return randomGeometry(0, hasAlt); } + public static Geometry randomGeometry(boolean hasAlt, int maxPoints) { + var pointCounter = new GeometryPointCountVisitor(); + return randomValueOtherThanMany(g -> g.visit(pointCounter) > maxPoints, () -> randomGeometry(0, hasAlt)); + } + protected static Geometry randomGeometry(int level, boolean hasAlt) { @SuppressWarnings("unchecked") Function geometry = ESTestCase.randomFrom( diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 1c7cabb541581..70738c510f62a 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -125,7 +125,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -1179,33 +1178,24 @@ public static void assertOpsOnReplica( } public static void concurrentlyApplyOps(List ops, InternalEngine engine) throws InterruptedException { - Thread[] thread = new Thread[randomIntBetween(3, 5)]; - CountDownLatch startGun = new CountDownLatch(thread.length); + final int threadCount = randomIntBetween(3, 5); AtomicInteger offset = new AtomicInteger(-1); - for (int i = 0; i < thread.length; i++) { - thread[i] = new Thread(() -> { - startGun.countDown(); - safeAwait(startGun); - int docOffset; - while ((docOffset = offset.incrementAndGet()) < ops.size()) { - try { - applyOperation(engine, ops.get(docOffset)); - if ((docOffset + 1) % 4 == 0) { - engine.refresh("test"); - } - if (rarely()) { - engine.flush(); - } - } catch (IOException e) { - throw new AssertionError(e); + startInParallel(threadCount, i -> { + int docOffset; + while ((docOffset = offset.incrementAndGet()) < ops.size()) { + try { + applyOperation(engine, ops.get(docOffset)); + if ((docOffset + 1) % 4 == 0) { + engine.refresh("test"); + } + if (rarely()) { + engine.flush(); } + } catch (IOException e) { + throw new AssertionError(e); } - }); - thread[i].start(); - } - for (int i = 0; i < thread.length; i++) { - thread[i].join(); - } + } + }); } public static void applyOperations(Engine engine, List operations) throws IOException { diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldTypeTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldTypeTestCase.java index 675b5959f35a3..d812e158a1675 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldTypeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldTypeTestCase.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.index.IndexSettings; 
import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.query.ExistsQueryBuilder; @@ -265,6 +266,7 @@ protected static FieldDataContext mockFielddataContext() { SearchExecutionContext searchExecutionContext = mockContext(); return new FieldDataContext( "test", + null, searchExecutionContext::lookup, mockContext()::sourcePath, MappedFieldType.FielddataOperation.SCRIPT @@ -299,7 +301,7 @@ protected static SearchExecutionContext mockContext( when(context.allowExpensiveQueries()).thenReturn(allowExpensiveQueries); SearchLookup lookup = new SearchLookup( context::getFieldType, - (mft, lookupSupplier, fdo) -> mft.fielddataBuilder(new FieldDataContext("test", lookupSupplier, context::sourcePath, fdo)) + (mft, lookupSupplier, fdo) -> mft.fielddataBuilder(new FieldDataContext("test", null, lookupSupplier, context::sourcePath, fdo)) .build(null, null), sourceProvider ); @@ -307,7 +309,7 @@ protected static SearchExecutionContext mockContext( when(context.getForField(any(), any())).then(args -> { MappedFieldType ft = args.getArgument(0); MappedFieldType.FielddataOperation fdo = args.getArgument(1); - return ft.fielddataBuilder(new FieldDataContext("test", context::lookup, context::sourcePath, fdo)) + return ft.fielddataBuilder(new FieldDataContext("test", null, context::lookup, context::sourcePath, fdo)) .build(new IndexFieldDataCache.None(), new NoneCircuitBreakerService()); }); when(context.getMatchingFieldNames(any())).thenReturn(Set.of("dummy_field")); @@ -452,6 +454,11 @@ public String indexName() { throw new UnsupportedOperationException(); } + @Override + public IndexSettings indexSettings() { + throw new UnsupportedOperationException(); + } + @Override public MappedFieldType.FieldExtractPreference fieldExtractPreference() { return MappedFieldType.FieldExtractPreference.NONE; diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/KeywordFieldSyntheticSourceSupport.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/KeywordFieldSyntheticSourceSupport.java index 53ecb75c18d9a..6abe923851318 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/KeywordFieldSyntheticSourceSupport.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/KeywordFieldSyntheticSourceSupport.java @@ -17,7 +17,9 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; +import java.util.Objects; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.hamcrest.Matchers.equalTo; @@ -27,15 +29,20 @@ public class KeywordFieldSyntheticSourceSupport implements MapperTestCase.Synthe private final boolean store; private final boolean docValues; private final String nullValue; - private final boolean exampleSortsUsingIgnoreAbove; - KeywordFieldSyntheticSourceSupport(Integer ignoreAbove, boolean store, String nullValue, boolean exampleSortsUsingIgnoreAbove) { + KeywordFieldSyntheticSourceSupport(Integer ignoreAbove, boolean store, String nullValue, boolean useFallbackSyntheticSource) { this.ignoreAbove = ignoreAbove; this.allIgnored = ignoreAbove != null && LuceneTestCase.rarely(); this.store = store; this.nullValue = nullValue; - this.exampleSortsUsingIgnoreAbove = exampleSortsUsingIgnoreAbove; - this.docValues = store ? 
ESTestCase.randomBoolean() : true; + this.docValues = useFallbackSyntheticSource == false || ESTestCase.randomBoolean(); + } + + @Override + public boolean preservesExactSource() { + // We opt in into fallback synthetic source implementation + // if there is nothing else to use, and it preserves exact source data. + return store == false && docValues == false; } @Override @@ -46,36 +53,48 @@ public MapperTestCase.SyntheticSourceExample example(int maxValues) { public MapperTestCase.SyntheticSourceExample example(int maxValues, boolean loadBlockFromSource) { if (ESTestCase.randomBoolean()) { Tuple v = generateValue(); + Object sourceValue = preservesExactSource() ? v.v1() : v.v2(); Object loadBlock = v.v2(); if (loadBlockFromSource == false && ignoreAbove != null && v.v2().length() > ignoreAbove) { loadBlock = null; } - return new MapperTestCase.SyntheticSourceExample(v.v1(), v.v2(), loadBlock, this::mapping); + return new MapperTestCase.SyntheticSourceExample(v.v1(), sourceValue, loadBlock, this::mapping); } List> values = ESTestCase.randomList(1, maxValues, this::generateValue); List in = values.stream().map(Tuple::v1).toList(); - List outPrimary = new ArrayList<>(); - List outExtraValues = new ArrayList<>(); + + List validValues = new ArrayList<>(); + List ignoredValues = new ArrayList<>(); values.stream().map(Tuple::v2).forEach(v -> { - if (exampleSortsUsingIgnoreAbove && ignoreAbove != null && v.length() > ignoreAbove) { - outExtraValues.add(v); + if (ignoreAbove != null && v.length() > ignoreAbove) { + ignoredValues.add(v); } else { - outPrimary.add(v); + validValues.add(v); } }); - List outList = store ? outPrimary : new HashSet<>(outPrimary).stream().sorted().collect(Collectors.toList()); + List outputFromDocValues = new HashSet<>(validValues).stream().sorted().collect(Collectors.toList()); + + Object out; + if (preservesExactSource()) { + out = in; + } else { + var validValuesInCorrectOrder = store ? validValues : outputFromDocValues; + var syntheticSourceOutputList = Stream.concat(validValuesInCorrectOrder.stream(), ignoredValues.stream()).toList(); + out = syntheticSourceOutputList.size() == 1 ? syntheticSourceOutputList.get(0) : syntheticSourceOutputList; + } + List loadBlock; if (loadBlockFromSource) { // The block loader infrastructure will never return nulls. Just zap them all. - loadBlock = in.stream().filter(m -> m != null).toList(); + loadBlock = in.stream().filter(Objects::nonNull).toList(); } else if (docValues) { - loadBlock = new HashSet<>(outPrimary).stream().sorted().collect(Collectors.toList()); + loadBlock = List.copyOf(outputFromDocValues); } else { - loadBlock = List.copyOf(outList); + // Meaning loading from terms. + loadBlock = List.copyOf(validValues); } + Object loadBlockResult = loadBlock.size() == 1 ? loadBlock.get(0) : loadBlock; - outList.addAll(outExtraValues); - Object out = outList.size() == 1 ? 
outList.get(0) : outList; return new MapperTestCase.SyntheticSourceExample(in, out, loadBlockResult, this::mapping); } @@ -110,13 +129,6 @@ private void mapping(XContentBuilder b) throws IOException { @Override public List invalidExample() throws IOException { return List.of( - new MapperTestCase.SyntheticSourceInvalidExample( - equalTo( - "field [field] of type [keyword] doesn't support synthetic source because " - + "it doesn't have doc values and isn't stored" - ), - b -> b.field("type", "keyword").field("doc_values", false) - ), new MapperTestCase.SyntheticSourceInvalidExample( equalTo("field [field] of type [keyword] doesn't support synthetic source because it declares a normalizer"), b -> b.field("type", "keyword").field("normalizer", "lowercase") diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index a6b737f162547..dca4fa72b5530 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -225,6 +225,7 @@ protected class TestMapperServiceBuilder { private BooleanSupplier idFieldDataEnabled; private ScriptCompiler scriptCompiler; private MapperMetrics mapperMetrics; + private boolean applyDefaultMapping; public TestMapperServiceBuilder() { indexVersion = getVersion(); @@ -232,6 +233,7 @@ public TestMapperServiceBuilder() { idFieldDataEnabled = () -> true; scriptCompiler = MapperServiceTestCase.this::compileScript; mapperMetrics = MapperMetrics.NOOP; + applyDefaultMapping = true; } public TestMapperServiceBuilder indexVersion(IndexVersion indexVersion) { @@ -254,6 +256,11 @@ public TestMapperServiceBuilder mapperMetrics(MapperMetrics mapperMetrics) { return this; } + public TestMapperServiceBuilder applyDefaultMapping(boolean applyDefaultMapping) { + this.applyDefaultMapping = applyDefaultMapping; + return this; + } + public MapperService build() { IndexSettings indexSettings = createIndexSettings(indexVersion, settings); SimilarityService similarityService = new SimilarityService(indexSettings, null, Map.of()); @@ -269,7 +276,7 @@ public void onCache(ShardId shardId, Accountable accountable) {} public void onRemoval(ShardId shardId, Accountable accountable) {} }); - return new MapperService( + var mapperService = new MapperService( () -> TransportVersion.current(), indexSettings, createIndexAnalyzers(indexSettings), @@ -284,6 +291,12 @@ public void onRemoval(ShardId shardId, Accountable accountable) {} bitsetFilterCache::getBitSetProducer, mapperMetrics ); + + if (applyDefaultMapping && indexSettings.getMode().getDefaultMapping() != null) { + mapperService.merge(null, indexSettings.getMode().getDefaultMapping(), MapperService.MergeReason.MAPPING_UPDATE); + } + + return mapperService; } } @@ -764,7 +777,7 @@ protected TriFunction, MappedFieldType.F protected TriFunction, MappedFieldType.FielddataOperation, IndexFieldData> fieldDataLookup( Function> sourcePathsLookup ) { - return (mft, lookupSource, fdo) -> mft.fielddataBuilder(new FieldDataContext("test", lookupSource, sourcePathsLookup, fdo)) + return (mft, lookupSource, fdo) -> mft.fielddataBuilder(new FieldDataContext("test", null, lookupSource, sourcePathsLookup, fdo)) .build(new IndexFieldDataCache.None(), new NoneCircuitBreakerService()); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java 
b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java index 92c2dad4a7109..9eaace8f93e58 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java @@ -578,7 +578,13 @@ protected static void assertScriptDocValues(MapperService mapperService, Object } : SourceProvider.fromStoredFields(); SearchLookup searchLookup = new SearchLookup(null, null, sourceProvider); IndexFieldData sfd = ft.fielddataBuilder( - new FieldDataContext("", () -> searchLookup, Set::of, MappedFieldType.FielddataOperation.SCRIPT) + new FieldDataContext( + "", + mapperService.getIndexSettings(), + () -> searchLookup, + Set::of, + MappedFieldType.FielddataOperation.SCRIPT + ) ).build(null, null); LeafFieldData lfd = sfd.load(getOnlyLeafReader(searcher.getIndexReader()).getContext()); DocValuesScriptFieldFactory sff = lfd.getScriptFieldFactory("field"); @@ -1039,13 +1045,13 @@ protected String minimalIsInvalidRoutingPathErrorMessage(Mapper mapper) { return "All fields that match routing_path must be configured with [time_series_dimension: true] " + "or flattened fields with a list of dimensions in [time_series_dimensions] and " + "without the [script] parameter. [" - + mapper.name() + + mapper.fullPath() + "] was not a dimension."; } return "All fields that match routing_path must be configured with [time_series_dimension: true] " + "or flattened fields with a list of dimensions in [time_series_dimensions] and " + "without the [script] parameter. [" - + mapper.name() + + mapper.fullPath() + "] was [" + mapper.typeName() + "]."; @@ -1325,6 +1331,11 @@ public String indexName() { throw new UnsupportedOperationException(); } + @Override + public IndexSettings indexSettings() { + throw new UnsupportedOperationException(); + } + @Override public MappedFieldType.FieldExtractPreference fieldExtractPreference() { return columnReader ? 
DOC_VALUES : NONE; @@ -1521,6 +1532,21 @@ public final void testSyntheticSourceInvalid() throws IOException { } } + public final void testSyntheticSourceInNestedObject() throws IOException { + boolean ignoreMalformed = shouldUseIgnoreMalformed(); + SyntheticSourceExample syntheticSourceExample = syntheticSourceSupport(ignoreMalformed).example(5); + DocumentMapper mapper = createDocumentMapper(syntheticSourceMapping(b -> { + b.startObject("obj").field("type", "nested").startObject("properties").startObject("field"); + syntheticSourceExample.mapping().accept(b); + b.endObject().endObject().endObject(); + })); + assertThat(syntheticSource(mapper, b -> { + b.startObject("obj"); + syntheticSourceExample.buildInput(b); + b.endObject(); + }), equalTo("{\"obj\":" + syntheticSourceExample.expected() + "}")); + } + @Override protected final T compileScript(Script script, ScriptContext context) { return ingestScriptSupport().compileScript(script, context); diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MockFieldMapper.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MockFieldMapper.java index b95619602573c..f207f3c4cd314 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MockFieldMapper.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MockFieldMapper.java @@ -34,7 +34,7 @@ public MockFieldMapper(String fullName, MappedFieldType fieldType, MultiFields m @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName()); + return new Builder(leafName()); } static String findSimpleName(String fullName) { @@ -92,7 +92,7 @@ public Builder copyTo(String field) { @Override public MockFieldMapper build(MapperBuilderContext context) { MultiFields multiFields = multiFieldsBuilder.build(this, context); - return new MockFieldMapper(name(), fieldType, multiFields, copyTo); + return new MockFieldMapper(leafName(), fieldType, multiFields, copyTo); } } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java index c60a913a63b33..ec2bbc35a68b1 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java @@ -36,7 +36,6 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.matchesPattern; import static org.hamcrest.Matchers.notANumber; public abstract class NumberFieldMapperTests extends MapperTestCase { @@ -376,6 +375,14 @@ public void testAllowMultipleValuesField() throws IOException { assertThat(e.getCause().getMessage(), containsString("Only one field can be stored per key")); } + @Override + protected BlockReaderSupport getSupportedReaders(MapperService mapper, String loaderFieldName) { + MappedFieldType ft = mapper.fieldType(loaderFieldName); + // Block loader can either use doc values or source. + // So with synthetic source it only works when doc values are enabled. 
+ return new BlockReaderSupport(ft.hasDocValues(), ft.hasDocValues(), mapper, loaderFieldName); + } + @Override protected Function loadBlockExpected() { return n -> ((Number) n); // Just assert it's a number @@ -391,6 +398,7 @@ protected Matcher blockItemMatcher(Object expected) { protected final class NumberSyntheticSourceSupport implements SyntheticSourceSupport { private final Long nullValue = usually() ? null : randomNumber().longValue(); private final boolean coerce = rarely(); + private final boolean docValues = randomBoolean(); private final Function round; private final boolean ignoreMalformed; @@ -400,10 +408,26 @@ protected NumberSyntheticSourceSupport(Function round, boolean i this.ignoreMalformed = ignoreMalformed; } + @Override + public boolean preservesExactSource() { + // We opt in into fallback synthetic source if there is no doc values + // which preserves exact source. + return docValues == false; + } + @Override public SyntheticSourceExample example(int maxVals) { if (randomBoolean()) { Tuple v = generateValue(); + if (preservesExactSource()) { + var rawInput = v.v1(); + + // This code actually runs with synthetic source disabled + // to test block loader loading from source. + // That's why we need to set expected block loader value here. + var blockLoaderResult = v.v2() instanceof Number n ? round.apply(n) : null; + return new SyntheticSourceExample(rawInput, rawInput, blockLoaderResult, this::mapping); + } if (v.v2() instanceof Number n) { Number result = round.apply(n); return new SyntheticSourceExample(v.v1(), result, result, this::mapping); @@ -413,19 +437,33 @@ public SyntheticSourceExample example(int maxVals) { } List> values = randomList(1, maxVals, this::generateValue); List in = values.stream().map(Tuple::v1).toList(); - List outList = values.stream() - .filter(v -> v.v2() instanceof Number) - .map(t -> round.apply((Number) t.v2())) - .sorted() - .collect(Collectors.toCollection(ArrayList::new)); - values.stream().filter(v -> false == v.v2() instanceof Number).map(v -> v.v2()).forEach(outList::add); - Object out = outList.size() == 1 ? outList.get(0) : outList; - - List outBlockList = values.stream() - .filter(v -> v.v2() instanceof Number) - .map(t -> round.apply((Number) t.v2())) - .sorted() - .collect(Collectors.toCollection(ArrayList::new)); + Object out; + List outBlockList; + if (preservesExactSource()) { + // This code actually runs with synthetic source disabled + // to test block loader loading from source. + // That's why we need to set expected block loader value here. + out = in; + outBlockList = values.stream() + .filter(v -> v.v2() instanceof Number) + .map(t -> round.apply((Number) t.v2())) + .collect(Collectors.toCollection(ArrayList::new)); + } else { + List outList = values.stream() + .filter(v -> v.v2() instanceof Number) + .map(t -> round.apply((Number) t.v2())) + .sorted() + .collect(Collectors.toCollection(ArrayList::new)); + values.stream().filter(v -> false == v.v2() instanceof Number).map(Tuple::v2).forEach(outList::add); + out = outList.size() == 1 ? outList.get(0) : outList; + + outBlockList = values.stream() + .filter(v -> v.v2() instanceof Number) + .map(t -> round.apply((Number) t.v2())) + .sorted() + .collect(Collectors.toCollection(ArrayList::new)); + } + Object outBlock = outBlockList.size() == 1 ? 
outBlockList.get(0) : outBlockList; return new SyntheticSourceExample(in, out, outBlock, this::mapping); } @@ -459,19 +497,14 @@ private void mapping(XContentBuilder b) throws IOException { if (ignoreMalformed) { b.field("ignore_malformed", true); } + if (docValues == false) { + b.field("doc_values", "false"); + } } @Override public List invalidExample() throws IOException { - return List.of( - new SyntheticSourceInvalidExample( - matchesPattern("field \\[field] of type \\[.+] doesn't support synthetic source because it doesn't have doc values"), - b -> { - minimalMapping(b); - b.field("doc_values", false); - } - ) - ); + return List.of(); } } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/TextFieldFamilySyntheticSourceTestSetup.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/TextFieldFamilySyntheticSourceTestSetup.java index df4377adc3e35..953d71b9a791b 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/TextFieldFamilySyntheticSourceTestSetup.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/TextFieldFamilySyntheticSourceTestSetup.java @@ -10,6 +10,9 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentBuilder; import org.hamcrest.Matcher; import java.io.IOException; @@ -19,6 +22,7 @@ import static org.elasticsearch.test.ESTestCase.between; import static org.elasticsearch.test.ESTestCase.randomAlphaOfLength; +import static org.elasticsearch.test.ESTestCase.randomAlphaOfLengthBetween; import static org.elasticsearch.test.ESTestCase.randomBoolean; import static org.hamcrest.Matchers.equalTo; @@ -78,48 +82,50 @@ public static void validateRoundTripReader(String syntheticSource, DirectoryRead private static class TextFieldFamilySyntheticSourceSupport implements MapperTestCase.SyntheticSourceSupport { private final String fieldType; - private final boolean storeTextField; - private final boolean storedKeywordField; - private final boolean indexText; + private final boolean store; + private final boolean index; private final Integer ignoreAbove; - private final KeywordFieldSyntheticSourceSupport keywordSupport; + private final KeywordFieldSyntheticSourceSupport keywordMultiFieldSyntheticSourceSupport; TextFieldFamilySyntheticSourceSupport(String fieldType, boolean supportsCustomIndexConfiguration) { this.fieldType = fieldType; - this.storeTextField = randomBoolean(); - this.storedKeywordField = storeTextField || randomBoolean(); - this.indexText = supportsCustomIndexConfiguration ? randomBoolean() : true; + this.store = randomBoolean(); + this.index = supportsCustomIndexConfiguration == false || randomBoolean(); this.ignoreAbove = randomBoolean() ? 
null : between(10, 100); - this.keywordSupport = new KeywordFieldSyntheticSourceSupport(ignoreAbove, storedKeywordField, null, false == storeTextField); + this.keywordMultiFieldSyntheticSourceSupport = new KeywordFieldSyntheticSourceSupport( + ignoreAbove, + randomBoolean(), + null, + false + ); } @Override public MapperTestCase.SyntheticSourceExample example(int maxValues) { - if (storeTextField) { - MapperTestCase.SyntheticSourceExample delegate = keywordSupport.example(maxValues, true); - return new MapperTestCase.SyntheticSourceExample( - delegate.inputValue(), - delegate.expectedForSyntheticSource(), - delegate.expectedForBlockLoader(), - b -> { - b.field("type", fieldType); - b.field("store", true); - if (indexText == false) { - b.field("index", false); - } + if (store) { + CheckedConsumer mapping = b -> { + b.field("type", fieldType); + b.field("store", true); + if (index == false) { + b.field("index", false); } - ); + }; + + return storedFieldExample(maxValues, mapping); } - // We'll load from _source if ignore_above is defined, otherwise we load from the keyword field. + + // Block loader will not use keyword multi-field if it has ignore_above configured. + // And in this case it will use values from source. boolean loadingFromSource = ignoreAbove != null; - MapperTestCase.SyntheticSourceExample delegate = keywordSupport.example(maxValues, loadingFromSource); + MapperTestCase.SyntheticSourceExample delegate = keywordMultiFieldSyntheticSourceSupport.example(maxValues, loadingFromSource); + return new MapperTestCase.SyntheticSourceExample( delegate.inputValue(), delegate.expectedForSyntheticSource(), delegate.expectedForBlockLoader(), b -> { b.field("type", fieldType); - if (indexText == false) { + if (index == false) { b.field("index", false); } b.startObject("fields"); @@ -133,6 +139,25 @@ public MapperTestCase.SyntheticSourceExample example(int maxValues) { ); } + private MapperTestCase.SyntheticSourceExample storedFieldExample( + int maxValues, + CheckedConsumer mapping + ) { + if (randomBoolean()) { + var randomString = randomString(); + return new MapperTestCase.SyntheticSourceExample(randomString, randomString, randomString, mapping); + } + + var list = ESTestCase.randomList(1, maxValues, this::randomString); + var output = list.size() == 1 ? 
list.get(0) : list; + + return new MapperTestCase.SyntheticSourceExample(list, output, output, mapping); + } + + private String randomString() { + return randomAlphaOfLengthBetween(0, 10); + } + @Override public List invalidExample() throws IOException { Matcher err = equalTo( diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 442a8c3b82dc6..0488614f04dfb 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -135,7 +135,11 @@ public abstract class IndexShardTestCase extends ESTestCase { protected static final PeerRecoveryTargetService.RecoveryListener recoveryListener = new PeerRecoveryTargetService.RecoveryListener() { @Override - public void onRecoveryDone(RecoveryState state, ShardLongFieldRange timestampMillisFieldRange) { + public void onRecoveryDone( + RecoveryState state, + ShardLongFieldRange timestampMillisFieldRange, + ShardLongFieldRange eventIngestedMillisFieldRange + ) { } diff --git a/test/framework/src/main/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java index 50e723ebd49d2..a80996359c52f 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java @@ -424,6 +424,10 @@ public ShardLongFieldRange getTimestampRange() { return ShardLongFieldRange.EMPTY; } + @Override + public ShardLongFieldRange getEventIngestedRange() { + return ShardLongFieldRange.EMPTY; + } } public static void awaitIndexShardCloseAsyncTasks(IndicesClusterStateService indicesClusterStateService) { diff --git a/test/framework/src/main/java/org/elasticsearch/readiness/MockReadinessService.java b/test/framework/src/main/java/org/elasticsearch/readiness/MockReadinessService.java index e5841071a787b..e4ec541256f31 100644 --- a/test/framework/src/main/java/org/elasticsearch/readiness/MockReadinessService.java +++ b/test/framework/src/main/java/org/elasticsearch/readiness/MockReadinessService.java @@ -29,9 +29,9 @@ public class MockReadinessService extends ReadinessService { */ public static class TestPlugin extends Plugin {} - private static final int RETRIES = 3; + private static final int RETRIES = 30; - private static final int RETRY_DELAY_IN_MILLIS = 10; + private static final int RETRY_DELAY_IN_MILLIS = 100; private static final String METHOD_NOT_MOCKED = "This method has not been mocked"; diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index d39a8df80c26d..f3fc4479a21a4 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -89,7 +89,6 @@ import org.elasticsearch.index.mapper.FieldAliasMapper; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.GeoPointFieldMapper; -import org.elasticsearch.index.mapper.GeoShapeFieldMapper; import org.elasticsearch.index.mapper.IdLoader; import 
org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; @@ -198,7 +197,6 @@ public abstract class AggregatorTestCase extends ESTestCase { // A list of field types that should not be tested, or are not currently supported private static final List TYPE_TEST_BLACKLIST = List.of( ObjectMapper.CONTENT_TYPE, // Cannot aggregate objects - GeoShapeFieldMapper.CONTENT_TYPE, // Cannot aggregate geoshapes (yet) DenseVectorFieldMapper.CONTENT_TYPE, // Cannot aggregate dense vectors SparseVectorFieldMapper.CONTENT_TYPE, // Sparse vectors are no longer supported @@ -356,6 +354,7 @@ private AggregationContext createAggregationContext( .fielddataBuilder( new FieldDataContext( indexSettings.getIndex().getName(), + indexSettings, context.lookupSupplier(), context.sourcePathsLookup(), context.fielddataOperation() @@ -644,7 +643,8 @@ private A searchAndReduce( bigArraysForReduction, getMockScriptService(), () -> false, - builder + builder, + b -> {} ); AggregatorCollectorManager aggregatorCollectorManager = new AggregatorCollectorManager( aggregatorSupplier, @@ -669,7 +669,8 @@ private A searchAndReduce( bigArraysForReduction, getMockScriptService(), () -> false, - builder + builder, + b -> {} ); internalAggs = new ArrayList<>(internalAggs.subList(r, toReduceSize)); internalAggs.add(InternalAggregations.topLevelReduce(toReduce, reduceContext)); diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/CentroidAggregationTestBase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/CentroidAggregationTestBase.java index 664590d65c818..23b17e8f3f163 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/CentroidAggregationTestBase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/CentroidAggregationTestBase.java @@ -102,7 +102,7 @@ public void testSingleValueFieldGetProperty() { assertThat(global.getName(), equalTo("global")); assertThat(global.getDocCount(), equalTo((long) numDocs)); assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); + assertThat(global.getAggregations().asList().size(), equalTo(1)); CentroidAggregation geoCentroid = global.getAggregations().get(aggName()); InternalAggregation agg = (InternalAggregation) global; diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/SpatialBoundsAggregationTestBase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/SpatialBoundsAggregationTestBase.java index 81c9c37ad4f9a..9dae49f658211 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/SpatialBoundsAggregationTestBase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/SpatialBoundsAggregationTestBase.java @@ -67,7 +67,7 @@ public void testSingleValuedField_getProperty() { assertThat(global.getName(), equalTo("global")); assertThat(global.getDocCount(), equalTo((long) numDocs)); assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); + assertThat(global.getAggregations().asList().size(), equalTo(1)); SpatialBounds geobounds = global.getAggregations().get(aggName()); assertThat(geobounds, notNullValue()); diff --git a/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java 
b/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java index 52d2f3f53a43e..6c84a9ba601cf 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java @@ -32,16 +32,12 @@ import org.elasticsearch.geometry.ShapeType; import org.elasticsearch.geometry.utils.WellKnownText; import org.elasticsearch.index.query.AbstractGeometryQueryBuilder; -import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchHits; import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.test.TestGeoShapeFieldMapperPlugin; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; import org.hamcrest.CoreMatchers; -import java.util.Collection; -import java.util.Collections; import java.util.List; import java.util.Map; @@ -67,11 +63,6 @@ public abstract class BasePointShapeQueryTestCase> getPlugins() { - return Collections.singleton(TestGeoShapeFieldMapperPlugin.class); - } - protected abstract void createMapping(String indexName, String fieldName, Settings settings) throws Exception; protected void createMapping(String indexName, String fieldName) throws Exception { diff --git a/test/framework/src/main/java/org/elasticsearch/search/rank/rerank/AbstractRerankerIT.java b/test/framework/src/main/java/org/elasticsearch/search/rank/rerank/AbstractRerankerIT.java index ae5f0329390d0..13a045d8a4654 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/rank/rerank/AbstractRerankerIT.java +++ b/test/framework/src/main/java/org/elasticsearch/search/rank/rerank/AbstractRerankerIT.java @@ -42,7 +42,7 @@ @ESIntegTestCase.ClusterScope(minNumDataNodes = 3) public abstract class AbstractRerankerIT extends ESIntegTestCase { - protected enum ThrowingRankBuilderType { + public enum ThrowingRankBuilderType { THROWING_QUERY_PHASE_SHARD_CONTEXT, THROWING_QUERY_PHASE_COORDINATOR_CONTEXT, THROWING_RANK_FEATURE_PHASE_SHARD_CONTEXT, @@ -94,7 +94,7 @@ public void testRerankerNoExceptions() throws Exception { int rank = 1; for (SearchHit searchHit : response.getHits().getHits()) { assertThat(searchHit, hasId(String.valueOf(5 - (rank - 1)))); - assertEquals(searchHit.getScore(), (0.5f - ((rank - 1) * 0.1f)), 1e-5f); + assertEquals(0.5f - ((rank - 1) * 0.1f), searchHit.getScore(), 1e-5f); assertThat(searchHit, hasRank(rank)); assertNotNull(searchHit.getFields().get(searchField)); rank++; @@ -139,7 +139,7 @@ public void testRerankerPagination() throws Exception { int rank = 3; for (SearchHit searchHit : response.getHits().getHits()) { assertThat(searchHit, hasId(String.valueOf(5 - (rank - 1)))); - assertEquals(searchHit.getScore(), (0.5f - ((rank - 1) * 0.1f)), 1e-5f); + assertEquals(0.5f - ((rank - 1) * 0.1f), searchHit.getScore(), 1e-5f); assertThat(searchHit, hasRank(rank)); assertNotNull(searchHit.getFields().get(searchField)); rank++; @@ -221,7 +221,7 @@ public void testNotAllShardsArePresentInFetchPhase() throws Exception { int rank = 1; for (SearchHit searchHit : response.getHits().getHits()) { assertThat(searchHit, hasId(String.valueOf(5 - (rank - 1)))); - assertEquals(searchHit.getScore(), (0.5f - ((rank - 1) * 0.1f)), 1e-5f); + assertEquals(0.5f - ((rank - 1) * 0.1f), searchHit.getScore(), 1e-5f); assertThat(searchHit, hasRank(rank)); assertNotNull(searchHit.getFields().get(searchField)); rank++; diff --git 
a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java index 271df2a971fb1..a2d93bab3a505 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java @@ -59,6 +59,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardLongFieldRange; import org.elasticsearch.index.similarity.SimilarityService; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -622,13 +623,13 @@ QueryRewriteContext createQueryRewriteContext() { } CoordinatorRewriteContext createCoordinatorContext(DateFieldMapper.DateFieldType dateFieldType, long min, long max) { - return new CoordinatorRewriteContext( - parserConfiguration, - this.client, - () -> nowInMillis, + DateFieldRangeInfo timestampFieldInfo = new DateFieldRangeInfo( + dateFieldType, IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(min, max)), - dateFieldType + dateFieldType, + IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(min, max)) ); + return new CoordinatorRewriteContext(parserConfiguration, this.client, () -> nowInMillis, timestampFieldInfo); } DataRewriteContext createDataContext() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index 8526acc851c72..7fdc5765a90e8 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -69,6 +69,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.elasticsearch.action.search.SearchTransportService.FREE_CONTEXT_ACTION_NAME; import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING; import static org.elasticsearch.discovery.SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING; import static org.elasticsearch.test.NodeRoles.dataNode; @@ -130,6 +131,8 @@ public void tearDown() throws Exception { logger.trace("[{}#{}]: cleaning up after test", getTestClass().getSimpleName(), getTestName()); awaitIndexShardCloseAsyncTasks(); ensureNoInitializingShards(); + ensureAllFreeContextActionsAreConsumed(); + SearchService searchService = getInstanceFromNode(SearchService.class); assertThat(searchService.getActiveContexts(), equalTo(0)); assertThat(searchService.getOpenScrollContexts(), equalTo(0)); @@ -455,6 +458,14 @@ protected void ensureNoInitializingShards() { assertFalse("timed out waiting for shards to initialize", actionGet.isTimedOut()); } + /** + * waits until all free_context actions have been handled by the generic thread pool + */ + protected void ensureAllFreeContextActionsAreConsumed() throws Exception { + logger.info("--> waiting for all free_context tasks to complete within a reasonable time"); + safeGet(clusterAdmin().prepareListTasks().setActions(FREE_CONTEXT_ACTION_NAME + "*").setWaitForCompletion(true).execute()); + } + /** * Whether we'd like to enable inter-segment search concurrency and increase the likelihood of leveraging it, by creating multiple * slices with a low amount of 
documents in them, which would not be allowed in production. diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 0d20c613b27a8..7295dce7a257a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -38,6 +38,7 @@ import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.tests.util.TimeUnits; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.RequestBuilder; @@ -48,6 +49,7 @@ import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -75,6 +77,7 @@ import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -177,12 +180,14 @@ import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BooleanSupplier; import java.util.function.Consumer; +import java.util.function.IntConsumer; import java.util.function.IntFunction; import java.util.function.Predicate; import java.util.function.Supplier; @@ -649,7 +654,7 @@ protected final void assertSettingDeprecationsAndWarnings(final Setting[] set /** * Convenience method to assert warnings for settings deprecations and general deprecation warnings. All warnings passed to this method * are assumed to be at WARNING level. - * @param expectedWarnings expected general deprecation warnings. + * @param expectedWarnings expected general deprecation warning messages. */ protected final void assertWarnings(String... expectedWarnings) { assertWarnings( @@ -663,7 +668,7 @@ protected final void assertWarnings(String... expectedWarnings) { /** * Convenience method to assert warnings for settings deprecations and general deprecation warnings. All warnings passed to this method * are assumed to be at CRITICAL level. - * @param expectedWarnings expected general deprecation warnings. + * @param expectedWarnings expected general deprecation warning messages. */ protected final void assertCriticalWarnings(String... expectedWarnings) { assertWarnings( @@ -997,6 +1002,13 @@ public static int randomNonNegativeInt() { return randomInt() & Integer.MAX_VALUE; } + /** + * @return an int between Integer.MIN_VALUE and -1 (inclusive) chosen uniformly at random. 
+ */ + public static int randomNegativeInt() { + return randomInt() | Integer.MIN_VALUE; + } + public static float randomFloat() { return random().nextFloat(); } @@ -2211,6 +2223,10 @@ protected static SecureRandom secureRandomFips(final byte[] seed) throws NoSuchA */ public static final TimeValue SAFE_AWAIT_TIMEOUT = TimeValue.timeValueSeconds(10); + /** + * Await on the given {@link CyclicBarrier} with a timeout of {@link #SAFE_AWAIT_TIMEOUT}, preserving the thread's interrupt status flag + * and converting all exceptions into an {@link AssertionError} to trigger a test failure. + */ public static void safeAwait(CyclicBarrier barrier) { try { barrier.await(SAFE_AWAIT_TIMEOUT.millis(), TimeUnit.MILLISECONDS); @@ -2222,6 +2238,10 @@ public static void safeAwait(CyclicBarrier barrier) { } } + /** + * Await on the given {@link CountDownLatch} with a timeout of {@link #SAFE_AWAIT_TIMEOUT}, preserving the thread's interrupt status + * flag and asserting that the latch is indeed completed before the timeout. + */ public static void safeAwait(CountDownLatch countDownLatch) { try { assertTrue( @@ -2234,10 +2254,18 @@ public static void safeAwait(CountDownLatch countDownLatch) { } } + /** + * Acquire a single permit from the given {@link Semaphore}, with a timeout of {@link #SAFE_AWAIT_TIMEOUT}, preserving the thread's + * interrupt status flag and asserting that the permit was successfully acquired. + */ public static void safeAcquire(Semaphore semaphore) { safeAcquire(1, semaphore); } + /** + * Acquire the specified number of permits from the given {@link Semaphore}, with a timeout of {@link #SAFE_AWAIT_TIMEOUT}, preserving + * the thread's interrupt status flag and asserting that the permits were all successfully acquired. + */ public static void safeAcquire(int permits, Semaphore semaphore) { try { assertTrue( @@ -2250,12 +2278,24 @@ public static void safeAcquire(int permits, Semaphore semaphore) { } } + /** + * Wait for the successful completion of the given {@link SubscribableListener}, with a timeout of {@link #SAFE_AWAIT_TIMEOUT}, + * preserving the thread's interrupt status flag and converting all exceptions into an {@link AssertionError} to trigger a test failure. + * + * @return The value with which the {@code listener} was completed. + */ public static T safeAwait(SubscribableListener listener) { final var future = new PlainActionFuture(); listener.addListener(future); return safeGet(future); } + /** + * Wait for the successful completion of the given {@link Future}, with a timeout of {@link #SAFE_AWAIT_TIMEOUT}, preserving the + * thread's interrupt status flag and converting all exceptions into an {@link AssertionError} to trigger a test failure. + * + * @return The value with which the {@code future} was completed. + */ public static T safeGet(Future future) { try { return future.get(SAFE_AWAIT_TIMEOUT.millis(), TimeUnit.MILLISECONDS); @@ -2269,6 +2309,27 @@ public static T safeGet(Future future) { } } + /** + * Call a {@link CheckedSupplier}, converting all exceptions into an {@link AssertionError}. Useful for avoiding + * try/catch boilerplate or cumbersome propagation of checked exceptions around something that should never throw. + * + * @return The value returned by the {@code supplier}. 
+ */ + public static T safeGet(CheckedSupplier supplier) { + try { + return supplier.get(); + } catch (Exception e) { + return fail(e); + } + } + + /** + * Wait for the exceptional completion of the given {@link SubscribableListener}, with a timeout of {@link #SAFE_AWAIT_TIMEOUT}, + * preserving the thread's interrupt status flag and converting a successful completion, interrupt or timeout into an {@link + * AssertionError} to trigger a test failure. + * + * @return The exception with which the {@code listener} was completed exceptionally. + */ public static Exception safeAwaitFailure(SubscribableListener listener) { return safeAwait( SubscribableListener.newForked( @@ -2277,10 +2338,18 @@ public static Exception safeAwaitFailure(SubscribableListener listener) { ); } + /** + * Send the current thread to sleep for the given duration, asserting that the sleep is not interrupted but preserving the thread's + * interrupt status flag in any case. + */ public static void safeSleep(TimeValue timeValue) { safeSleep(timeValue.millis()); } + /** + * Send the current thread to sleep for the given number of milliseconds, asserting that the sleep is not interrupted but preserving the + * thread's interrupt status flag in any case. + */ public static void safeSleep(long millis) { try { Thread.sleep(millis); @@ -2290,6 +2359,34 @@ public static void safeSleep(long millis) { } } + /** + * Wait for all tasks currently running or enqueued on the given executor to complete. + */ + public static void flushThreadPoolExecutor(ThreadPool threadPool, String executorName) { + final var maxThreads = threadPool.info(executorName).getMax(); + final var barrier = new CyclicBarrier(maxThreads + 1); + final var executor = threadPool.executor(executorName); + for (int i = 0; i < maxThreads; i++) { + executor.execute(new AbstractRunnable() { + @Override + protected void doRun() { + safeAwait(barrier); + } + + @Override + public void onFailure(Exception e) { + fail(e, "unexpected"); + } + + @Override + public boolean isForceExecution() { + return true; + } + }); + } + safeAwait(barrier); + } + protected static boolean isTurkishLocale() { return Locale.getDefault().getLanguage().equals(new Locale("tr").getLanguage()) || Locale.getDefault().getLanguage().equals(new Locale("az").getLanguage()); @@ -2336,4 +2433,53 @@ public static T expectThrows(Class expectedType, Reques () -> builder.get().decRef() // dec ref if we unexpectedly fail to not leak transport response ); } + + /** + * Same as {@link #runInParallel(int, IntConsumer)} but also attempts to start all tasks at the same time by blocking execution on a + * barrier until all threads are started and ready to execute their task. + */ + public static void startInParallel(int numberOfTasks, IntConsumer taskFactory) throws InterruptedException { + final CyclicBarrier barrier = new CyclicBarrier(numberOfTasks); + runInParallel(numberOfTasks, i -> { + safeAwait(barrier); + taskFactory.accept(i); + }); + } + + /** + * Run {@code numberOfTasks} parallel tasks that were created by the given {@code taskFactory}. One of the tasks will be run on the + * calling thread, the rest will be run on a new thread.
+ * @param numberOfTasks number of tasks to run in parallel + * @param taskFactory task factory + */ + public static void runInParallel(int numberOfTasks, IntConsumer taskFactory) throws InterruptedException { + final ArrayList> futures = new ArrayList<>(numberOfTasks); + final Thread[] threads = new Thread[numberOfTasks - 1]; + for (int i = 0; i < numberOfTasks; i++) { + final int index = i; + var future = new FutureTask(() -> taskFactory.accept(index), null); + futures.add(future); + if (i == numberOfTasks - 1) { + future.run(); + } else { + threads[i] = new Thread(future); + threads[i].setName("runInParallel-T#" + i); + threads[i].start(); + } + } + for (Thread thread : threads) { + thread.join(); + } + Exception e = null; + for (Future future : futures) { + try { + future.get(); + } catch (Exception ex) { + e = ExceptionsHelper.useOrSuppress(e, ex); + } + } + if (e != null) { + throw new AssertionError(e); + } + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java index 12c5085cbcd73..4aed7ff4565cb 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java @@ -77,7 +77,7 @@ public static AggregationReduceContext.Builder emptyReduceContextBuilder(Aggrega return new AggregationReduceContext.Builder() { @Override public AggregationReduceContext forPartialReduction() { - return new AggregationReduceContext.ForPartial(BigArrays.NON_RECYCLING_INSTANCE, null, () -> false, aggs); + return new AggregationReduceContext.ForPartial(BigArrays.NON_RECYCLING_INSTANCE, null, () -> false, aggs, b -> {}); } @Override @@ -95,7 +95,7 @@ public static AggregationReduceContext.Builder mockReduceContext(AggregationBuil return new AggregationReduceContext.Builder() { @Override public AggregationReduceContext forPartialReduction() { - return new AggregationReduceContext.ForPartial(BigArrays.NON_RECYCLING_INSTANCE, null, () -> false, agg); + return new AggregationReduceContext.ForPartial(BigArrays.NON_RECYCLING_INSTANCE, null, () -> false, agg, b -> {}); } @Override @@ -244,7 +244,8 @@ public void testReduceRandom() throws IOException { bigArrays, mockScriptService, () -> false, - inputs.builder() + inputs.builder(), + b -> {} ); @SuppressWarnings("unchecked") T reduced = (T) reduce(toPartialReduce, context); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index bb78c43fca449..af37fb6feefbd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -61,8 +61,6 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.util.concurrent.FutureUtils; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Predicates; @@ -126,8 +124,6 @@ import java.util.TreeMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; import 
java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -148,6 +144,7 @@ import static org.elasticsearch.node.Node.INITIAL_STATE_TIMEOUT_SETTING; import static org.elasticsearch.test.ESTestCase.assertBusy; import static org.elasticsearch.test.ESTestCase.randomFrom; +import static org.elasticsearch.test.ESTestCase.runInParallel; import static org.elasticsearch.test.ESTestCase.safeAwait; import static org.elasticsearch.test.NodeRoles.dataOnlyNode; import static org.elasticsearch.test.NodeRoles.masterOnlyNode; @@ -246,8 +243,6 @@ public String toString() { private final NodeConfigurationSource nodeConfigurationSource; - private final ExecutorService executor; - private final boolean autoManageMasterNodes; private final Collection> mockPlugins; @@ -452,16 +447,6 @@ public InternalTestCluster( builder.put(NoMasterBlockService.NO_MASTER_BLOCK_SETTING.getKey(), randomFrom(random, "write", "metadata_write")); builder.put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false); defaultSettings = builder.build(); - executor = EsExecutors.newScaling( - "internal_test_cluster_executor", - 0, - Integer.MAX_VALUE, - 0, - TimeUnit.SECONDS, - true, - EsExecutors.daemonThreadFactory("test_" + clusterName), - new ThreadContext(Settings.EMPTY) - ); } /** @@ -931,7 +916,6 @@ public synchronized void close() throws IOException { } finally { nodes = Collections.emptyNavigableMap(); Loggers.setLevel(nodeConnectionLogger, initialLogLevel); - executor.shutdownNow(); } } } @@ -1760,18 +1744,10 @@ private synchronized void startAndPublishNodesAndClients(List nod .filter(nac -> nodes.containsKey(nac.name) == false) // filter out old masters .count(); rebuildUnicastHostFiles(nodeAndClients); // ensure that new nodes can find the existing nodes when they start - List> futures = nodeAndClients.stream().map(node -> executor.submit(node::startNode)).collect(Collectors.toList()); - try { - for (Future future : futures) { - future.get(); - } + runInParallel(nodeAndClients.size(), i -> nodeAndClients.get(i).startNode()); } catch (InterruptedException e) { throw new AssertionError("interrupted while starting nodes", e); - } catch (ExecutionException e) { - RuntimeException re = FutureUtils.rethrowExecutionException(e); - re.addSuppressed(new RuntimeException("failed to start nodes")); - throw re; } nodeAndClients.forEach(this::publishNode); diff --git a/test/framework/src/main/java/org/elasticsearch/test/TaskAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/TaskAssertions.java index b4ecc36fc5b97..d0862c91537cb 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TaskAssertions.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TaskAssertions.java @@ -10,16 +10,19 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.core.Nullable; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskInfo; -import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.transport.TransportService; +import java.util.ArrayList; import java.util.List; +import java.util.Objects; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import static junit.framework.TestCase.assertFalse; import static junit.framework.TestCase.assertTrue; import static junit.framework.TestCase.fail; import static org.elasticsearch.test.ESIntegTestCase.client; @@ -59,30 +62,28 @@ private static 
void awaitTaskWithPrefix(String actionPrefix, Iterable checking that all tasks with prefix {} are marked as cancelled", actionPrefix); assertBusy(() -> { - boolean foundTask = false; + var tasks = new ArrayList(); for (TransportService transportService : internalCluster().getInstances(TransportService.class)) { - final TaskManager taskManager = transportService.getTaskManager(); + var taskManager = transportService.getTaskManager(); assertTrue(taskManager.assertCancellableTaskConsistency()); - for (CancellableTask cancellableTask : taskManager.getCancellableTasks().values()) { - if (cancellableTask.getAction().startsWith(actionPrefix)) { - logger.trace("--> found task with prefix [{}]: [{}]", actionPrefix, cancellableTask); - foundTask = true; - assertTrue( - "task " + cancellableTask.getId() + "/" + cancellableTask.getAction() + " not cancelled", - cancellableTask.isCancelled() - ); - logger.trace("--> Task with prefix [{}] is marked as cancelled: [{}]", actionPrefix, cancellableTask); - } - } + taskManager.getCancellableTasks().values().stream().filter(t -> t.getAction().startsWith(actionPrefix)).forEach(tasks::add); } - assertTrue("found no cancellable tasks", foundTask); + assertFalse("no tasks found for action: " + actionPrefix, tasks.isEmpty()); + assertTrue( + tasks.toString(), + tasks.stream().allMatch(t -> t.isCancelled() && Objects.equals(t.getHeader(Task.X_OPAQUE_ID_HTTP_HEADER), opaqueId)) + ); }, 30, TimeUnit.SECONDS); } + public static void assertAllCancellableTasksAreCancelled(String actionPrefix) throws Exception { + assertAllCancellableTasksAreCancelled(actionPrefix, null); + } + public static void assertAllTasksHaveFinished(String actionPrefix) throws Exception { logger.info("--> checking that all tasks with prefix {} have finished", actionPrefix); assertBusy(() -> { diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestGeoShapeFieldMapperPlugin.java b/test/framework/src/main/java/org/elasticsearch/test/TestGeoShapeFieldMapperPlugin.java deleted file mode 100644 index cd373432992d2..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/test/TestGeoShapeFieldMapperPlugin.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.test; - -import org.elasticsearch.index.mapper.GeoShapeFieldMapper; -import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.plugins.MapperPlugin; -import org.elasticsearch.plugins.Plugin; - -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.Map; - -/** - * Some tests depend on the {@link org.elasticsearch.index.mapper.GeoShapeFieldMapper}. - * This mapper is registered in the spatial-extras module, but used in many integration - * tests in server code. 
The goal is to migrate all of the spatial/geo pieces to the spatial-extras - * module such that no tests in server depend on this test plugin - */ -@Deprecated -public class TestGeoShapeFieldMapperPlugin extends Plugin implements MapperPlugin { - - @Override - public Map getMappers() { - Map mappers = new LinkedHashMap<>(); - mappers.put(GeoShapeFieldMapper.CONTENT_TYPE, GeoShapeFieldMapper.PARSER); - return Collections.unmodifiableMap(mappers); - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java index 5844dcbd66471..a591fc86979c3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java +++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java @@ -67,7 +67,7 @@ public void testFailure(Failure failure) throws Exception { boolean isBwcTest = Boolean.parseBoolean(System.getProperty("tests.bwc", "false")); // append Gradle test runner test filter string - b.append("'" + task + "'"); + b.append("\"" + task + "\""); if (isBwcTest) { // Use "legacy" method for bwc tests so that it applies globally to all upstream bwc test tasks b.append(" -Dtests.class=\""); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java index 78a4126ec09db..92d72afbf9d52 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java @@ -86,19 +86,6 @@ public boolean clusterHasFeature(String featureId) { Matcher matcher = VERSION_FEATURE_PATTERN.matcher(featureId); if (matcher.matches()) { Version extractedVersion = Version.fromString(matcher.group(1)); - if (Version.V_8_15_0.before(extractedVersion)) { - // As of version 8.14.0 REST tests have been migrated to use features only. - // For migration purposes we provide a synthetic version feature gte_vX.Y.Z for any version at or before 8.15.0 - // allowing for some transition period. - throw new IllegalArgumentException( - Strings.format( - "Synthetic version features are only available before [%s] for migration purposes! 
" - + "Please add a cluster feature to an appropriate FeatureSpecification; test-only historical-features " - + "can be supplied via ESRestTestCase#additionalTestOnlyHistoricalFeatures()", - Version.V_8_15_0 - ) - ); - } return version.onOrAfter(extractedVersion); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java index fad8575ae1d58..9fed08234f7a4 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java @@ -18,7 +18,7 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.tasks.Task; -import org.elasticsearch.telemetry.tracing.Tracer; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpNodeClient; import org.elasticsearch.threadpool.TestThreadPool; @@ -45,7 +45,7 @@ public abstract class RestActionTestCase extends ESTestCase { public void setUpController() { threadPool = createThreadPool(); verifyingClient = new VerifyingClient(threadPool); - controller = new RestController(null, verifyingClient, new NoneCircuitBreakerService(), new UsageService(), Tracer.NOOP); + controller = new RestController(null, verifyingClient, new NoneCircuitBreakerService(), new UsageService(), TelemetryProvider.NOOP); } @After diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index 51893e551ba88..da478cbf1cb26 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.master.MasterNodeRequestHelper; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; @@ -574,7 +575,8 @@ public void sendRequest( RequestHandlerRegistry reg = MockTransportService.this.getRequestHandler(action); clonedRequest = reg.newRequest(bStream.bytes().streamInput()); } - assert clonedRequest.getClass().equals(request.getClass()) : clonedRequest + " vs " + request; + assert clonedRequest.getClass().equals(MasterNodeRequestHelper.unwrapTermOverride(request).getClass()) + : clonedRequest + " vs " + request; final RunOnce runnable = new RunOnce(new AbstractRunnable() { @Override diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/ESTestCaseTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/ESTestCaseTests.java index 125c0563577fc..714c9bcde0469 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/ESTestCaseTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/ESTestCaseTests.java @@ -45,6 +45,7 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; import static 
org.hamcrest.Matchers.notNullValue; @@ -185,6 +186,10 @@ public void testRandomNonNegativeInt() { assertThat(randomNonNegativeInt(), greaterThanOrEqualTo(0)); } + public void testRandomNegativeInt() { + assertThat(randomNegativeInt(), lessThan(0)); + } + public void testRandomValueOtherThan() { // "normal" way of calling where the value is not null int bad = randomInt(); diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java index d555337f467ae..a8a33da27aebe 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java @@ -17,7 +17,11 @@ public enum FeatureFlag { TIME_SERIES_MODE("es.index_mode_feature_flag_registered=true", Version.fromString("8.0.0"), null), FAILURE_STORE_ENABLED("es.failure_store_feature_flag_enabled=true", Version.fromString("8.12.0"), null), - SEMANTIC_TEXT_ENABLED("es.semantic_text_feature_flag_enabled=true", Version.fromString("8.15.0"), null); + INFERENCE_ADAPTIVE_ALLOCATIONS_ENABLED( + "es.inference_adaptive_allocations_feature_flag_enabled=true", + Version.fromString("8.16.0"), + null + ); public final String systemProperty; public final Version from; diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapper.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapper.java index 9fe7fc647455e..c1f35b9a58d55 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapper.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapper.java @@ -97,8 +97,8 @@ protected Parameter[] getParameters() { @Override public HistogramFieldMapper build(MapperBuilderContext context) { return new HistogramFieldMapper( - name(), - new HistogramFieldType(context.buildFullName(name()), meta.getValue()), + leafName(), + new HistogramFieldType(context.buildFullName(leafName()), meta.getValue()), multiFieldsBuilder.build(this, context), copyTo, this @@ -138,7 +138,7 @@ protected String contentType() { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), ignoreMalformedByDefault).init(this); + return new Builder(leafName(), ignoreMalformedByDefault).init(this); } @Override @@ -289,7 +289,7 @@ protected boolean supportsParsingObject() { @Override public void parse(DocumentParserContext context) throws IOException { - context.path().add(simpleName()); + context.path().add(leafName()); boolean shouldStoreMalformedDataForSyntheticSource = context.mappingLookup().isSourceSynthetic() && ignoreMalformed(); XContentParser.Token token; @@ -334,7 +334,7 @@ public void parse(DocumentParserContext context) throws IOException { throw new DocumentParsingException( subParser.getTokenLocation(), "error parsing field [" - + name() + + fullPath() + "], [" + VALUES_FIELD + "] values must be in increasing order, got [" @@ -363,7 +363,7 @@ public void parse(DocumentParserContext context) throws IOException { } else { throw new DocumentParsingException( subParser.getTokenLocation(), - "error parsing field [" + name() + "], with unknown parameter [" + fieldName + "]" + "error parsing field [" + fullPath() + "], with unknown parameter [" + fieldName + "]" ); } token = subParser.nextToken(); @@ -371,20 +371,20 @@ public void 
parse(DocumentParserContext context) throws IOException { if (values == null) { throw new DocumentParsingException( subParser.getTokenLocation(), - "error parsing field [" + name() + "], expected field called [" + VALUES_FIELD.getPreferredName() + "]" + "error parsing field [" + fullPath() + "], expected field called [" + VALUES_FIELD.getPreferredName() + "]" ); } if (counts == null) { throw new DocumentParsingException( subParser.getTokenLocation(), - "error parsing field [" + name() + "], expected field called [" + COUNTS_FIELD.getPreferredName() + "]" + "error parsing field [" + fullPath() + "], expected field called [" + COUNTS_FIELD.getPreferredName() + "]" ); } if (values.size() != counts.size()) { throw new DocumentParsingException( subParser.getTokenLocation(), "error parsing field [" - + name() + + fullPath() + "], expected same length from [" + VALUES_FIELD.getPreferredName() + "] and " @@ -403,7 +403,7 @@ public void parse(DocumentParserContext context) throws IOException { if (count < 0) { throw new DocumentParsingException( subParser.getTokenLocation(), - "error parsing field [" + name() + "], [" + COUNTS_FIELD + "] elements must be >= 0 but got " + counts.get(i) + "error parsing field [" + fullPath() + "], [" + COUNTS_FIELD + "] elements must be >= 0 but got " + counts.get(i) ); } else if (count > 0) { // we do not add elements with count == 0 @@ -416,11 +416,11 @@ public void parse(DocumentParserContext context) throws IOException { } } BytesRef docValue = streamOutput.bytes().toBytesRef(); - Field field = new BinaryDocValuesField(name(), docValue); + Field field = new BinaryDocValuesField(fullPath(), docValue); if (context.doc().getByKey(fieldType().name()) != null) { throw new IllegalArgumentException( "Field [" - + name() + + fullPath() + "] of type [" + typeName() + "] doesn't support indexing multiple values for the same field in the same document" @@ -448,7 +448,7 @@ public void parse(DocumentParserContext context) throws IOException { } if (malformedDataForSyntheticSource != null) { - context.doc().add(IgnoreMalformedStoredValues.storedField(name(), malformedDataForSyntheticSource)); + context.doc().add(IgnoreMalformedStoredValues.storedField(fullPath(), malformedDataForSyntheticSource)); } context.addIgnoredField(fieldType().name()); @@ -516,15 +516,15 @@ protected SyntheticSourceMode syntheticSourceMode() { public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { if (copyTo.copyToFields().isEmpty() != true) { throw new IllegalArgumentException( - "field [" + name() + "] of type [histogram] doesn't support synthetic source because it declares copy_to" + "field [" + fullPath() + "] of type [histogram] doesn't support synthetic source because it declares copy_to" ); } return new CompositeSyntheticFieldLoader( - simpleName(), - name(), + leafName(), + fullPath(), new HistogramSyntheticFieldLoader(), - new CompositeSyntheticFieldLoader.MalformedValuesLayer(name()) + new CompositeSyntheticFieldLoader.MalformedValuesLayer(fullPath()) ); } @@ -587,7 +587,7 @@ public void write(XContentBuilder b) throws IOException { @Override public String fieldName() { - return name(); + return fullPath(); } @Override diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregatorTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregatorTests.java index 7ab1f555a6529..297dbf5233922 100644 --- 
a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregatorTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregatorTests.java @@ -270,24 +270,24 @@ public void testEmptyBucket() throws IOException { }, (Consumer) histo -> { assertThat(histo.getBuckets().size(), equalTo(3)); - assertNotNull(histo.getBuckets().get(0).getAggregations().asMap().get("boxplot")); - InternalBoxplot boxplot = (InternalBoxplot) histo.getBuckets().get(0).getAggregations().asMap().get("boxplot"); + assertNotNull(histo.getBuckets().get(0).getAggregations().get("boxplot")); + InternalBoxplot boxplot = histo.getBuckets().get(0).getAggregations().get("boxplot"); assertEquals(1, boxplot.getMin(), 0); assertEquals(3, boxplot.getMax(), 0); assertEquals(1.5, boxplot.getQ1(), 0); assertEquals(2, boxplot.getQ2(), 0); assertEquals(2.5, boxplot.getQ3(), 0); - assertNotNull(histo.getBuckets().get(1).getAggregations().asMap().get("boxplot")); - boxplot = (InternalBoxplot) histo.getBuckets().get(1).getAggregations().asMap().get("boxplot"); + assertNotNull(histo.getBuckets().get(1).getAggregations().get("boxplot")); + boxplot = histo.getBuckets().get(1).getAggregations().get("boxplot"); assertEquals(Double.POSITIVE_INFINITY, boxplot.getMin(), 0); assertEquals(Double.NEGATIVE_INFINITY, boxplot.getMax(), 0); assertEquals(Double.NaN, boxplot.getQ1(), 0); assertEquals(Double.NaN, boxplot.getQ2(), 0); assertEquals(Double.NaN, boxplot.getQ3(), 0); - assertNotNull(histo.getBuckets().get(2).getAggregations().asMap().get("boxplot")); - boxplot = (InternalBoxplot) histo.getBuckets().get(2).getAggregations().asMap().get("boxplot"); + assertNotNull(histo.getBuckets().get(2).getAggregations().get("boxplot")); + boxplot = histo.getBuckets().get(2).getAggregations().get("boxplot"); assertEquals(21, boxplot.getMin(), 0); assertEquals(23, boxplot.getMax(), 0); assertEquals(21.5, boxplot.getQ1(), 0); @@ -337,8 +337,8 @@ public void testGetProperty() throws IOException { }, (Consumer) global -> { assertEquals(5, global.getDocCount()); assertTrue(AggregationInspectionHelper.hasValue(global)); - assertNotNull(global.getAggregations().asMap().get("boxplot")); - InternalBoxplot boxplot = (InternalBoxplot) global.getAggregations().asMap().get("boxplot"); + assertNotNull(global.getAggregations().get("boxplot")); + InternalBoxplot boxplot = global.getAggregations().get("boxplot"); assertThat(global.getProperty("boxplot"), equalTo(boxplot)); assertThat(global.getProperty("boxplot.min"), equalTo(1.0)); assertThat(global.getProperty("boxplot.max"), equalTo(5.0)); diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilderTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilderTests.java index ff345b1dac59c..7eea5b0f741c2 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilderTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilderTests.java @@ -161,10 +161,10 @@ public void testSupportsParallelCollection() { List fields = new ArrayList<>(); assertTrue(builder.supportsParallelCollection(field -> { fields.add(field); - return randomIntBetween(0, 10); + return randomIntBetween(0, 9); })); assertEquals(List.of("field1", "field2"), fields); - assertFalse(builder.supportsParallelCollection(field -> 
randomIntBetween(11, 100))); + assertFalse(builder.supportsParallelCollection(field -> randomIntBetween(10, 100))); terms.terms( List.of( sourceBuilder1.build(), @@ -183,14 +183,14 @@ public void testSupportsParallelCollection() { List.of(sourceBuilder1.build(), sourceBuilder2.build()) ); terms.shardSize(10); - assertTrue(terms.supportsParallelCollection(field -> randomIntBetween(0, 10))); + assertTrue(terms.supportsParallelCollection(field -> randomIntBetween(0, 9))); terms.subAggregation(new TermsAggregationBuilder("name") { @Override public boolean supportsParallelCollection(ToLongFunction fieldCardinalityResolver) { return false; } }); - assertFalse(terms.supportsParallelCollection(field -> randomIntBetween(0, 10))); + assertFalse(terms.supportsParallelCollection(field -> randomIntBetween(0, 9))); } { MultiValuesSourceFieldConfig.Builder sourceBuilder1 = new MultiValuesSourceFieldConfig.Builder(); diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/ttest/TTestAggregatorTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/ttest/TTestAggregatorTests.java index 26c71b8af5102..dcee4d4b05c21 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/ttest/TTestAggregatorTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/ttest/TTestAggregatorTests.java @@ -410,20 +410,20 @@ public void testEmptyBucket() throws IOException { ); }, (Consumer) histo -> { assertEquals(3, histo.getBuckets().size()); - assertNotNull(histo.getBuckets().get(0).getAggregations().asMap().get("t_test")); - InternalTTest tTest = (InternalTTest) histo.getBuckets().get(0).getAggregations().asMap().get("t_test"); + assertNotNull(histo.getBuckets().get(0).getAggregations().get("t_test")); + InternalTTest tTest = histo.getBuckets().get(0).getAggregations().get("t_test"); assertEquals( tTestType == TTestType.PAIRED ? 0.1939778614 : tTestType == TTestType.HOMOSCEDASTIC ? 0.05878871029 : 0.07529006595, tTest.getValue(), 0.000001 ); - assertNotNull(histo.getBuckets().get(1).getAggregations().asMap().get("t_test")); - tTest = (InternalTTest) histo.getBuckets().get(1).getAggregations().asMap().get("t_test"); + assertNotNull(histo.getBuckets().get(1).getAggregations().get("t_test")); + tTest = histo.getBuckets().get(1).getAggregations().get("t_test"); assertEquals(Double.NaN, tTest.getValue(), 0.000001); - assertNotNull(histo.getBuckets().get(2).getAggregations().asMap().get("t_test")); - tTest = (InternalTTest) histo.getBuckets().get(2).getAggregations().asMap().get("t_test"); + assertNotNull(histo.getBuckets().get(2).getAggregations().get("t_test")); + tTest = histo.getBuckets().get(2).getAggregations().get("t_test"); assertEquals( tTestType == TTestType.PAIRED ? 0.6666666667 : tTestType == TTestType.HOMOSCEDASTIC ? 
0.8593081179 : 0.8594865044, tTest.getValue(), @@ -475,8 +475,8 @@ public void testGetProperty() throws IOException { }, (Consumer) global -> { assertEquals(3, global.getDocCount()); assertTrue(AggregationInspectionHelper.hasValue(global)); - assertNotNull(global.getAggregations().asMap().get("t_test")); - InternalTTest tTest = (InternalTTest) global.getAggregations().asMap().get("t_test"); + assertNotNull(global.getAggregations().get("t_test")); + InternalTTest tTest = global.getAggregations().get("t_test"); assertEquals(tTest, global.getProperty("t_test")); assertEquals(0.1939778614, (Double) global.getProperty("t_test.value"), 0.000001); }, new AggTestConfig(globalBuilder, fieldType1, fieldType2)); diff --git a/x-pack/plugin/blob-cache/build.gradle b/x-pack/plugin/blob-cache/build.gradle index b938e38a152ae..da9c48438af28 100644 --- a/x-pack/plugin/blob-cache/build.gradle +++ b/x-pack/plugin/blob-cache/build.gradle @@ -16,5 +16,5 @@ esplugin { } dependencies { - compileOnly project(path: ':libs:elasticsearch-preallocate') + compileOnly project(path: ':libs:elasticsearch-native') } diff --git a/x-pack/plugin/blob-cache/src/main/java/module-info.java b/x-pack/plugin/blob-cache/src/main/java/module-info.java index 5d895401c273d..23c389b8cb353 100644 --- a/x-pack/plugin/blob-cache/src/main/java/module-info.java +++ b/x-pack/plugin/blob-cache/src/main/java/module-info.java @@ -8,7 +8,7 @@ module org.elasticsearch.blobcache { requires org.elasticsearch.base; requires org.elasticsearch.server; - requires org.elasticsearch.preallocate; + requires org.elasticsearch.nativeaccess; requires org.apache.logging.log4j; requires org.apache.lucene.core; diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/ProgressListenableActionFuture.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/ProgressListenableActionFuture.java index 00cc9554a64eb..c85dc46d5d8e9 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/ProgressListenableActionFuture.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/ProgressListenableActionFuture.java @@ -12,12 +12,13 @@ import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.core.Nullable; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.ExecutionException; import java.util.function.LongConsumer; -import java.util.function.Supplier; /** * An {@link ActionFuture} that listeners can be attached to. Listeners are executed when the future is completed @@ -200,7 +201,23 @@ public void addListener(ActionListener listener, long value) { assert invariant(); } - private static void executeListener(final ActionListener listener, final Supplier result) { + /** + * Return the result of this future, if it has been completed successfully, or unwrap and throw the exception with which it was + * completed exceptionally. It is not valid to call this method if the future is incomplete. 
+ */ + private Long actionResult() throws Exception { + try { + return result(); + } catch (ExecutionException e) { + if (e.getCause() instanceof Exception exCause) { + throw exCause; + } else { + throw e; + } + } + } + + private static void executeListener(final ActionListener listener, final CheckedSupplier result) { try { listener.onResponse(result.get()); } catch (Exception e) { diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java index 051dfab1cdaa0..ad0d99104e8a4 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java @@ -18,10 +18,11 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.preallocate.Preallocate; +import org.elasticsearch.nativeaccess.NativeAccess; import java.io.IOException; import java.io.InputStream; +import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.MappedByteBuffer; import java.nio.channels.FileChannel; @@ -78,7 +79,7 @@ public class SharedBytes extends AbstractRefCounted { Path cacheFile = null; if (fileSize > 0) { cacheFile = findCacheSnapshotCacheFilePath(environment, fileSize); - Preallocate.preallocate(cacheFile, fileSize); + preallocate(cacheFile, fileSize); this.fileChannel = FileChannel.open(cacheFile, OPEN_OPTIONS); assert this.fileChannel.size() == fileSize : "expected file size " + fileSize + " but was " + fileChannel.size(); } else { @@ -141,6 +142,24 @@ public static Path findCacheSnapshotCacheFilePath(NodeEnvironment environment, l } } + @SuppressForbidden(reason = "random access file needed to set file size") + static void preallocate(Path cacheFile, long fileSize) throws IOException { + // first try using native methods to preallocate space in the file + NativeAccess.instance().tryPreallocate(cacheFile, fileSize); + // even if allocation was successful above, verify again here + try (RandomAccessFile raf = new RandomAccessFile(cacheFile.toFile(), "rw")) { + if (raf.length() != fileSize) { + logger.info("pre-allocating cache file [{}] ({} bytes) using setLength method", cacheFile, fileSize); + raf.setLength(fileSize); + logger.debug("pre-allocated cache file [{}] using setLength method", cacheFile); + } + } catch (final Exception e) { + logger.warn(() -> "failed to pre-allocate cache file [" + cacheFile + "] using setLength method", e); + // if anything goes wrong, delete the potentially created file to not waste disk space + Files.deleteIfExists(cacheFile); + } + } + /** * Copy {@code length} bytes from {@code input} to {@code fc}, only doing writes aligned along {@link #PAGE_SIZE}. 
* diff --git a/x-pack/plugin/core/build.gradle b/x-pack/plugin/core/build.gradle index 0c65c7e4b6d29..1ed59d6fe3581 100644 --- a/x-pack/plugin/core/build.gradle +++ b/x-pack/plugin/core/build.gradle @@ -51,7 +51,6 @@ dependencies { // security deps api 'com.unboundid:unboundid-ldapsdk:6.0.3' - api "com.nimbusds:nimbus-jose-jwt:9.23" implementation project(":x-pack:plugin:core:template-resources") @@ -135,27 +134,7 @@ tasks.named("thirdPartyAudit").configure { //commons-logging provided dependencies 'javax.servlet.ServletContextEvent', 'javax.servlet.ServletContextListener', - 'javax.jms.Message', - // Optional dependency of nimbus-jose-jwt for handling Ed25519 signatures and ECDH with X25519 (RFC 8037) - 'com.google.crypto.tink.subtle.Ed25519Sign', - 'com.google.crypto.tink.subtle.Ed25519Sign$KeyPair', - 'com.google.crypto.tink.subtle.Ed25519Verify', - 'com.google.crypto.tink.subtle.X25519', - 'com.google.crypto.tink.subtle.XChaCha20Poly1305', - // optional dependencies for nimbus-jose-jwt - 'org.bouncycastle.asn1.pkcs.PrivateKeyInfo', - 'org.bouncycastle.asn1.x509.AlgorithmIdentifier', - 'org.bouncycastle.asn1.x509.SubjectPublicKeyInfo', - 'org.bouncycastle.cert.X509CertificateHolder', - 'org.bouncycastle.cert.jcajce.JcaX509CertificateHolder', - 'org.bouncycastle.crypto.InvalidCipherTextException', - 'org.bouncycastle.crypto.engines.AESEngine', - 'org.bouncycastle.crypto.modes.GCMBlockCipher', - 'org.bouncycastle.jcajce.provider.BouncyCastleFipsProvider', - 'org.bouncycastle.jce.provider.BouncyCastleProvider', - 'org.bouncycastle.openssl.PEMKeyPair', - 'org.bouncycastle.openssl.PEMParser', - 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter' + 'javax.jms.Message' ) } diff --git a/x-pack/plugin/core/src/main/java/module-info.java b/x-pack/plugin/core/src/main/java/module-info.java index a37946200a47d..72436bb9d5171 100644 --- a/x-pack/plugin/core/src/main/java/module-info.java +++ b/x-pack/plugin/core/src/main/java/module-info.java @@ -22,7 +22,6 @@ requires unboundid.ldapsdk; requires org.elasticsearch.tdigest; requires org.elasticsearch.xcore.templates; - requires com.nimbusds.jose.jwt; exports org.elasticsearch.index.engine.frozen; exports org.elasticsearch.license; @@ -119,9 +118,11 @@ exports org.elasticsearch.xpack.core.ml.job.process.autodetect.state; exports org.elasticsearch.xpack.core.ml.job.results; exports org.elasticsearch.xpack.core.ml.job.snapshot.upgrade; + exports org.elasticsearch.xpack.core.ml.ltr; exports org.elasticsearch.xpack.core.ml.notifications; exports org.elasticsearch.xpack.core.ml.packageloader.action; exports org.elasticsearch.xpack.core.ml.process.writer; + exports org.elasticsearch.xpack.core.ml.search; exports org.elasticsearch.xpack.core.ml.stats; exports org.elasticsearch.xpack.core.ml.utils.time; exports org.elasticsearch.xpack.core.ml.utils; @@ -227,8 +228,6 @@ exports org.elasticsearch.xpack.core.watcher.trigger; exports org.elasticsearch.xpack.core.watcher.watch; exports org.elasticsearch.xpack.core.watcher; - exports org.elasticsearch.xpack.core.ml.ltr; - exports org.elasticsearch.xpack.core.ml.search; provides org.elasticsearch.action.admin.cluster.node.info.ComponentVersionNumber with diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotRepository.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotRepository.java index de5b5e4d825a2..fd101e53cc90e 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotRepository.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotRepository.java @@ -173,7 +173,7 @@ public void snapshotShard(SnapshotShardContext context) { protected void closeInternal() { // do nothing; } - }, Store.OnClose.EMPTY); + }, Store.OnClose.EMPTY, mapperService.getIndexSettings().getIndexSortConfig().hasIndexSort()); Supplier querySupplier = mapperService.hasNested() ? () -> Queries.newNestedFilter(mapperService.getIndexSettings().getIndexVersionCreated()) : null; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java index 801ef2c463e95..4ed2e2a8e056c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java @@ -87,7 +87,7 @@ public final class XPackField { /** Name constant for the redact processor feature. */ public static final String REDACT_PROCESSOR = "redact_processor"; - /* Name for Universal Profiling. */ + /** Name for Universal Profiling. */ public static final String UNIVERSAL_PROFILING = "universal_profiling"; private XPackField() {} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/EnrichStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/EnrichStatsAction.java index 46425a526c53f..fc9087d97bd79 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/EnrichStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/EnrichStatsAction.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.TimeValue; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.xcontent.ToXContentFragment; @@ -195,7 +196,8 @@ public record CacheStats( long misses, long evictions, long hitsTimeInMillis, - long missesTimeInMillis + long missesTimeInMillis, + long cacheSizeInBytes ) implements Writeable, ToXContentFragment { public CacheStats(StreamInput in) throws IOException { @@ -206,7 +208,8 @@ public CacheStats(StreamInput in) throws IOException { in.readVLong(), in.readVLong(), in.getTransportVersion().onOrAfter(TransportVersions.ENRICH_CACHE_ADDITIONAL_STATS) ? in.readLong() : -1, - in.getTransportVersion().onOrAfter(TransportVersions.ENRICH_CACHE_ADDITIONAL_STATS) ? in.readLong() : -1 + in.getTransportVersion().onOrAfter(TransportVersions.ENRICH_CACHE_ADDITIONAL_STATS) ? in.readLong() : -1, + in.getTransportVersion().onOrAfter(TransportVersions.ENRICH_CACHE_STATS_SIZE_ADDED) ? 
in.readLong() : -1 ); } @@ -219,6 +222,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("evictions", evictions); builder.humanReadableField("hits_time_in_millis", "hits_time", new TimeValue(hitsTimeInMillis)); builder.humanReadableField("misses_time_in_millis", "misses_time", new TimeValue(missesTimeInMillis)); + builder.humanReadableField("size_in_bytes", "size", ByteSizeValue.ofBytes(cacheSizeInBytes)); return builder; } @@ -233,6 +237,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(hitsTimeInMillis); out.writeLong(missesTimeInMillis); } + if (out.getTransportVersion().onOrAfter(TransportVersions.ENRICH_CACHE_STATS_SIZE_ADDED)) { + out.writeLong(cacheSizeInBytes); + } } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/ColumnInfo.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/ColumnInfo.java index b3248077397c2..0c86b8ae4b757 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/ColumnInfo.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/action/ColumnInfo.java @@ -7,52 +7,23 @@ package org.elasticsearch.xpack.core.esql.action; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.xcontent.InstantiatingObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; - -public record ColumnInfo(String name, String type) implements Writeable { - - private static final InstantiatingObjectParser PARSER; - static { - InstantiatingObjectParser.Builder parser = InstantiatingObjectParser.builder( - "esql/column_info", - true, - ColumnInfo.class - ); - parser.declareString(constructorArg(), new ParseField("name")); - parser.declareString(constructorArg(), new ParseField("type")); - PARSER = parser.build(); +public interface ColumnInfo extends Writeable { + /* + static ColumnInfo fromXContent(XContentParser parser) { + return ColumnInfoImpl.PARSER.apply(parser, null); } - public static ColumnInfo fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } + */ - public ColumnInfo(StreamInput in) throws IOException { - this(in.readString(), in.readString()); - } + XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException; - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(name); - out.writeString(type); - } + String name(); - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startObject(); - builder.field("name", name); - builder.field("type", type); - builder.endObject(); - return builder; - } + String outputType(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/DeleteInferenceEndpointAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/DeleteInferenceEndpointAction.java index 19542ef466156..e9d612751e48f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/DeleteInferenceEndpointAction.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/DeleteInferenceEndpointAction.java @@ -11,8 +11,10 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.TaskType; import org.elasticsearch.xcontent.XContentBuilder; @@ -105,38 +107,61 @@ public static class Response extends AcknowledgedResponse { private final String PIPELINE_IDS = "pipelines"; Set pipelineIds; + private final String REFERENCED_INDEXES = "indexes"; + Set indexes; + private final String DRY_RUN_MESSAGE = "error_message"; // error message only returned in response for dry_run + String dryRunMessage; - public Response(boolean acknowledged, Set pipelineIds) { + public Response(boolean acknowledged, Set pipelineIds, Set semanticTextIndexes, @Nullable String dryRunMessage) { super(acknowledged); this.pipelineIds = pipelineIds; + this.indexes = semanticTextIndexes; + this.dryRunMessage = dryRunMessage; } public Response(StreamInput in) throws IOException { super(in); - pipelineIds = in.readCollectionAsSet(StreamInput::readString); + if (in.getTransportVersion().onOrAfter(TransportVersions.ML_INFERENCE_ENHANCE_DELETE_ENDPOINT)) { + pipelineIds = in.readCollectionAsSet(StreamInput::readString); + } else { + pipelineIds = Set.of(); + } + + if (in.getTransportVersion().onOrAfter(TransportVersions.ML_INFERENCE_DONT_DELETE_WHEN_SEMANTIC_TEXT_EXISTS)) { + indexes = in.readCollectionAsSet(StreamInput::readString); + dryRunMessage = in.readOptionalString(); + } else { + indexes = Set.of(); + dryRunMessage = null; + } + } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeCollection(pipelineIds, StreamOutput::writeString); + if (out.getTransportVersion().onOrAfter(TransportVersions.ML_INFERENCE_ENHANCE_DELETE_ENDPOINT)) { + out.writeCollection(pipelineIds, StreamOutput::writeString); + } + if (out.getTransportVersion().onOrAfter(TransportVersions.ML_INFERENCE_DONT_DELETE_WHEN_SEMANTIC_TEXT_EXISTS)) { + out.writeCollection(indexes, StreamOutput::writeString); + out.writeOptionalString(dryRunMessage); + } } @Override protected void addCustomFields(XContentBuilder builder, Params params) throws IOException { super.addCustomFields(builder, params); builder.field(PIPELINE_IDS, pipelineIds); + builder.field(REFERENCED_INDEXES, indexes); + if (dryRunMessage != null) { + builder.field(DRY_RUN_MESSAGE, dryRunMessage); + } } @Override public String toString() { - StringBuilder returnable = new StringBuilder(); - returnable.append("acknowledged: ").append(this.acknowledged); - returnable.append(", pipelineIdsByEndpoint: "); - for (String entry : pipelineIds) { - returnable.append(entry).append(", "); - } - return returnable.toString(); + return Strings.toString(this); } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java index cfd4da0d59e31..229285510249c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java @@ -38,6 +38,8 @@ import java.util.Map; import java.util.Objects; +import static org.elasticsearch.core.Strings.format; + public class InferenceAction extends ActionType { public static final InferenceAction INSTANCE = new InferenceAction(); @@ -165,14 +167,29 @@ public TimeValue getInferenceTimeout() { public ActionRequestValidationException validate() { if (input == null) { var e = new ActionRequestValidationException(); - e.addValidationError("missing input"); + e.addValidationError("Field [input] cannot be null"); return e; } + if (input.isEmpty()) { var e = new ActionRequestValidationException(); - e.addValidationError("input array is empty"); + e.addValidationError("Field [input] cannot be an empty array"); return e; } + + if (taskType.equals(TaskType.RERANK)) { + if (query == null) { + var e = new ActionRequestValidationException(); + e.addValidationError(format("Field [query] cannot be null for task type [%s]", TaskType.RERANK)); + return e; + } + if (query.isEmpty()) { + var e = new ActionRequestValidationException(); + e.addValidationError(format("Field [query] cannot be empty for task type [%s]", TaskType.RERANK)); + return e; + } + } + return null; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CreateTrainedModelAssignmentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CreateTrainedModelAssignmentAction.java index 9b383b2652af4..c6976ab4b513e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CreateTrainedModelAssignmentAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CreateTrainedModelAssignmentAction.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.ml.action; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; @@ -18,6 +19,7 @@ import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -34,15 +36,22 @@ private CreateTrainedModelAssignmentAction() { public static class Request extends MasterNodeRequest { private final StartTrainedModelDeploymentAction.TaskParams taskParams; + private final AdaptiveAllocationsSettings adaptiveAllocationsSettings; - public Request(StartTrainedModelDeploymentAction.TaskParams taskParams) { + public Request(StartTrainedModelDeploymentAction.TaskParams taskParams, AdaptiveAllocationsSettings adaptiveAllocationsSettings) { super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.taskParams = ExceptionsHelper.requireNonNull(taskParams, "taskParams"); + this.adaptiveAllocationsSettings = adaptiveAllocationsSettings; } public Request(StreamInput in) throws IOException { super(in); this.taskParams = new StartTrainedModelDeploymentAction.TaskParams(in); + if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) { + this.adaptiveAllocationsSettings = in.readOptionalWriteable(AdaptiveAllocationsSettings::new); + } else { + this.adaptiveAllocationsSettings = null; + } } @Override @@ -54,6 
+63,9 @@ public ActionRequestValidationException validate() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); taskParams.writeTo(out); + if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) { + out.writeOptionalWriteable(adaptiveAllocationsSettings); + } } @Override @@ -61,17 +73,22 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Request request = (Request) o; - return Objects.equals(taskParams, request.taskParams); + return Objects.equals(taskParams, request.taskParams) + && Objects.equals(adaptiveAllocationsSettings, request.adaptiveAllocationsSettings); } @Override public int hashCode() { - return Objects.hash(taskParams); + return Objects.hash(taskParams, adaptiveAllocationsSettings); } public StartTrainedModelDeploymentAction.TaskParams getTaskParams() { return taskParams; } + + public AdaptiveAllocationsSettings getAdaptiveAllocationsSettings() { + return adaptiveAllocationsSettings; + } } public static class Response extends ActionResponse implements ToXContentObject { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java index ca9b86a90f875..59eaf4affa9a8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java @@ -29,8 +29,11 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.ml.MlConfigVersion; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsFeatureFlag; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings; import org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus; import org.elasticsearch.xpack.core.ml.inference.assignment.Priority; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.MlTaskParams; @@ -40,7 +43,6 @@ import java.util.Optional; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.elasticsearch.xpack.core.ml.MlTasks.trainedModelAssignmentTaskDescription; public class StartTrainedModelDeploymentAction extends ActionType { @@ -99,6 +101,7 @@ public static class Request extends MasterNodeRequest implements ToXCon public static final ParseField QUEUE_CAPACITY = TaskParams.QUEUE_CAPACITY; public static final ParseField CACHE_SIZE = TaskParams.CACHE_SIZE; public static final ParseField PRIORITY = TaskParams.PRIORITY; + public static final ParseField ADAPTIVE_ALLOCATIONS = TrainedModelAssignment.ADAPTIVE_ALLOCATIONS; public static final ObjectParser PARSER = new ObjectParser<>(NAME, Request::new); @@ -117,6 +120,14 @@ public static class Request extends MasterNodeRequest implements ToXCon ObjectParser.ValueType.VALUE ); PARSER.declareString(Request::setPriority, PRIORITY); + if (AdaptiveAllocationsFeatureFlag.isEnabled()) { + PARSER.declareObjectOrNull( + 
Request::setAdaptiveAllocationsSettings, + (p, c) -> AdaptiveAllocationsSettings.PARSER.parse(p, c).build(), + null, + ADAPTIVE_ALLOCATIONS + ); + } } public static Request parseRequest(String modelId, String deploymentId, XContentParser parser) { @@ -140,7 +151,8 @@ public static Request parseRequest(String modelId, String deploymentId, XContent private TimeValue timeout = DEFAULT_TIMEOUT; private AllocationStatus.State waitForState = DEFAULT_WAITFOR_STATE; private ByteSizeValue cacheSize; - private int numberOfAllocations = DEFAULT_NUM_ALLOCATIONS; + private Integer numberOfAllocations; + private AdaptiveAllocationsSettings adaptiveAllocationsSettings = null; private int threadsPerAllocation = DEFAULT_NUM_THREADS; private int queueCapacity = DEFAULT_QUEUE_CAPACITY; private Priority priority = DEFAULT_PRIORITY; @@ -160,7 +172,11 @@ public Request(StreamInput in) throws IOException { modelId = in.readString(); timeout = in.readTimeValue(); waitForState = in.readEnum(AllocationStatus.State.class); - numberOfAllocations = in.readVInt(); + if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) { + numberOfAllocations = in.readOptionalVInt(); + } else { + numberOfAllocations = in.readVInt(); + } threadsPerAllocation = in.readVInt(); queueCapacity = in.readVInt(); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { @@ -171,12 +187,16 @@ public Request(StreamInput in) throws IOException { } else { this.priority = Priority.NORMAL; } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { this.deploymentId = in.readString(); } else { this.deploymentId = modelId; } + if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) { + this.adaptiveAllocationsSettings = in.readOptionalWriteable(AdaptiveAllocationsSettings::new); + } else { + this.adaptiveAllocationsSettings = null; + } } public final void setModelId(String modelId) { @@ -212,14 +232,34 @@ public Request setWaitForState(AllocationStatus.State waitForState) { return this; } - public int getNumberOfAllocations() { + public Integer getNumberOfAllocations() { return numberOfAllocations; } - public void setNumberOfAllocations(int numberOfAllocations) { + public int computeNumberOfAllocations() { + if (numberOfAllocations != null) { + return numberOfAllocations; + } else { + if (adaptiveAllocationsSettings == null || adaptiveAllocationsSettings.getMinNumberOfAllocations() == null) { + return DEFAULT_NUM_ALLOCATIONS; + } else { + return adaptiveAllocationsSettings.getMinNumberOfAllocations(); + } + } + } + + public void setNumberOfAllocations(Integer numberOfAllocations) { this.numberOfAllocations = numberOfAllocations; } + public AdaptiveAllocationsSettings getAdaptiveAllocationsSettings() { + return adaptiveAllocationsSettings; + } + + public void setAdaptiveAllocationsSettings(AdaptiveAllocationsSettings adaptiveAllocationsSettings) { + this.adaptiveAllocationsSettings = adaptiveAllocationsSettings; + } + public int getThreadsPerAllocation() { return threadsPerAllocation; } @@ -258,7 +298,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(modelId); out.writeTimeValue(timeout); out.writeEnum(waitForState); - out.writeVInt(numberOfAllocations); + if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) { + out.writeOptionalVInt(numberOfAllocations); + } else { + out.writeVInt(numberOfAllocations); + } out.writeVInt(threadsPerAllocation); out.writeVInt(queueCapacity); if 
(out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { @@ -270,6 +314,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeString(deploymentId); } + if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) { + out.writeOptionalWriteable(adaptiveAllocationsSettings); + } } @Override @@ -279,7 +326,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(DEPLOYMENT_ID.getPreferredName(), deploymentId); builder.field(TIMEOUT.getPreferredName(), timeout.getStringRep()); builder.field(WAIT_FOR.getPreferredName(), waitForState); - builder.field(NUMBER_OF_ALLOCATIONS.getPreferredName(), numberOfAllocations); + if (numberOfAllocations != null) { + builder.field(NUMBER_OF_ALLOCATIONS.getPreferredName(), numberOfAllocations); + } + if (adaptiveAllocationsSettings != null) { + builder.field(ADAPTIVE_ALLOCATIONS.getPreferredName(), adaptiveAllocationsSettings); + } builder.field(THREADS_PER_ALLOCATION.getPreferredName(), threadsPerAllocation); builder.field(QUEUE_CAPACITY.getPreferredName(), queueCapacity); if (cacheSize != null) { @@ -301,12 +353,25 @@ public ActionRequestValidationException validate() { + Strings.arrayToCommaDelimitedString(VALID_WAIT_STATES) ); } - if (numberOfAllocations < 1) { - validationException.addValidationError("[" + NUMBER_OF_ALLOCATIONS + "] must be a positive integer"); + if (numberOfAllocations != null) { + if (numberOfAllocations < 1) { + validationException.addValidationError("[" + NUMBER_OF_ALLOCATIONS + "] must be a positive integer"); + } + if (adaptiveAllocationsSettings != null && adaptiveAllocationsSettings.getEnabled()) { + validationException.addValidationError( + "[" + NUMBER_OF_ALLOCATIONS + "] cannot be set if adaptive allocations is enabled" + ); + } } if (threadsPerAllocation < 1) { validationException.addValidationError("[" + THREADS_PER_ALLOCATION + "] must be a positive integer"); } + ActionRequestValidationException autoscaleException = adaptiveAllocationsSettings == null + ? 
null + : adaptiveAllocationsSettings.validate(); + if (autoscaleException != null) { + validationException.addValidationErrors(autoscaleException.validationErrors()); + } if (threadsPerAllocation > MAX_THREADS_PER_ALLOCATION || isPowerOf2(threadsPerAllocation) == false) { validationException.addValidationError( "[" + THREADS_PER_ALLOCATION + "] must be a power of 2 less than or equal to " + MAX_THREADS_PER_ALLOCATION @@ -322,7 +387,7 @@ public ActionRequestValidationException validate() { validationException.addValidationError("[" + TIMEOUT + "] must be positive"); } if (priority == Priority.LOW) { - if (numberOfAllocations > 1) { + if (numberOfAllocations != null && numberOfAllocations > 1) { validationException.addValidationError("[" + NUMBER_OF_ALLOCATIONS + "] must be 1 when [" + PRIORITY + "] is low"); } if (threadsPerAllocation > 1) { @@ -344,6 +409,7 @@ public int hashCode() { timeout, waitForState, numberOfAllocations, + adaptiveAllocationsSettings, threadsPerAllocation, queueCapacity, cacheSize, @@ -365,7 +431,8 @@ public boolean equals(Object obj) { && Objects.equals(timeout, other.timeout) && Objects.equals(waitForState, other.waitForState) && Objects.equals(cacheSize, other.cacheSize) - && numberOfAllocations == other.numberOfAllocations + && Objects.equals(numberOfAllocations, other.numberOfAllocations) + && Objects.equals(adaptiveAllocationsSettings, other.adaptiveAllocationsSettings) && threadsPerAllocation == other.threadsPerAllocation && queueCapacity == other.queueCapacity && priority == other.priority; @@ -430,7 +497,7 @@ public static boolean mayAssignToNode(@Nullable DiscoveryNode node) { PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), THREADS_PER_ALLOCATION); PARSER.declareInt(ConstructingObjectParser.constructorArg(), QUEUE_CAPACITY); PARSER.declareField( - optionalConstructorArg(), + ConstructingObjectParser.optionalConstructorArg(), (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), CACHE_SIZE.getPreferredName()), CACHE_SIZE, ObjectParser.ValueType.VALUE diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelDeploymentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelDeploymentAction.java index 62a7d84c60a62..28152bc0d5556 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelDeploymentAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelDeploymentAction.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.ml.action; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; @@ -19,12 +20,15 @@ import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsFeatureFlag; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import java.io.IOException; import java.util.Objects; +import static org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction.Request.ADAPTIVE_ALLOCATIONS; import static 
org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction.Request.MODEL_ID; import static org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction.Request.NUMBER_OF_ALLOCATIONS; @@ -46,6 +50,14 @@ public static class Request extends AcknowledgedRequest implements ToXC static { PARSER.declareString(Request::setDeploymentId, MODEL_ID); PARSER.declareInt(Request::setNumberOfAllocations, NUMBER_OF_ALLOCATIONS); + if (AdaptiveAllocationsFeatureFlag.isEnabled()) { + PARSER.declareObjectOrNull( + Request::setAdaptiveAllocationsSettings, + (p, c) -> AdaptiveAllocationsSettings.PARSER.parse(p, c).build(), + AdaptiveAllocationsSettings.RESET_PLACEHOLDER, + ADAPTIVE_ALLOCATIONS + ); + } PARSER.declareString((r, val) -> r.ackTimeout(TimeValue.parseTimeValue(val, TIMEOUT.getPreferredName())), TIMEOUT); } @@ -62,7 +74,9 @@ public static Request parseRequest(String deploymentId, XContentParser parser) { } private String deploymentId; - private int numberOfAllocations; + private Integer numberOfAllocations; + private AdaptiveAllocationsSettings adaptiveAllocationsSettings; + private boolean isInternal; private Request() { super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); @@ -76,7 +90,15 @@ public Request(String deploymentId) { public Request(StreamInput in) throws IOException { super(in); deploymentId = in.readString(); - numberOfAllocations = in.readVInt(); + if (in.getTransportVersion().before(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) { + numberOfAllocations = in.readVInt(); + adaptiveAllocationsSettings = null; + isInternal = false; + } else { + numberOfAllocations = in.readOptionalVInt(); + adaptiveAllocationsSettings = in.readOptionalWriteable(AdaptiveAllocationsSettings::new); + isInternal = in.readBoolean(); + } } public final void setDeploymentId(String deploymentId) { @@ -87,26 +109,53 @@ public String getDeploymentId() { return deploymentId; } - public void setNumberOfAllocations(int numberOfAllocations) { + public void setNumberOfAllocations(Integer numberOfAllocations) { this.numberOfAllocations = numberOfAllocations; } - public int getNumberOfAllocations() { + public Integer getNumberOfAllocations() { return numberOfAllocations; } + public void setAdaptiveAllocationsSettings(AdaptiveAllocationsSettings adaptiveAllocationsSettings) { + this.adaptiveAllocationsSettings = adaptiveAllocationsSettings; + } + + public boolean isInternal() { + return isInternal; + } + + public void setIsInternal(boolean isInternal) { + this.isInternal = isInternal; + } + + public AdaptiveAllocationsSettings getAdaptiveAllocationsSettings() { + return adaptiveAllocationsSettings; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(deploymentId); - out.writeVInt(numberOfAllocations); + if (out.getTransportVersion().before(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) { + out.writeVInt(numberOfAllocations); + } else { + out.writeOptionalVInt(numberOfAllocations); + out.writeOptionalWriteable(adaptiveAllocationsSettings); + out.writeBoolean(isInternal); + } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(MODEL_ID.getPreferredName(), deploymentId); - builder.field(NUMBER_OF_ALLOCATIONS.getPreferredName(), numberOfAllocations); + if (numberOfAllocations != null) { + builder.field(NUMBER_OF_ALLOCATIONS.getPreferredName(), numberOfAllocations); + } + if (adaptiveAllocationsSettings != null) { + 
builder.field(ADAPTIVE_ALLOCATIONS.getPreferredName(), adaptiveAllocationsSettings); + } builder.endObject(); return builder; } @@ -114,15 +163,28 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = new ActionRequestValidationException(); - if (numberOfAllocations < 1) { - validationException.addValidationError("[" + NUMBER_OF_ALLOCATIONS + "] must be a positive integer"); + if (numberOfAllocations != null) { + if (numberOfAllocations < 1) { + validationException.addValidationError("[" + NUMBER_OF_ALLOCATIONS + "] must be a positive integer"); + } + if (isInternal == false && adaptiveAllocationsSettings != null && adaptiveAllocationsSettings.getEnabled()) { + validationException.addValidationError( + "[" + NUMBER_OF_ALLOCATIONS + "] cannot be set if adaptive allocations is enabled" + ); + } + } + ActionRequestValidationException autoscaleException = adaptiveAllocationsSettings == null + ? null + : adaptiveAllocationsSettings.validate(); + if (autoscaleException != null) { + validationException.addValidationErrors(autoscaleException.validationErrors()); } return validationException.validationErrors().isEmpty() ? null : validationException; } @Override public int hashCode() { - return Objects.hash(deploymentId, numberOfAllocations); + return Objects.hash(deploymentId, numberOfAllocations, adaptiveAllocationsSettings, isInternal); } @Override @@ -134,7 +196,10 @@ public boolean equals(Object obj) { return false; } Request other = (Request) obj; - return Objects.equals(deploymentId, other.deploymentId) && numberOfAllocations == other.numberOfAllocations; + return Objects.equals(deploymentId, other.deploymentId) + && Objects.equals(numberOfAllocations, other.numberOfAllocations) + && Objects.equals(adaptiveAllocationsSettings, other.adaptiveAllocationsSettings) + && isInternal == other.isInternal; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfig.java index 24fc24e43226b..f0909f75d9402 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfig.java @@ -47,7 +47,6 @@ import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.time.Instant; -import java.util.Arrays; import java.util.Base64; import java.util.Collections; import java.util.HashMap; @@ -236,7 +235,7 @@ public static TrainedModelConfig.Builder fromXContent(XContentParser parser, boo this.description = description; this.tags = Collections.unmodifiableList(ExceptionsHelper.requireNonNull(tags, TAGS)); this.metadata = metadata == null ? 
null : Collections.unmodifiableMap(metadata); - this.input = ExceptionsHelper.requireNonNull(handleDefaultInput(input, modelType), INPUT); + this.input = ExceptionsHelper.requireNonNull(handleDefaultInput(input, inferenceConfig, modelType), INPUT); if (ExceptionsHelper.requireNonNull(modelSize, MODEL_SIZE_BYTES) < 0) { throw new IllegalArgumentException("[" + MODEL_SIZE_BYTES.getPreferredName() + "] must be greater than or equal to 0"); } @@ -256,11 +255,12 @@ public static TrainedModelConfig.Builder fromXContent(XContentParser parser, boo this.prefixStrings = prefixStrings; } - private static TrainedModelInput handleDefaultInput(TrainedModelInput input, TrainedModelType modelType) { - if (modelType == null) { - return input; - } - return input == null ? modelType.getDefaultInput() : input; + private static TrainedModelInput handleDefaultInput( + TrainedModelInput input, + InferenceConfig inferenceConfig, + TrainedModelType modelType + ) { + return input == null && inferenceConfig != null ? inferenceConfig.getDefaultInput(modelType) : input; } public TrainedModelConfig(StreamInput in) throws IOException { @@ -963,20 +963,12 @@ public Builder validate(boolean forCreation) { break; } } - if (input != null && input.getFieldNames().isEmpty()) { - validationException = addValidationError("[input.field_names] must not be empty", validationException); - } - if (input != null - && input.getFieldNames() - .stream() - .filter(s -> s.contains(".")) - .flatMap(s -> Arrays.stream(Strings.delimitedListToStringArray(s, "."))) - .anyMatch(String::isEmpty)) { - validationException = addValidationError( - "[input.field_names] must only contain valid dot delimited field names", - validationException - ); + + // Delegate input validation to the inference config. + if (inferenceConfig != null) { + validationException = inferenceConfig.validateTrainedModelInput(input, forCreation, validationException); } + if (forCreation) { validationException = checkIllegalSetting(version, VERSION.getPreferredName(), validationException); validationException = checkIllegalSetting(createdBy, CREATED_BY.getPreferredName(), validationException); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/SemanticTextFeature.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AdaptiveAllocationsFeatureFlag.java similarity index 74% rename from x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/SemanticTextFeature.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AdaptiveAllocationsFeatureFlag.java index 4f2c5c564bcb8..a3b508c0534f9 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/SemanticTextFeature.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AdaptiveAllocationsFeatureFlag.java @@ -5,18 +5,18 @@ * 2.0. */ -package org.elasticsearch.xpack.inference; +package org.elasticsearch.xpack.core.ml.inference.assignment; import org.elasticsearch.common.util.FeatureFlag; /** * semantic_text feature flag. When the feature is complete, this flag will be removed. 
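
Taken together, the request changes above and the AdaptiveAllocationsSettings class added just below introduce three autoscaling knobs (enabled, min_number_of_allocations, max_number_of_allocations), use -1 as the RESET_PLACEHOLDER convention for clearing a bound in an update, and reject an explicit number_of_allocations while adaptive allocations is enabled. A minimal usage sketch of those semantics, based only on the constructors and methods declared in this diff (an illustration, not code from the patch):

    AdaptiveAllocationsSettings settings = new AdaptiveAllocationsSettings(true, 1, 4);
    assert settings.validate() == null;                      // positive bounds and min <= max: no validation errors

    // In updates, -1 follows the RESET_PLACEHOLDER convention: merging it clears the bound.
    AdaptiveAllocationsSettings cleared = settings.merge(new AdaptiveAllocationsSettings(null, -1, -1));
    assert cleared.getEnabled()                               // enabled is untouched because the update passed null
        && cleared.getMinNumberOfAllocations() == null
        && cleared.getMaxNumberOfAllocations() == null;

    // Inconsistent bounds surface as validation errors rather than a null result.
    assert new AdaptiveAllocationsSettings(true, 8, 2).validate() != null;   // min > max
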
*/ -public class SemanticTextFeature { +public class AdaptiveAllocationsFeatureFlag { - private SemanticTextFeature() {} + private AdaptiveAllocationsFeatureFlag() {} - private static final FeatureFlag FEATURE_FLAG = new FeatureFlag("semantic_text"); + private static final FeatureFlag FEATURE_FLAG = new FeatureFlag("inference_adaptive_allocations"); public static boolean isEnabled() { return FEATURE_FLAG.isEnabled(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AdaptiveAllocationsSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AdaptiveAllocationsSettings.java new file mode 100644 index 0000000000000..0b5a62ccb588c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AdaptiveAllocationsSettings.java @@ -0,0 +1,181 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.ml.inference.assignment; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class AdaptiveAllocationsSettings implements ToXContentObject, Writeable { + + public static final AdaptiveAllocationsSettings RESET_PLACEHOLDER = new AdaptiveAllocationsSettings(false, -1, -1); + + public static final ParseField ENABLED = new ParseField("enabled"); + public static final ParseField MIN_NUMBER_OF_ALLOCATIONS = new ParseField("min_number_of_allocations"); + public static final ParseField MAX_NUMBER_OF_ALLOCATIONS = new ParseField("max_number_of_allocations"); + + public static final ObjectParser PARSER = new ObjectParser<>( + "autoscaling_settings", + AdaptiveAllocationsSettings.Builder::new + ); + + static { + PARSER.declareBoolean(Builder::setEnabled, ENABLED); + PARSER.declareIntOrNull(Builder::setMinNumberOfAllocations, -1, MIN_NUMBER_OF_ALLOCATIONS); + PARSER.declareIntOrNull(Builder::setMaxNumberOfAllocations, -1, MAX_NUMBER_OF_ALLOCATIONS); + } + + public static AdaptiveAllocationsSettings parseRequest(XContentParser parser) { + return PARSER.apply(parser, null).build(); + } + + public static class Builder { + private Boolean enabled; + private Integer minNumberOfAllocations; + private Integer maxNumberOfAllocations; + + public Builder() {} + + public Builder(AdaptiveAllocationsSettings settings) { + enabled = settings.enabled; + minNumberOfAllocations = settings.minNumberOfAllocations; + maxNumberOfAllocations = settings.maxNumberOfAllocations; + } + + public void setEnabled(Boolean enabled) { + this.enabled = enabled; + } + + public void setMinNumberOfAllocations(Integer minNumberOfAllocations) { + this.minNumberOfAllocations = minNumberOfAllocations; + } + + public void setMaxNumberOfAllocations(Integer maxNumberOfAllocations) { + this.maxNumberOfAllocations = maxNumberOfAllocations; + } + + public AdaptiveAllocationsSettings 
build() { + return new AdaptiveAllocationsSettings(enabled, minNumberOfAllocations, maxNumberOfAllocations); + } + } + + private final Boolean enabled; + private final Integer minNumberOfAllocations; + private final Integer maxNumberOfAllocations; + + public AdaptiveAllocationsSettings(Boolean enabled, Integer minNumberOfAllocations, Integer maxNumberOfAllocations) { + this.enabled = enabled; + this.minNumberOfAllocations = minNumberOfAllocations; + this.maxNumberOfAllocations = maxNumberOfAllocations; + } + + public AdaptiveAllocationsSettings(StreamInput in) throws IOException { + enabled = in.readOptionalBoolean(); + minNumberOfAllocations = in.readOptionalInt(); + maxNumberOfAllocations = in.readOptionalInt(); + } + + public Boolean getEnabled() { + return enabled; + } + + public Integer getMinNumberOfAllocations() { + return minNumberOfAllocations; + } + + public Integer getMaxNumberOfAllocations() { + return maxNumberOfAllocations; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (enabled != null) { + builder.field(ENABLED.getPreferredName(), enabled); + } + if (minNumberOfAllocations != null) { + builder.field(MIN_NUMBER_OF_ALLOCATIONS.getPreferredName(), minNumberOfAllocations); + } + if (maxNumberOfAllocations != null) { + builder.field(MAX_NUMBER_OF_ALLOCATIONS.getPreferredName(), maxNumberOfAllocations); + } + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalBoolean(enabled); + out.writeOptionalInt(minNumberOfAllocations); + out.writeOptionalInt(maxNumberOfAllocations); + } + + public AdaptiveAllocationsSettings merge(AdaptiveAllocationsSettings updates) { + AdaptiveAllocationsSettings.Builder builder = new Builder(this); + if (updates.getEnabled() != null) { + builder.setEnabled(updates.enabled); + } + if (updates.minNumberOfAllocations != null) { + if (updates.minNumberOfAllocations == -1) { + builder.setMinNumberOfAllocations(null); + } else { + builder.setMinNumberOfAllocations(updates.minNumberOfAllocations); + } + } + if (updates.maxNumberOfAllocations != null) { + if (updates.maxNumberOfAllocations == -1) { + builder.setMaxNumberOfAllocations(null); + } else { + builder.setMaxNumberOfAllocations(updates.maxNumberOfAllocations); + } + } + return builder.build(); + } + + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = new ActionRequestValidationException(); + boolean hasMinNumberOfAllocations = (minNumberOfAllocations != null && minNumberOfAllocations != -1); + if (hasMinNumberOfAllocations && minNumberOfAllocations < 1) { + validationException.addValidationError("[" + MIN_NUMBER_OF_ALLOCATIONS + "] must be a positive integer or null"); + } + boolean hasMaxNumberOfAllocations = (maxNumberOfAllocations != null && maxNumberOfAllocations != -1); + if (hasMaxNumberOfAllocations && maxNumberOfAllocations < 1) { + validationException.addValidationError("[" + MAX_NUMBER_OF_ALLOCATIONS + "] must be a positive integer or null"); + } + if (hasMinNumberOfAllocations && hasMaxNumberOfAllocations && minNumberOfAllocations > maxNumberOfAllocations) { + validationException.addValidationError( + "[" + MIN_NUMBER_OF_ALLOCATIONS + "] must not be larger than [" + MAX_NUMBER_OF_ALLOCATIONS + "]" + ); + } + return validationException.validationErrors().isEmpty() ? 
null : validationException; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AdaptiveAllocationsSettings that = (AdaptiveAllocationsSettings) o; + return Objects.equals(enabled, that.enabled) + && Objects.equals(minNumberOfAllocations, that.minNumberOfAllocations) + && Objects.equals(maxNumberOfAllocations, that.maxNumberOfAllocations); + } + + @Override + public int hashCode() { + return Objects.hash(enabled, minNumberOfAllocations, maxNumberOfAllocations); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AssignmentStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AssignmentStats.java index d8e5d7a6d9603..aadaa5254ff15 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AssignmentStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AssignmentStats.java @@ -423,6 +423,8 @@ public int hashCode() { @Nullable private final Integer numberOfAllocations; @Nullable + private final AdaptiveAllocationsSettings adaptiveAllocationsSettings; + @Nullable private final Integer queueCapacity; @Nullable private final ByteSizeValue cacheSize; @@ -435,6 +437,7 @@ public AssignmentStats( String modelId, @Nullable Integer threadsPerAllocation, @Nullable Integer numberOfAllocations, + @Nullable AdaptiveAllocationsSettings adaptiveAllocationsSettings, @Nullable Integer queueCapacity, @Nullable ByteSizeValue cacheSize, Instant startTime, @@ -445,6 +448,7 @@ public AssignmentStats( this.modelId = modelId; this.threadsPerAllocation = threadsPerAllocation; this.numberOfAllocations = numberOfAllocations; + this.adaptiveAllocationsSettings = adaptiveAllocationsSettings; this.queueCapacity = queueCapacity; this.startTime = Objects.requireNonNull(startTime); this.nodeStats = nodeStats; @@ -479,6 +483,11 @@ public AssignmentStats(StreamInput in) throws IOException { } else { deploymentId = modelId; } + if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) { + adaptiveAllocationsSettings = in.readOptionalWriteable(AdaptiveAllocationsSettings::new); + } else { + adaptiveAllocationsSettings = null; + } } public String getDeploymentId() { @@ -499,6 +508,11 @@ public Integer getNumberOfAllocations() { return numberOfAllocations; } + @Nullable + public AdaptiveAllocationsSettings getAdaptiveAllocationsSettings() { + return adaptiveAllocationsSettings; + } + @Nullable public Integer getQueueCapacity() { return queueCapacity; @@ -575,6 +589,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (numberOfAllocations != null) { builder.field(StartTrainedModelDeploymentAction.TaskParams.NUMBER_OF_ALLOCATIONS.getPreferredName(), numberOfAllocations); } + if (adaptiveAllocationsSettings != null) { + builder.field(StartTrainedModelDeploymentAction.Request.ADAPTIVE_ALLOCATIONS.getPreferredName(), adaptiveAllocationsSettings); + } if (queueCapacity != null) { builder.field(StartTrainedModelDeploymentAction.TaskParams.QUEUE_CAPACITY.getPreferredName(), queueCapacity); } @@ -649,6 +666,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeString(deploymentId); } + if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) { + 
out.writeOptionalWriteable(adaptiveAllocationsSettings); + } } @Override @@ -660,6 +680,7 @@ public boolean equals(Object o) { && Objects.equals(modelId, that.modelId) && Objects.equals(threadsPerAllocation, that.threadsPerAllocation) && Objects.equals(numberOfAllocations, that.numberOfAllocations) + && Objects.equals(adaptiveAllocationsSettings, that.adaptiveAllocationsSettings) && Objects.equals(queueCapacity, that.queueCapacity) && Objects.equals(startTime, that.startTime) && Objects.equals(state, that.state) @@ -677,6 +698,7 @@ public int hashCode() { modelId, threadsPerAllocation, numberOfAllocations, + adaptiveAllocationsSettings, queueCapacity, startTime, nodeStats, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java index b7219fbaa2061..60e0c0e86a828 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java @@ -23,6 +23,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.common.time.TimeUtils; +import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -52,6 +53,7 @@ public final class TrainedModelAssignment implements SimpleDiffable PARSER = new ConstructingObjectParser<>( @@ -64,7 +66,8 @@ public final class TrainedModelAssignment implements SimpleDiffable AdaptiveAllocationsSettings.PARSER.parse(p, c).build(), + null, + ADAPTIVE_ALLOCATIONS + ); } private final StartTrainedModelDeploymentAction.TaskParams taskParams; @@ -96,6 +105,7 @@ public final class TrainedModelAssignment implements SimpleDiffable assignableNodeIds) { int allocations = nodeRoutingTable.entrySet() .stream() @@ -301,12 +324,21 @@ public boolean equals(Object o) { && Objects.equals(reason, that.reason) && Objects.equals(assignmentState, that.assignmentState) && Objects.equals(startTime, that.startTime) - && maxAssignedAllocations == that.maxAssignedAllocations; + && maxAssignedAllocations == that.maxAssignedAllocations + && Objects.equals(adaptiveAllocationsSettings, that.adaptiveAllocationsSettings); } @Override public int hashCode() { - return Objects.hash(nodeRoutingTable, taskParams, assignmentState, reason, startTime, maxAssignedAllocations); + return Objects.hash( + nodeRoutingTable, + taskParams, + assignmentState, + reason, + startTime, + maxAssignedAllocations, + adaptiveAllocationsSettings + ); } @Override @@ -320,6 +352,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.timeField(START_TIME.getPreferredName(), startTime); builder.field(MAX_ASSIGNED_ALLOCATIONS.getPreferredName(), maxAssignedAllocations); + builder.field(ADAPTIVE_ALLOCATIONS.getPreferredName(), adaptiveAllocationsSettings); builder.endObject(); return builder; } @@ -334,6 +367,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { out.writeVInt(maxAssignedAllocations); } + if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) { + 
out.writeOptionalWriteable(adaptiveAllocationsSettings); + } } public Optional calculateAllocationStatus() { @@ -355,6 +391,7 @@ public static class Builder { private String reason; private Instant startTime; private int maxAssignedAllocations; + private AdaptiveAllocationsSettings adaptiveAllocationsSettings; public static Builder fromAssignment(TrainedModelAssignment assignment) { return new Builder( @@ -363,12 +400,20 @@ public static Builder fromAssignment(TrainedModelAssignment assignment) { assignment.assignmentState, assignment.reason, assignment.startTime, - assignment.maxAssignedAllocations + assignment.maxAssignedAllocations, + assignment.adaptiveAllocationsSettings ); } - public static Builder empty(StartTrainedModelDeploymentAction.TaskParams taskParams) { - return new Builder(taskParams); + public static Builder empty(CreateTrainedModelAssignmentAction.Request request) { + return new Builder(request.getTaskParams(), request.getAdaptiveAllocationsSettings()); + } + + public static Builder empty( + StartTrainedModelDeploymentAction.TaskParams taskParams, + AdaptiveAllocationsSettings adaptiveAllocationsSettings + ) { + return new Builder(taskParams, adaptiveAllocationsSettings); } private Builder( @@ -377,7 +422,8 @@ private Builder( AssignmentState assignmentState, String reason, Instant startTime, - int maxAssignedAllocations + int maxAssignedAllocations, + AdaptiveAllocationsSettings adaptiveAllocationsSettings ) { this.taskParams = taskParams; this.nodeRoutingTable = new LinkedHashMap<>(nodeRoutingTable); @@ -385,10 +431,11 @@ private Builder( this.reason = reason; this.startTime = startTime; this.maxAssignedAllocations = maxAssignedAllocations; + this.adaptiveAllocationsSettings = adaptiveAllocationsSettings; } - private Builder(StartTrainedModelDeploymentAction.TaskParams taskParams) { - this(taskParams, new LinkedHashMap<>(), AssignmentState.STARTING, null, Instant.now(), 0); + private Builder(StartTrainedModelDeploymentAction.TaskParams taskParams, AdaptiveAllocationsSettings adaptiveAllocationsSettings) { + this(taskParams, new LinkedHashMap<>(), AssignmentState.STARTING, null, Instant.now(), 0, adaptiveAllocationsSettings); } public Builder setStartTime(Instant startTime) { @@ -401,6 +448,11 @@ public Builder setMaxAssignedAllocations(int maxAssignedAllocations) { return this; } + public Builder setAdaptiveAllocationsSettings(AdaptiveAllocationsSettings adaptiveAllocationsSettings) { + this.adaptiveAllocationsSettings = adaptiveAllocationsSettings; + return this; + } + public Builder addRoutingEntry(String nodeId, RoutingInfo routingInfo) { if (nodeRoutingTable.containsKey(nodeId)) { throw new ResourceAlreadyExistsException( @@ -518,7 +570,15 @@ public Builder setNumberOfAllocations(int numberOfAllocations) { } public TrainedModelAssignment build() { - return new TrainedModelAssignment(taskParams, nodeRoutingTable, assignmentState, reason, startTime, maxAssignedAllocations); + return new TrainedModelAssignment( + taskParams, + nodeRoutingTable, + assignmentState, + reason, + startTime, + maxAssignedAllocations, + adaptiveAllocationsSettings + ); } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceConfig.java index 8733e456157d2..92e833c250873 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceConfig.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceConfig.java @@ -8,12 +8,21 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.VersionedNamedWriteable; +import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xpack.core.ml.MlConfigVersion; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelInput; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.NamedXContentObject; +import java.util.Arrays; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + public interface InferenceConfig extends NamedXContentObject, VersionedNamedWriteable { String DEFAULT_TOP_CLASSES_RESULTS_FIELD = "top_classes"; @@ -65,6 +74,39 @@ default boolean supportsSearchRescorer() { return false; } + @Nullable + default TrainedModelInput getDefaultInput(TrainedModelType modelType) { + if (modelType == null) { + return null; + } + return modelType.getDefaultInput(); + } + + default ActionRequestValidationException validateTrainedModelInput( + TrainedModelInput input, + boolean forCreation, + ActionRequestValidationException validationException + ) { + + if (input != null && input.getFieldNames().isEmpty()) { + validationException = addValidationError("[input.field_names] must not be empty", validationException); + } + + if (input != null + && input.getFieldNames() + .stream() + .filter(s -> s.contains(".")) + .flatMap(s -> Arrays.stream(Strings.delimitedListToStringArray(s, "."))) + .anyMatch(String::isEmpty)) { + validationException = addValidationError( + "[input.field_names] must only contain valid dot delimited field names", + validationException + ); + } + + return validationException; + } + default ElasticsearchStatusException incompatibleUpdateException(String updateName) { throw ExceptionsHelper.badRequestException( "Inference config of type [{}] can not be updated with a inference request of type [{}]", diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java index 293769371999e..7d515c9509c41 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -17,6 +18,8 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.ml.MlConfigVersion; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelInput; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; import 
org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.LearningToRankFeatureExtractorBuilder; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.QueryExtractorBuilder; import org.elasticsearch.xpack.core.ml.utils.NamedXContentObjectHelper; @@ -30,6 +33,8 @@ import java.util.Set; import java.util.stream.Collectors; +import static org.elasticsearch.action.ValidateActions.addValidationError; + public class LearningToRankConfig extends RegressionConfig implements Rewriteable { public static final ParseField NAME = new ParseField("learning_to_rank"); @@ -43,6 +48,8 @@ public class LearningToRankConfig extends RegressionConfig implements Rewriteabl private static final ObjectParser LENIENT_PARSER = createParser(true); private static final ObjectParser STRICT_PARSER = createParser(false); + private static final TrainedModelInput DEFAULT_INPUT = new TrainedModelInput(List.of()); + private static ObjectParser createParser(boolean lenient) { ObjectParser parser = new ObjectParser<>( NAME.getPreferredName(), @@ -237,6 +244,24 @@ public LearningToRankConfig rewrite(QueryRewriteContext ctx) throws IOException return this; } + @Override + public TrainedModelInput getDefaultInput(TrainedModelType modelType) { + return DEFAULT_INPUT; + } + + @Override + public ActionRequestValidationException validateTrainedModelInput( + TrainedModelInput input, + boolean forCreation, + ActionRequestValidationException validationException + ) { + if (forCreation && input != null && input.getFieldNames().isEmpty() == false) { + return addValidationError("cannot specify [input.field_names] for a model of type [learning_to_rank]", validationException); + } + + return validationException; + } + public static class Builder { private Integer numTopFeatureImportanceValues; private List learningToRankFeatureExtractorBuilders; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/search/WeightedTokensQueryBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/search/WeightedTokensQueryBuilder.java index 84d5dbdaaf536..256c90c3eaa62 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/search/WeightedTokensQueryBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/search/WeightedTokensQueryBuilder.java @@ -15,6 +15,8 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.AbstractQueryBuilder; @@ -29,7 +31,12 @@ import java.util.Objects; import java.util.Set; +/** + * @deprecated Replaced by sparse_vector query + */ +@Deprecated public class WeightedTokensQueryBuilder extends AbstractQueryBuilder { + public static final String NAME = "weighted_tokens"; public static final ParseField TOKENS_FIELD = new ParseField("tokens"); @@ -41,6 +48,10 @@ public class WeightedTokensQueryBuilder extends AbstractQueryBuilder ALLOWED_FIELD_TYPES = Set.of("sparse_vector", "rank_features"); + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(ParseField.class); + public static final String WEIGHTED_TOKENS_DEPRECATION_MESSAGE = NAME + + " is deprecated and will be removed. 
Use sparse_vector instead."; + public WeightedTokensQueryBuilder(String fieldName, List tokens) { this(fieldName, tokens, null); } @@ -153,6 +164,9 @@ private static float parseWeight(String token, Object weight) { } public static WeightedTokensQueryBuilder fromXContent(XContentParser parser) throws IOException { + + deprecationLogger.critical(DeprecationCategory.API, NAME, WEIGHTED_TOKENS_DEPRECATION_MESSAGE); + String currentFieldName = null; String fieldName = null; List tokens = new ArrayList<>(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/SemanticTextInfoExtractor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/SemanticTextInfoExtractor.java new file mode 100644 index 0000000000000..544c1e344c91f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/SemanticTextInfoExtractor.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + * + * this file was contributed to by a Generative AI + */ + +package org.elasticsearch.xpack.core.ml.utils; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.InferenceFieldMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.transport.Transports; + +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +public class SemanticTextInfoExtractor { + private static final Logger logger = LogManager.getLogger(SemanticTextInfoExtractor.class); + + public static Set extractIndexesReferencingInferenceEndpoints(Metadata metadata, Set endpointIds) { + assert Transports.assertNotTransportThread("non-trivial nested loops over cluster state structures"); + assert endpointIds.isEmpty() == false; + assert metadata != null; + + Set referenceIndices = new HashSet<>(); + + Map indices = metadata.indices(); + + indices.forEach((indexName, indexMetadata) -> { + if (indexMetadata.getInferenceFields() != null) { + Map inferenceFields = indexMetadata.getInferenceFields(); + if (inferenceFields.entrySet() + .stream() + .anyMatch( + entry -> entry.getValue().getInferenceId() != null && endpointIds.contains(entry.getValue().getInferenceId()) + )) { + referenceIndices.add(indexName); + } + } + }); + + return referenceIndices; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/ActionTypes.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/ActionTypes.java index 30ad5e7902d19..52f8c7cf456d9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/ActionTypes.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/ActionTypes.java @@ -9,6 +9,8 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; +import org.elasticsearch.xpack.core.security.action.role.BulkRolesResponse; +import org.elasticsearch.xpack.core.security.action.role.QueryRoleResponse; import org.elasticsearch.xpack.core.security.action.user.QueryUserResponse; /** @@ -23,4 +25,7 @@ public final class ActionTypes { ); public static final ActionType QUERY_USER_ACTION = new ActionType<>("cluster:admin/xpack/security/user/query"); 
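
The SemanticTextInfoExtractor utility above complements the enriched DeleteInferenceEndpointAction.Response from earlier in this patch, which now carries the referencing index names plus an error_message that is only returned for dry runs. A hedged sketch of how the delete transport action would presumably wire the two together; clusterState, pipelineIds, dryRun, endpointId and listener are illustrative locals, not names taken from this diff:

    Set<String> indexes = SemanticTextInfoExtractor.extractIndexesReferencingInferenceEndpoints(
        clusterState.metadata(),
        Set.of(endpointId)
    );
    if (dryRun) {
        // Dry run: report what currently references the endpoint instead of deleting it.
        String message = indexes.isEmpty() && pipelineIds.isEmpty()
            ? null
            : "Endpoint [" + endpointId + "] is referenced by pipelines " + pipelineIds + " and indices " + indexes;
        listener.onResponse(new DeleteInferenceEndpointAction.Response(false, pipelineIds, indexes, message));
    }
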
+ public static final ActionType BULK_PUT_ROLES = new ActionType<>("cluster:admin/xpack/security/role/bulk_put"); + public static final ActionType QUERY_ROLE_ACTION = new ActionType<>("cluster:admin/xpack/security/role/query"); + public static final ActionType BULK_DELETE_ROLES = new ActionType<>("cluster:admin/xpack/security/role/bulk_delete"); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/Grant.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/Grant.java index b186ab45a7dc7..c98564251cd43 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/Grant.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/Grant.java @@ -7,19 +7,13 @@ package org.elasticsearch.xpack.core.security.action; -import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtAuthenticationToken; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; -import org.elasticsearch.xpack.core.security.authc.support.BearerToken; -import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import java.io.IOException; @@ -136,30 +130,6 @@ public void setClientAuthentication(ClientAuthentication clientAuthentication) { this.clientAuthentication = clientAuthentication; } - public AuthenticationToken getAuthenticationToken() { - assert validate(null) == null : "grant is invalid"; - return switch (type) { - case PASSWORD_GRANT_TYPE -> new UsernamePasswordToken(username, password); - case ACCESS_TOKEN_GRANT_TYPE -> { - SecureString clientAuthentication = this.clientAuthentication != null ? this.clientAuthentication.value() : null; - AuthenticationToken token = JwtAuthenticationToken.tryParseJwt(accessToken, clientAuthentication); - if (token != null) { - yield token; - } - if (clientAuthentication != null) { - clientAuthentication.close(); - throw new ElasticsearchSecurityException( - "[client_authentication] not supported with the supplied access_token type", - RestStatus.BAD_REQUEST - ); - } - // here we effectively assume it's an ES access token (from the {@code TokenService}) - yield new BearerToken(accessToken); - } - default -> throw new ElasticsearchSecurityException("the grant type [{}] is not supported", type); - }; - } - public ActionRequestValidationException validate(ActionRequestValidationException validationException) { if (type == null) { validationException = addValidationError("[grant_type] is required", validationException); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/BulkDeleteRolesRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/BulkDeleteRolesRequest.java new file mode 100644 index 0000000000000..d7009a683b0e9 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/BulkDeleteRolesRequest.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.WriteRequest; + +import java.util.List; +import java.util.Objects; + +public class BulkDeleteRolesRequest extends ActionRequest { + + private List roleNames; + + public BulkDeleteRolesRequest(List roleNames) { + this.roleNames = roleNames; + } + + private WriteRequest.RefreshPolicy refreshPolicy = WriteRequest.RefreshPolicy.IMMEDIATE; + + @Override + public ActionRequestValidationException validate() { + // Handle validation where delete role is handled to produce partial success if validation fails + return null; + } + + public List getRoleNames() { + return roleNames; + } + + public BulkDeleteRolesRequest setRefreshPolicy(WriteRequest.RefreshPolicy refreshPolicy) { + this.refreshPolicy = refreshPolicy; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass() || super.equals(o)) return false; + + BulkDeleteRolesRequest that = (BulkDeleteRolesRequest) o; + return Objects.equals(roleNames, that.roleNames); + } + + @Override + public int hashCode() { + return Objects.hash(roleNames); + } + + public WriteRequest.RefreshPolicy getRefreshPolicy() { + return refreshPolicy; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/BulkPutRoleRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/BulkPutRoleRequestBuilder.java new file mode 100644 index 0000000000000..cda45a67e81c6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/BulkPutRoleRequestBuilder.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.internal.ElasticsearchClient; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.security.action.ActionTypes; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * Builder for requests to bulk add a roles to the security index + */ +public class BulkPutRoleRequestBuilder extends ActionRequestBuilder { + + private static final RoleDescriptor.Parser ROLE_DESCRIPTOR_PARSER = RoleDescriptor.parserBuilder().allowDescription(true).build(); + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser, Void> PARSER = new ConstructingObjectParser<>( + "bulk_update_roles_request_payload", + a -> (List) a[0] + ); + + static { + PARSER.declareNamedObjects(optionalConstructorArg(), (p, c, n) -> { + p.nextToken(); + return ROLE_DESCRIPTOR_PARSER.parse(n, p, false); + }, new ParseField("roles")); + } + + public BulkPutRoleRequestBuilder(ElasticsearchClient client) { + super(client, ActionTypes.BULK_PUT_ROLES, new BulkPutRolesRequest(List.of())); + } + + public BulkPutRoleRequestBuilder content(BytesReference content, XContentType xContentType) throws IOException { + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + content, + xContentType + ); + List roles = PARSER.parse(parser, null); + request.setRoles(roles); + return this; + } + + public BulkPutRoleRequestBuilder setRefreshPolicy(WriteRequest.RefreshPolicy refreshPolicy) { + request.setRefreshPolicy(refreshPolicy); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/BulkPutRoleRequestBuilderFactory.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/BulkPutRoleRequestBuilderFactory.java new file mode 100644 index 0000000000000..a0c93c70363b0 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/BulkPutRoleRequestBuilderFactory.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.client.internal.Client; + +public interface BulkPutRoleRequestBuilderFactory { + BulkPutRoleRequestBuilder create(Client client); + + class Default implements BulkPutRoleRequestBuilderFactory { + @Override + public BulkPutRoleRequestBuilder create(Client client) { + // This needs to be added when Bulk API is made public in serverless + return new BulkPutRoleRequestBuilder(client); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/BulkPutRolesRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/BulkPutRolesRequest.java new file mode 100644 index 0000000000000..38648f81fbcdd --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/BulkPutRolesRequest.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +public class BulkPutRolesRequest extends ActionRequest { + + private List<RoleDescriptor> roles; + + public BulkPutRolesRequest(List<RoleDescriptor> roles) { + this.roles = roles; + } + + public void setRoles(List<RoleDescriptor> roles) { + this.roles = roles; + } + + private WriteRequest.RefreshPolicy refreshPolicy = WriteRequest.RefreshPolicy.IMMEDIATE; + + @Override + public ActionRequestValidationException validate() { + // Handle validation where put role is handled to produce partial success if validation fails + return null; + } + + public List<RoleDescriptor> getRoles() { + return roles; + } + + public BulkPutRolesRequest setRefreshPolicy(WriteRequest.RefreshPolicy refreshPolicy) { + this.refreshPolicy = refreshPolicy; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass() || super.equals(o)) return false; + + BulkPutRolesRequest that = (BulkPutRolesRequest) o; + return Objects.equals(roles, that.roles); + } + + @Override + public int hashCode() { + return Objects.hash(roles); + } + + public WriteRequest.RefreshPolicy getRefreshPolicy() { + return refreshPolicy; + } + + public void writeTo(StreamOutput out) throws IOException { + TransportAction.localOnly(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/BulkRolesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/BulkRolesResponse.java new file mode 100644 index 0000000000000..b74cc1fa15a4a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/BulkRolesResponse.java @@ -0,0 +1,145 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements.
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +public class BulkRolesResponse extends ActionResponse implements ToXContentObject { + + private final List<Item> items; + + public static class Builder { + + private final List<Item> items = new LinkedList<>(); + + public Builder addItem(Item item) { + items.add(item); + return this; + } + + public BulkRolesResponse build() { + return new BulkRolesResponse(items); + } + } + + public BulkRolesResponse(List<Item> items) { + this.items = items; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + Map<String, List<Item>> itemsByType = items.stream().collect(Collectors.groupingBy(Item::getResultType)); + + for (var resultEntry : itemsByType.entrySet()) { + if (resultEntry.getKey().equals("errors") == false) { + builder.startArray(resultEntry.getKey()); + for (var item : resultEntry.getValue()) { + item.toXContent(builder, params); + } + builder.endArray(); + } else { + builder.startObject("errors"); + builder.field("count", resultEntry.getValue().size()); + builder.startObject("details"); + for (var item : resultEntry.getValue()) { + builder.startObject(item.roleName); + item.toXContent(builder, params); + builder.endObject(); + } + builder.endObject(); + builder.endObject(); + } + } + + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + TransportAction.localOnly(); + } + + public static class Item implements ToXContentObject { + private final Exception cause; + private final String roleName; + + private final DocWriteResponse.Result resultType; + + private Item(String roleName, DocWriteResponse.Result resultType, Exception cause) { + this.roleName = roleName; + this.resultType = resultType; + this.cause = cause; + } + + Item(StreamInput in) throws IOException { + roleName = in.readString(); + resultType = DocWriteResponse.Result.readFrom(in); + cause = in.readException(); + } + + public Exception getCause() { + return cause; + } + + public String getResultType() { + return resultType == null ?
"errors" : resultType.getLowercase(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (resultType == null) { + ElasticsearchException.generateThrowableXContent(builder, params, cause); + } else { + builder.value(roleName); + } + return builder; + } + + public static Item success(String roleName, DocWriteResponse.Result result) { + return new Item(roleName, result, null); + } + + public static Item failure(String roleName, Exception cause) { + return new Item(roleName, null, cause); + } + + public String getRoleName() { + return roleName; + } + + public boolean isFailed() { + return cause != null; + } + + public String getFailureMessage() { + if (cause != null) { + return cause.getMessage(); + } + return null; + } + } + + public List getItems() { + return items; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/QueryRoleRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/QueryRoleRequest.java new file mode 100644 index 0000000000000..c61f9b7156dda --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/QueryRoleRequest.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.searchafter.SearchAfterBuilder; +import org.elasticsearch.search.sort.FieldSortBuilder; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public final class QueryRoleRequest extends ActionRequest { + + @Nullable + private final QueryBuilder queryBuilder; + @Nullable + private final Integer from; + @Nullable + private final Integer size; + @Nullable + private final List fieldSortBuilders; + @Nullable + private final SearchAfterBuilder searchAfterBuilder; + + public QueryRoleRequest( + @Nullable QueryBuilder queryBuilder, + @Nullable Integer from, + @Nullable Integer size, + @Nullable List fieldSortBuilders, + @Nullable SearchAfterBuilder searchAfterBuilder + ) { + this.queryBuilder = queryBuilder; + this.from = from; + this.size = size; + this.fieldSortBuilders = fieldSortBuilders; + this.searchAfterBuilder = searchAfterBuilder; + } + + public QueryBuilder getQueryBuilder() { + return queryBuilder; + } + + public Integer getFrom() { + return from; + } + + public Integer getSize() { + return size; + } + + public List getFieldSortBuilders() { + return fieldSortBuilders; + } + + public SearchAfterBuilder getSearchAfterBuilder() { + return searchAfterBuilder; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (from != null && from < 0) { + validationException = addValidationError("[from] parameter cannot be negative but was [" + from + "]", validationException); + } + if (size != null && size < 0) { + 
validationException = addValidationError("[size] parameter cannot be negative but was [" + size + "]", validationException); + } + return validationException; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + TransportAction.localOnly(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/QueryRoleResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/QueryRoleResponse.java new file mode 100644 index 0000000000000..8e9da10e449ad --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/QueryRoleResponse.java @@ -0,0 +1,106 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +public final class QueryRoleResponse extends ActionResponse implements ToXContentObject { + + public static final QueryRoleResponse EMPTY = new QueryRoleResponse(0, List.of()); + + private final long total; + private final List<Item> foundRoleDescriptors; + + public QueryRoleResponse(long total, List<Item> foundRoleDescriptors) { + this.total = total; + Objects.requireNonNull(foundRoleDescriptors, "found_role_descriptor must be provided"); + this.foundRoleDescriptors = foundRoleDescriptors; + } + + public long getTotal() { + return total; + } + + public List<Item> getRoleDescriptors() { + return foundRoleDescriptors; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("total", total).field("count", foundRoleDescriptors.size()).field("roles", foundRoleDescriptors); + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + TransportAction.localOnly(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + QueryRoleResponse that = (QueryRoleResponse) o; + return total == that.total && Objects.equals(foundRoleDescriptors, that.foundRoleDescriptors); + } + + @Override + public int hashCode() { + int result = Objects.hash(total); + result = 31 * result + Objects.hash(foundRoleDescriptors); + return result; + } + + @Override + public String toString() { + return "QueryRoleResponse{total=" + total + ", items=" + foundRoleDescriptors + "}"; + } + + public record Item(RoleDescriptor roleDescriptor, @Nullable Object[] sortValues) implements ToXContentObject { + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + // The role name is not normally stored in the role document (it is part of the doc id), + // so the "toXContent" method doesn't include it.
+ // But, for the query role API, we'd like to return the role name together with the + // other details of the role descriptor (in the same object). + assert Strings.isNullOrEmpty(roleDescriptor.getName()) == false; + builder.field("name", roleDescriptor.getName()); + roleDescriptor.innerToXContent(builder, params, false); + if (sortValues != null && sortValues.length > 0) { + builder.array("_sort", sortValues); + } + builder.endObject(); + return builder; + } + + @Override + public String toString() { + return "Item{roleDescriptor=" + roleDescriptor + ", sortValues=" + Arrays.toString(sortValues) + "}"; + } + } + + public record QueryRoleResult(long total, List items) { + public static final QueryRoleResult EMPTY = new QueryRoleResult(0, List.of()); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java index baf72a3411cde..1a8839fa0fa4a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java @@ -417,7 +417,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } public XContentBuilder toXContent(XContentBuilder builder, Params params, boolean docCreation) throws IOException { - return toXContent(builder, params, docCreation, false); + builder.startObject(); + innerToXContent(builder, params, docCreation); + return builder.endObject(); } /** @@ -428,13 +430,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, boolea * @param docCreation {@code true} if the x-content is being generated for creating a document * in the security index, {@code false} if the x-content being generated * is for API display purposes - * @param includeMetadataFlattened {@code true} if the metadataFlattened field should be included in doc * @return x-content builder * @throws IOException if there was an error writing the x-content to the builder */ - public XContentBuilder toXContent(XContentBuilder builder, Params params, boolean docCreation, boolean includeMetadataFlattened) - throws IOException { - builder.startObject(); + public XContentBuilder innerToXContent(XContentBuilder builder, Params params, boolean docCreation) throws IOException { builder.array(Fields.CLUSTER.getPreferredName(), clusterPrivileges); if (configurableClusterPrivileges.length != 0) { builder.field(Fields.GLOBAL.getPreferredName()); @@ -446,9 +445,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, boolea builder.array(Fields.RUN_AS.getPreferredName(), runAs); } builder.field(Fields.METADATA.getPreferredName(), metadata); - if (includeMetadataFlattened) { - builder.field(Fields.METADATA_FLATTENED.getPreferredName(), metadata); - } + if (docCreation) { builder.field(Fields.TYPE.getPreferredName(), ROLE_TYPE); } else { @@ -466,7 +463,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, boolea if (hasDescription()) { builder.field(Fields.DESCRIPTION.getPreferredName(), description); } - return builder.endObject(); + return builder; } @Override @@ -545,12 +542,17 @@ public RoleDescriptor parse(String name, BytesReference source, XContentType xCo } public RoleDescriptor parse(String name, XContentParser parser) throws IOException { - // validate name - Validation.Error validationError = 
Validation.Roles.validateRoleName(name, true); - if (validationError != null) { - ValidationException ve = new ValidationException(); - ve.addValidationError(validationError.toString()); - throw ve; + return parse(name, parser, true); + } + + public RoleDescriptor parse(String name, XContentParser parser, boolean validate) throws IOException { + if (validate) { + Validation.Error validationError = Validation.Roles.validateRoleName(name, true); + if (validationError != null) { + ValidationException ve = new ValidationException(); + ve.addValidationError(validationError.toString()); + throw ve; + } } // advance to the START_OBJECT token if needed @@ -1185,7 +1187,7 @@ private static ApplicationResourcePrivileges parseApplicationPrivilege(String ro public static final class RemoteIndicesPrivileges implements Writeable, ToXContentObject { - private static final RemoteIndicesPrivileges[] NONE = new RemoteIndicesPrivileges[0]; + public static final RemoteIndicesPrivileges[] NONE = new RemoteIndicesPrivileges[0]; private final IndicesPrivileges indicesPrivileges; private final String[] remoteClusters; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java index 4465d7d083183..a435f7e877250 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java @@ -67,6 +67,7 @@ import java.util.SortedMap; import java.util.TreeMap; import java.util.function.Predicate; +import java.util.stream.Collectors; import java.util.stream.Stream; /** @@ -174,6 +175,23 @@ public class ClusterPrivilegeResolver { ); private static final Set<String> MANAGE_SEARCH_APPLICATION_PATTERN = Set.of("cluster:admin/xpack/application/search_application/*"); + private static final Set<String> MANAGE_CONNECTOR_PATTERN = Set.of("cluster:admin/xpack/connector/*"); + private static final Set<String> MONITOR_CONNECTOR_PATTERN = Set.of( + "cluster:admin/xpack/connector/get", + "cluster:admin/xpack/connector/list", + "cluster:admin/xpack/connector/sync_job/get", + "cluster:admin/xpack/connector/sync_job/list" + ); + private static final Set<String> READ_CONNECTOR_SECRETS_PATTERN = Set.of("cluster:admin/xpack/connector/secret/get"); + private static final Set<String> WRITE_CONNECTOR_SECRETS_PATTERN = Set.of( + "cluster:admin/xpack/connector/secret/delete", + "cluster:admin/xpack/connector/secret/post", + "cluster:admin/xpack/connector/secret/put" + ); + private static final Set<String> CONNECTOR_SECRETS_PATTERN = Stream.concat( + READ_CONNECTOR_SECRETS_PATTERN.stream(), + WRITE_CONNECTOR_SECRETS_PATTERN.stream() + ).collect(Collectors.toSet()); private static final Set<String> MANAGE_SEARCH_QUERY_RULES_PATTERN = Set.of("cluster:admin/xpack/query_rules/*"); private static final Set<String> MANAGE_SEARCH_SYNONYMS_PATTERN = Set.of( "cluster:admin/synonyms/*", @@ -262,6 +280,7 @@ public class ClusterPrivilegeResolver { ProfileHasPrivilegesAction.NAME, SuggestProfilesAction.NAME, GetRolesAction.NAME, + ActionTypes.QUERY_ROLE_ACTION.name(), GetRoleMappingsAction.NAME, GetServiceAccountAction.NAME, GetServiceAccountCredentialsAction.NAME + "*", @@ -332,6 +351,15 @@ public class ClusterPrivilegeResolver { "manage_search_application", MANAGE_SEARCH_APPLICATION_PATTERN ); + public static final NamedClusterPrivilege MANAGE_CONNECTOR =
new ActionClusterPrivilege( + "manage_connector", + MANAGE_CONNECTOR_PATTERN, + CONNECTOR_SECRETS_PATTERN + ); + public static final NamedClusterPrivilege MONITOR_CONNECTOR = new ActionClusterPrivilege( + "monitor_connector", + MONITOR_CONNECTOR_PATTERN + ); public static final NamedClusterPrivilege MANAGE_SEARCH_SYNONYMS = new ActionClusterPrivilege( "manage_search_synonyms", MANAGE_SEARCH_SYNONYMS_PATTERN @@ -362,16 +390,12 @@ public class ClusterPrivilegeResolver { public static final NamedClusterPrivilege READ_CONNECTOR_SECRETS = new ActionClusterPrivilege( "read_connector_secrets", - Set.of("cluster:admin/xpack/connector/secret/get") + READ_CONNECTOR_SECRETS_PATTERN ); public static final NamedClusterPrivilege WRITE_CONNECTOR_SECRETS = new ActionClusterPrivilege( "write_connector_secrets", - Set.of( - "cluster:admin/xpack/connector/secret/delete", - "cluster:admin/xpack/connector/secret/post", - "cluster:admin/xpack/connector/secret/put" - ) + WRITE_CONNECTOR_SECRETS_PATTERN ); public static final NamedClusterPrivilege MONITOR_GLOBAL_RETENTION = new ActionClusterPrivilege( "monitor_data_stream_global_retention", @@ -382,11 +406,16 @@ public class ClusterPrivilegeResolver { Set.of("cluster:admin/data_stream/global_retention/*", "cluster:monitor/data_stream/global_retention/*") ); + /** + * If you are adding a new named cluster privilege, also add it to the + * docs. + */ private static final Map VALUES = sortByAccessLevel( Stream.of( NONE, ALL, MONITOR, + MONITOR_CONNECTOR, MONITOR_INFERENCE, MONITOR_ML, MONITOR_TEXT_STRUCTURE, @@ -396,6 +425,7 @@ public class ClusterPrivilegeResolver { MONITOR_ROLLUP, MONITOR_ENRICH, MANAGE, + MANAGE_CONNECTOR, MANAGE_INFERENCE, MANAGE_ML, MANAGE_TRANSFORM_DEPRECATED, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java index 8443dc4083694..7174b2f616c2a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java @@ -208,6 +208,10 @@ public final class IndexPrivilege extends Privilege { CROSS_CLUSTER_REPLICATION_INTERNAL_AUTOMATON ); + /** + * If you are adding a new named index privilege, also add it to the + * docs. 
+ */ @SuppressWarnings("unchecked") private static final Map VALUES = sortByAccessLevel( Stream.of( diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java index 41da995797e29..a0fe3d09eccc7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java @@ -73,6 +73,8 @@ static RoleDescriptor kibanaSystem(String name) { // For Fleet package upgrade "manage_pipeline", "manage_ilm", + // For connectors telemetry + "monitor_connector", // For the endpoint package that ships a transform "manage_transform", InvalidateApiKeyAction.NAME, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/SecurityMigrationTaskParams.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/SecurityMigrationTaskParams.java index d54f3098fead9..14cc4d3d6f5b9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/SecurityMigrationTaskParams.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/SecurityMigrationTaskParams.java @@ -21,33 +21,46 @@ import java.io.IOException; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; public class SecurityMigrationTaskParams implements PersistentTaskParams { public static final String TASK_NAME = "security-migration"; private final int migrationVersion; + private final boolean migrationNeeded; + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( TASK_NAME, true, - (arr) -> new SecurityMigrationTaskParams((int) arr[0]) + (arr) -> new SecurityMigrationTaskParams((int) arr[0], arr[1] == null || (boolean) arr[1]) ); static { PARSER.declareInt(constructorArg(), new ParseField("migration_version")); + PARSER.declareBoolean(optionalConstructorArg(), new ParseField("migration_needed")); } - public SecurityMigrationTaskParams(int migrationVersion) { + public SecurityMigrationTaskParams(int migrationVersion, boolean migrationNeeded) { this.migrationVersion = migrationVersion; + this.migrationNeeded = migrationNeeded; } public SecurityMigrationTaskParams(StreamInput in) throws IOException { this.migrationVersion = in.readInt(); + if (in.getTransportVersion().onOrAfter(TransportVersions.SECURITY_MIGRATIONS_MIGRATION_NEEDED_ADDED)) { + this.migrationNeeded = in.readBoolean(); + } else { + this.migrationNeeded = true; + } } @Override public void writeTo(StreamOutput out) throws IOException { out.writeInt(migrationVersion); + if (out.getTransportVersion().onOrAfter(TransportVersions.SECURITY_MIGRATIONS_MIGRATION_NEEDED_ADDED)) { + out.writeBoolean(migrationNeeded); + } } @Override @@ -64,6 +77,7 @@ public TransportVersion getMinimalSupportedVersion() { public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(); builder.field("migration_version", migrationVersion); + builder.field("migration_needed", migrationNeeded); builder.endObject(); return builder; } @@ -75,4 +89,8 @@ public static SecurityMigrationTaskParams 
fromXContent(XContentParser parser) { public int getMigrationVersion() { return migrationVersion; } + + public boolean isMigrationNeeded() { + return migrationNeeded; + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/GeoTileGroupSource.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/GeoTileGroupSource.java index 68109f429f461..6b4394f1c2b52 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/GeoTileGroupSource.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/GeoTileGroupSource.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.geo.GeoBoundingBox; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.mapper.GeoShapeFieldMapper; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; @@ -138,7 +137,7 @@ public int hashCode() { @Override public String getMappingType() { - return GeoShapeFieldMapper.CONTENT_TYPE; + return "geo_shape"; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java index 96fa4de6c0d9b..3270a839778fe 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java @@ -16,6 +16,7 @@ import java.io.IOException; import java.time.Clock; import java.time.ZonedDateTime; +import java.util.Arrays; /** * A xcontent parser that is used by watcher. 
This is a special parser that is @@ -50,7 +51,9 @@ public static Secret secretOrNull(XContentParser parser) throws IOException { throw new ElasticsearchParseException("found redacted password in field [{}]", parser.currentName()); } } else if (watcherParser.cryptoService != null) { - return new Secret(watcherParser.cryptoService.encrypt(chars)); + char[] encryptedChars = watcherParser.cryptoService.encrypt(chars); + Arrays.fill(chars, '\0'); // Clear chars from unencrypted buffer + return new Secret(encryptedChars); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/InferenceActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/InferenceActionRequestTests.java index 476167c5db0fb..d4d4146c6a5ba 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/InferenceActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/InferenceActionRequestTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; @@ -71,6 +72,94 @@ public void testParsing() throws IOException { } } + public void testValidation_TextEmbedding() { + InferenceAction.Request request = new InferenceAction.Request( + TaskType.TEXT_EMBEDDING, + "model", + null, + List.of("input"), + null, + null, + null + ); + ActionRequestValidationException e = request.validate(); + assertNull(e); + } + + public void testValidation_Rerank() { + InferenceAction.Request request = new InferenceAction.Request( + TaskType.RERANK, + "model", + "query", + List.of("input"), + null, + null, + null + ); + ActionRequestValidationException e = request.validate(); + assertNull(e); + } + + public void testValidation_TextEmbedding_Null() { + InferenceAction.Request inputNullRequest = new InferenceAction.Request( + TaskType.TEXT_EMBEDDING, + "model", + null, + null, + null, + null, + null + ); + ActionRequestValidationException inputNullError = inputNullRequest.validate(); + assertNotNull(inputNullError); + assertThat(inputNullError.getMessage(), is("Validation Failed: 1: Field [input] cannot be null;")); + } + + public void testValidation_TextEmbedding_Empty() { + InferenceAction.Request inputEmptyRequest = new InferenceAction.Request( + TaskType.TEXT_EMBEDDING, + "model", + null, + List.of(), + null, + null, + null + ); + ActionRequestValidationException inputEmptyError = inputEmptyRequest.validate(); + assertNotNull(inputEmptyError); + assertThat(inputEmptyError.getMessage(), is("Validation Failed: 1: Field [input] cannot be an empty array;")); + } + + public void testValidation_Rerank_Null() { + InferenceAction.Request queryNullRequest = new InferenceAction.Request( + TaskType.RERANK, + "model", + null, + List.of("input"), + null, + null, + null + ); + ActionRequestValidationException queryNullError = queryNullRequest.validate(); + assertNotNull(queryNullError); + assertThat(queryNullError.getMessage(), is("Validation Failed: 1: Field [query] cannot be null for task type [rerank];")); + } + + public void testValidation_Rerank_Empty() { + InferenceAction.Request queryEmptyRequest = new InferenceAction.Request( + TaskType.RERANK, + "model", + "", + List.of("input"), + null, + null, + null + ); + ActionRequestValidationException 
queryEmptyError = queryEmptyRequest.validate(); + assertNotNull(queryEmptyError); + assertThat(queryEmptyError.getMessage(), is("Validation Failed: 1: Field [query] cannot be empty for task type [rerank];")); + } + public void testParseRequest_DefaultsInputTypeToIngest() throws IOException { String singleInputRequest = """ { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/AbstractBWCWireSerializationTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/AbstractBWCWireSerializationTestCase.java index e9a5b08f8051d..2098a7ff904a1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/AbstractBWCWireSerializationTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/AbstractBWCWireSerializationTestCase.java @@ -9,6 +9,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Strings; import org.elasticsearch.test.AbstractWireSerializingTestCase; import java.io.IOException; @@ -66,8 +67,10 @@ protected final void assertBwcSerialization(T testInstance, TransportVersion ver * @param version The version which serialized */ protected void assertOnBWCObject(T bwcSerializedObject, T testInstance, TransportVersion version) { - assertNotSame(version.toString(), bwcSerializedObject, testInstance); - assertEquals(version.toString(), bwcSerializedObject, testInstance); - assertEquals(version.toString(), bwcSerializedObject.hashCode(), testInstance.hashCode()); + var errorMessage = Strings.format("Failed for TransportVersion [%s]", version.toString()); + + assertNotSame(errorMessage, bwcSerializedObject, testInstance); + assertEquals(errorMessage, bwcSerializedObject, testInstance); + assertEquals(errorMessage, bwcSerializedObject.hashCode(), testInstance.hashCode()); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/CreateTrainedModelAssignmentActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/CreateTrainedModelAssignmentActionRequestTests.java index 71a68a65b7977..39f646df0d582 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/CreateTrainedModelAssignmentActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/CreateTrainedModelAssignmentActionRequestTests.java @@ -14,7 +14,7 @@ public class CreateTrainedModelAssignmentActionRequestTests extends AbstractWire @Override protected Request createTestInstance() { - return new Request(StartTrainedModelDeploymentTaskParamsTests.createRandom()); + return new Request(StartTrainedModelDeploymentTaskParamsTests.createRandom(), null); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsStatsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsStatsActionResponseTests.java index 8c175c17fccc8..d60bbc6cc7713 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsStatsActionResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsStatsActionResponseTests.java @@ -156,6 +156,7 @@ protected Response mutateInstanceForVersion(Response instance, TransportVersion stats.getDeploymentStats().getModelId(), stats.getDeploymentStats().getThreadsPerAllocation(), 
stats.getDeploymentStats().getNumberOfAllocations(), + null, stats.getDeploymentStats().getQueueCapacity(), null, stats.getDeploymentStats().getStartTime(), @@ -228,6 +229,7 @@ protected Response mutateInstanceForVersion(Response instance, TransportVersion stats.getDeploymentStats().getModelId(), stats.getDeploymentStats().getThreadsPerAllocation(), stats.getDeploymentStats().getNumberOfAllocations(), + null, stats.getDeploymentStats().getQueueCapacity(), null, stats.getDeploymentStats().getStartTime(), @@ -300,6 +302,7 @@ protected Response mutateInstanceForVersion(Response instance, TransportVersion stats.getDeploymentStats().getModelId(), stats.getDeploymentStats().getThreadsPerAllocation(), stats.getDeploymentStats().getNumberOfAllocations(), + null, stats.getDeploymentStats().getQueueCapacity(), null, stats.getDeploymentStats().getStartTime(), @@ -372,6 +375,7 @@ protected Response mutateInstanceForVersion(Response instance, TransportVersion stats.getDeploymentStats().getModelId(), stats.getDeploymentStats().getThreadsPerAllocation(), stats.getDeploymentStats().getNumberOfAllocations(), + null, stats.getDeploymentStats().getQueueCapacity(), stats.getDeploymentStats().getCacheSize(), stats.getDeploymentStats().getStartTime(), @@ -445,6 +449,7 @@ protected Response mutateInstanceForVersion(Response instance, TransportVersion stats.getDeploymentStats().getModelId(), stats.getDeploymentStats().getThreadsPerAllocation(), stats.getDeploymentStats().getNumberOfAllocations(), + null, stats.getDeploymentStats().getQueueCapacity(), stats.getDeploymentStats().getCacheSize(), stats.getDeploymentStats().getStartTime(), @@ -518,6 +523,7 @@ protected Response mutateInstanceForVersion(Response instance, TransportVersion stats.getDeploymentStats().getModelId(), stats.getDeploymentStats().getThreadsPerAllocation(), stats.getDeploymentStats().getNumberOfAllocations(), + null, stats.getDeploymentStats().getQueueCapacity(), stats.getDeploymentStats().getCacheSize(), stats.getDeploymentStats().getStartTime(), @@ -591,6 +597,7 @@ protected Response mutateInstanceForVersion(Response instance, TransportVersion stats.getDeploymentStats().getModelId(), stats.getDeploymentStats().getThreadsPerAllocation(), stats.getDeploymentStats().getNumberOfAllocations(), + null, stats.getDeploymentStats().getQueueCapacity(), stats.getDeploymentStats().getCacheSize(), stats.getDeploymentStats().getStartTime(), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentRequestTests.java index ad33a85d42e53..730d994fc5e35 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentRequestTests.java @@ -71,7 +71,8 @@ public static Request createRandom() { } if (randomBoolean()) { request.setPriority(randomFrom(Priority.values()).toString()); - if (request.getNumberOfAllocations() > 1 || request.getThreadsPerAllocation() > 1) { + if ((request.getNumberOfAllocations() != null && request.getNumberOfAllocations() > 1) + || request.getThreadsPerAllocation() > 1) { request.setPriority(Priority.NORMAL.toString()); } } @@ -230,7 +231,8 @@ public void testDefaults() { Request request = new Request(randomAlphaOfLength(10), randomAlphaOfLength(10)); assertThat(request.getTimeout(), 
equalTo(TimeValue.timeValueSeconds(30))); assertThat(request.getWaitForState(), equalTo(AllocationStatus.State.STARTED)); - assertThat(request.getNumberOfAllocations(), equalTo(1)); + assertThat(request.getNumberOfAllocations(), nullValue()); + assertThat(request.computeNumberOfAllocations(), equalTo(1)); assertThat(request.getThreadsPerAllocation(), equalTo(1)); assertThat(request.getQueueCapacity(), equalTo(1024)); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/AssignmentStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/AssignmentStatsTests.java index a1ab023a6935f..07c56b073cd00 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/AssignmentStatsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/AssignmentStatsTests.java @@ -50,6 +50,7 @@ public static AssignmentStats randomDeploymentStats() { modelId, randomBoolean() ? null : randomIntBetween(1, 8), randomBoolean() ? null : randomIntBetween(1, 8), + null, randomBoolean() ? null : randomIntBetween(1, 10000), randomBoolean() ? null : ByteSizeValue.ofBytes(randomLongBetween(1, 10000000)), Instant.now(), @@ -102,6 +103,7 @@ public void testGetOverallInferenceStats() { modelId, randomBoolean() ? null : randomIntBetween(1, 8), randomBoolean() ? null : randomIntBetween(1, 8), + null, randomBoolean() ? null : randomIntBetween(1, 10000), randomBoolean() ? null : ByteSizeValue.ofBytes(randomLongBetween(1, 1000000)), Instant.now(), @@ -166,6 +168,7 @@ public void testGetOverallInferenceStatsWithNoNodes() { modelId, randomBoolean() ? null : randomIntBetween(1, 8), randomBoolean() ? null : randomIntBetween(1, 8), + null, randomBoolean() ? null : randomIntBetween(1, 10000), randomBoolean() ? null : ByteSizeValue.ofBytes(randomLongBetween(1, 1000000)), Instant.now(), @@ -187,6 +190,7 @@ public void testGetOverallInferenceStatsWithOnlyStoppedNodes() { modelId, randomBoolean() ? null : randomIntBetween(1, 8), randomBoolean() ? null : randomIntBetween(1, 8), + null, randomBoolean() ? null : randomIntBetween(1, 10000), randomBoolean() ? 
null : ByteSizeValue.ofBytes(randomLongBetween(1, 1000000)), Instant.now(), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java index 75706f3d6a9bf..6d70105dfedba 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java @@ -39,7 +39,7 @@ public class TrainedModelAssignmentTests extends AbstractXContentSerializingTestCase { public static TrainedModelAssignment randomInstance() { - TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomParams()); + TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomParams(), null); List nodes = Stream.generate(() -> randomAlphaOfLength(10)).limit(randomInt(5)).toList(); for (String node : nodes) { builder.addRoutingEntry(node, RoutingInfoTests.randomInstance()); @@ -72,7 +72,7 @@ protected TrainedModelAssignment mutateInstance(TrainedModelAssignment instance) } public void testBuilderAddingExistingRoute() { - TrainedModelAssignment.Builder assignment = TrainedModelAssignment.Builder.empty(randomParams()); + TrainedModelAssignment.Builder assignment = TrainedModelAssignment.Builder.empty(randomParams(), null); String addingNode = "new-node"; assignment.addRoutingEntry(addingNode, RoutingInfoTests.randomInstance()); @@ -80,7 +80,7 @@ public void testBuilderAddingExistingRoute() { } public void testBuilderUpdatingMissingRoute() { - TrainedModelAssignment.Builder assignment = TrainedModelAssignment.Builder.empty(randomParams()); + TrainedModelAssignment.Builder assignment = TrainedModelAssignment.Builder.empty(randomParams(), null); String addingNode = "new-node"; expectThrows( ResourceNotFoundException.class, @@ -93,7 +93,7 @@ public void testGetStartedNodes() { String startedNode2 = "started-node-2"; String nodeInAnotherState1 = "another-state-node-1"; String nodeInAnotherState2 = "another-state-node-2"; - TrainedModelAssignment allocation = TrainedModelAssignment.Builder.empty(randomParams()) + TrainedModelAssignment allocation = TrainedModelAssignment.Builder.empty(randomParams(), null) .addRoutingEntry(startedNode1, RoutingInfoTests.randomInstance(RoutingState.STARTED)) .addRoutingEntry(startedNode2, RoutingInfoTests.randomInstance(RoutingState.STARTED)) .addRoutingEntry( @@ -114,20 +114,20 @@ public void testGetStartedNodes() { public void testCalculateAllocationStatus_GivenNoAllocations() { assertThat( - TrainedModelAssignment.Builder.empty(randomTaskParams(5)).build().calculateAllocationStatus(), + TrainedModelAssignment.Builder.empty(randomTaskParams(5), null).build().calculateAllocationStatus(), isPresentWith(new AllocationStatus(0, 5)) ); } public void testCalculateAllocationStatus_GivenStoppingAssignment() { - TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5)); + TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5), null); builder.addRoutingEntry("node-1", new RoutingInfo(1, 2, RoutingState.STARTED, "")); builder.addRoutingEntry("node-2", new RoutingInfo(2, 1, RoutingState.STARTED, "")); assertThat(builder.stopAssignment("test").build().calculateAllocationStatus(), isEmpty()); } public void 
testCalculateAllocationStatus_GivenPartiallyAllocated() { - TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5)); + TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5), null); builder.addRoutingEntry("node-1", new RoutingInfo(1, 2, RoutingState.STARTED, "")); builder.addRoutingEntry("node-2", new RoutingInfo(2, 1, RoutingState.STARTED, "")); builder.addRoutingEntry("node-3", new RoutingInfo(3, 3, RoutingState.STARTING, "")); @@ -135,28 +135,28 @@ public void testCalculateAllocationStatus_GivenPartiallyAllocated() { } public void testCalculateAllocationStatus_GivenFullyAllocated() { - TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5)); + TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5), null); builder.addRoutingEntry("node-1", new RoutingInfo(4, 4, RoutingState.STARTED, "")); builder.addRoutingEntry("node-2", new RoutingInfo(1, 1, RoutingState.STARTED, "")); assertThat(builder.build().calculateAllocationStatus(), isPresentWith(new AllocationStatus(5, 5))); } public void testCalculateAssignmentState_GivenNoStartedAssignments() { - TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5)); + TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5), null); builder.addRoutingEntry("node-1", new RoutingInfo(4, 4, RoutingState.STARTING, "")); builder.addRoutingEntry("node-2", new RoutingInfo(1, 1, RoutingState.STARTING, "")); assertThat(builder.calculateAssignmentState(), equalTo(AssignmentState.STARTING)); } public void testCalculateAssignmentState_GivenOneStartedAssignment() { - TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5)); + TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5), null); builder.addRoutingEntry("node-1", new RoutingInfo(4, 4, RoutingState.STARTING, "")); builder.addRoutingEntry("node-2", new RoutingInfo(1, 1, RoutingState.STARTED, "")); assertThat(builder.calculateAssignmentState(), equalTo(AssignmentState.STARTED)); } public void testCalculateAndSetAssignmentState_GivenStoppingAssignment() { - TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5)); + TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5), null); builder.addRoutingEntry("node-1", new RoutingInfo(4, 4, RoutingState.STARTED, "")); builder.addRoutingEntry("node-2", new RoutingInfo(1, 1, RoutingState.STARTED, "")); assertThat( @@ -166,7 +166,7 @@ public void testCalculateAndSetAssignmentState_GivenStoppingAssignment() { } public void testselectRandomStartedNodeWeighedOnAllocationsForNRequests_GivenNoStartedAllocations() { - TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5)); + TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5), null); builder.addRoutingEntry("node-1", new RoutingInfo(4, 4, RoutingState.STARTING, "")); builder.addRoutingEntry("node-2", new RoutingInfo(1, 1, RoutingState.STOPPED, "")); TrainedModelAssignment assignment = builder.build(); @@ -175,7 +175,7 @@ public void testselectRandomStartedNodeWeighedOnAllocationsForNRequests_GivenNoS } public void testselectRandomStartedNodeWeighedOnAllocationsForNRequests_GivenSingleStartedNode() { - 
TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5)); + TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5), null); builder.addRoutingEntry("node-1", new RoutingInfo(4, 4, RoutingState.STARTED, "")); TrainedModelAssignment assignment = builder.build(); @@ -185,7 +185,7 @@ public void testselectRandomStartedNodeWeighedOnAllocationsForNRequests_GivenSin } public void testselectRandomStartedNodeWeighedOnAllocationsForNRequests_GivenAShuttingDownRoute_ItReturnsNoNodes() { - TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5)); + TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5), null); builder.addRoutingEntry("node-1", new RoutingInfo(4, 4, RoutingState.STARTED, "")); TrainedModelAssignment assignment = builder.build(); @@ -195,7 +195,7 @@ public void testselectRandomStartedNodeWeighedOnAllocationsForNRequests_GivenASh } public void testselectRandomStartedNodeWeighedOnAllocationsForNRequests_GivenAShuttingDownRoute_ItReturnsNode1() { - TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5)); + TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5), null); builder.addRoutingEntry("node-1", new RoutingInfo(4, 4, RoutingState.STOPPING, "")); TrainedModelAssignment assignment = builder.build(); @@ -205,7 +205,7 @@ public void testselectRandomStartedNodeWeighedOnAllocationsForNRequests_GivenASh } public void testSingleRequestWith2Nodes() { - TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5)); + TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5), null); builder.addRoutingEntry("node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")); builder.addRoutingEntry("node-2", new RoutingInfo(1, 1, RoutingState.STARTED, "")); TrainedModelAssignment assignment = builder.build(); @@ -216,7 +216,7 @@ public void testSingleRequestWith2Nodes() { } public void testSelectRandomStartedNodeWeighedOnAllocationsForNRequests_GivenMultipleStartedNodes() { - TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(6)); + TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(6), null); builder.addRoutingEntry("node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")); builder.addRoutingEntry("node-2", new RoutingInfo(2, 2, RoutingState.STARTED, "")); builder.addRoutingEntry("node-3", new RoutingInfo(3, 3, RoutingState.STARTED, "")); @@ -239,7 +239,7 @@ public void testSelectRandomStartedNodeWeighedOnAllocationsForNRequests_GivenMul } public void testselectRandomStartedNodeWeighedOnAllocationsForNRequests_GivenMultipleStartedNodesWithZeroAllocations() { - TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(6)); + TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(6), null); builder.addRoutingEntry("node-1", new RoutingInfo(0, 0, RoutingState.STARTED, "")); builder.addRoutingEntry("node-2", new RoutingInfo(0, 0, RoutingState.STARTED, "")); builder.addRoutingEntry("node-3", new RoutingInfo(0, 0, RoutingState.STARTED, "")); @@ -257,7 +257,7 @@ public void testselectRandomStartedNodeWeighedOnAllocationsForNRequests_GivenMul } public void testIsSatisfied_GivenEnoughAllocations() { - 
TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(6)); + TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(6), null); builder.addRoutingEntry("node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")); builder.addRoutingEntry("node-2", new RoutingInfo(2, 2, RoutingState.STARTED, "")); builder.addRoutingEntry("node-3", new RoutingInfo(3, 3, RoutingState.STARTED, "")); @@ -266,7 +266,7 @@ public void testIsSatisfied_GivenEnoughAllocations() { } public void testIsSatisfied_GivenEnoughAllocations_ButOneNodeIsNotAssignable() { - TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(6)); + TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(6), null); builder.addRoutingEntry("node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")); builder.addRoutingEntry("node-2", new RoutingInfo(2, 2, RoutingState.STARTED, "")); builder.addRoutingEntry("node-3", new RoutingInfo(3, 3, RoutingState.STARTED, "")); @@ -275,7 +275,7 @@ public void testIsSatisfied_GivenEnoughAllocations_ButOneNodeIsNotAssignable() { } public void testIsSatisfied_GivenEnoughAllocations_ButOneNodeIsNeitherStartingNorStarted() { - TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(6)); + TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(6), null); builder.addRoutingEntry( "node-1", new RoutingInfo(1, 1, randomFrom(RoutingState.FAILED, RoutingState.STOPPING, RoutingState.STOPPED), "") @@ -287,7 +287,7 @@ public void testIsSatisfied_GivenEnoughAllocations_ButOneNodeIsNeitherStartingNo } public void testIsSatisfied_GivenNotEnoughAllocations() { - TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(7)); + TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(7), null); builder.addRoutingEntry("node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")); builder.addRoutingEntry("node-2", new RoutingInfo(2, 2, RoutingState.STARTED, "")); builder.addRoutingEntry("node-3", new RoutingInfo(3, 3, RoutingState.STARTED, "")); @@ -296,7 +296,7 @@ public void testIsSatisfied_GivenNotEnoughAllocations() { } public void testMaxAssignedAllocations() { - TrainedModelAssignment assignment = TrainedModelAssignment.Builder.empty(randomTaskParams(10)) + TrainedModelAssignment assignment = TrainedModelAssignment.Builder.empty(randomTaskParams(10), null) .addRoutingEntry("node-1", new RoutingInfo(1, 2, RoutingState.STARTED, "")) .addRoutingEntry("node-2", new RoutingInfo(2, 1, RoutingState.STARTED, "")) .addRoutingEntry("node-3", new RoutingInfo(3, 3, RoutingState.STARTING, "")) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/search/WeightedTokensQueryBuilderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/search/WeightedTokensQueryBuilderTests.java index 43a531fcf8229..bb727204e2651 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/search/WeightedTokensQueryBuilderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/search/WeightedTokensQueryBuilderTests.java @@ -190,6 +190,30 @@ public void testToQuery() throws IOException { } } + @Override + public void testFromXContent() throws IOException { + super.testFromXContent(); + 
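// the inherited test parses a weighted_tokens query, which triggers the deprecation warning asserted below +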
assertCriticalWarnings(WeightedTokensQueryBuilder.WEIGHTED_TOKENS_DEPRECATION_MESSAGE); + } + + @Override + public void testUnknownField() throws IOException { + super.testUnknownField(); + assertCriticalWarnings(WeightedTokensQueryBuilder.WEIGHTED_TOKENS_DEPRECATION_MESSAGE); + } + + @Override + public void testUnknownObjectException() throws IOException { + super.testUnknownObjectException(); + assertCriticalWarnings(WeightedTokensQueryBuilder.WEIGHTED_TOKENS_DEPRECATION_MESSAGE); + } + + @Override + public void testValidOutput() throws IOException { + super.testValidOutput(); + assertCriticalWarnings(WeightedTokensQueryBuilder.WEIGHTED_TOKENS_DEPRECATION_MESSAGE); + } + public void testPruningIsAppliedCorrectly() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { List documents = List.of( diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/QueryRoleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/QueryRoleRequestTests.java new file mode 100644 index 0000000000000..832e35a8dce32 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/QueryRoleRequestTests.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.nullValue; + +public class QueryRoleRequestTests extends ESTestCase { + public void testValidate() { + final QueryRoleRequest request1 = new QueryRoleRequest( + null, + randomIntBetween(0, Integer.MAX_VALUE), + randomIntBetween(0, Integer.MAX_VALUE), + null, + null + ); + assertThat(request1.validate(), nullValue()); + + final QueryRoleRequest request2 = new QueryRoleRequest( + null, + randomIntBetween(Integer.MIN_VALUE, -1), + randomIntBetween(0, Integer.MAX_VALUE), + null, + null + ); + assertThat(request2.validate().getMessage(), containsString("[from] parameter cannot be negative")); + + final QueryRoleRequest request3 = new QueryRoleRequest( + null, + randomIntBetween(0, Integer.MAX_VALUE), + randomIntBetween(Integer.MIN_VALUE, -1), + null, + null + ); + assertThat(request3.validate().getMessage(), containsString("[size] parameter cannot be negative")); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java index 1ade22179ab59..6f3c435eb12f6 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java @@ -278,6 +278,7 @@ public void testReadSecurityPrivilege() { ProfileHasPrivilegesAction.NAME, SuggestProfilesAction.NAME, GetRolesAction.NAME, + ActionTypes.QUERY_ROLE_ACTION.name(), GetRoleMappingsAction.NAME, GetServiceAccountAction.NAME, GetServiceAccountCredentialsAction.NAME, @@ -297,6 +298,7 @@ public void testReadSecurityPrivilege() { PutUserAction.NAME, DeleteUserAction.NAME, 
PutRoleAction.NAME, + ActionTypes.BULK_PUT_ROLES.name(), DeleteRoleAction.NAME, PutRoleMappingAction.NAME, DeleteRoleMappingAction.NAME, @@ -339,6 +341,7 @@ public void testManageUserProfilePrivilege() { ClusterPrivilegeResolver.MANAGE_USER_PROFILE, "cluster:admin/xpack/security/role/put", "cluster:admin/xpack/security/role/get", + "cluster:admin/xpack/security/role/query", "cluster:admin/xpack/security/role/delete" ); verifyClusterActionDenied( diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/action/RestTermsEnumActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/action/RestTermsEnumActionTests.java index b0ad137f0f1b6..2ea372a84b66c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/action/RestTermsEnumActionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/action/RestTermsEnumActionTests.java @@ -21,7 +21,7 @@ import org.elasticsearch.search.SearchModule; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; -import org.elasticsearch.telemetry.tracing.Tracer; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestChannel; import org.elasticsearch.test.rest.FakeRestRequest; @@ -50,7 +50,13 @@ public class RestTermsEnumActionTests extends ESTestCase { private static NodeClient client = new NodeClient(Settings.EMPTY, threadPool); private static UsageService usageService = new UsageService(); - private static RestController controller = new RestController(null, client, new NoneCircuitBreakerService(), usageService, Tracer.NOOP); + private static RestController controller = new RestController( + null, + client, + new NoneCircuitBreakerService(), + usageService, + TelemetryProvider.NOOP + ); private static RestTermsEnumAction action = new RestTermsEnumAction(); /** diff --git a/x-pack/plugin/core/template-resources/src/main/resources/logs@settings-logsdb.json b/x-pack/plugin/core/template-resources/src/main/resources/logs@settings-logsdb.json deleted file mode 100644 index b02866e867c4a..0000000000000 --- a/x-pack/plugin/core/template-resources/src/main/resources/logs@settings-logsdb.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "template": { - "settings": { - "index": { - "mode": "logs", - "lifecycle": { - "name": "logs" - }, - "codec": "best_compression", - "mapping": { - "ignore_malformed": true, - "total_fields": { - "ignore_dynamic_beyond_limit": true - } - }, - "default_pipeline": "logs@default-pipeline" - } - } - }, - "_meta": { - "description": "default settings for the logs index template installed by x-pack", - "managed": true - }, - "version": ${xpack.stack.template.version}, - "deprecated": ${xpack.stack.template.deprecated} -} diff --git a/x-pack/plugin/core/template-resources/src/main/resources/logs@settings.json b/x-pack/plugin/core/template-resources/src/main/resources/logs@settings.json index ca2659b8d8dea..240abf9934db5 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/logs@settings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/logs@settings.json @@ -5,6 +5,7 @@ "lifecycle": { "name": "logs" }, + "mode": "${xpack.stack.template.logs.index.mode}", "codec": "best_compression", "mapping": { "ignore_malformed": true, diff --git a/x-pack/plugin/core/template-resources/src/main/resources/metrics@settings.json b/x-pack/plugin/core/template-resources/src/main/resources/metrics@settings.json 
index 4f3fac1aed5ae..9960bd2e7fdac 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/metrics@settings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/metrics@settings.json @@ -10,9 +10,6 @@ "total_fields": { "ignore_dynamic_beyond_limit": true } - }, - "query": { - "default_field": ["message"] } } } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/metrics@tsdb-settings.json b/x-pack/plugin/core/template-resources/src/main/resources/metrics@tsdb-settings.json index b0db168e8189d..cb0e2cbffb50b 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/metrics@tsdb-settings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/metrics@tsdb-settings.json @@ -9,9 +9,6 @@ "total_fields": { "ignore_dynamic_beyond_limit": true } - }, - "query": { - "default_field": ["message"] } } } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats-mb.json b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats-mb.json index fab8ca451358f..7457dce805eca 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats-mb.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats-mb.json @@ -1,5 +1,7 @@ { - "index_patterns": [".monitoring-beats-${xpack.stack.monitoring.template.version}-*"], + "index_patterns": [ + ".monitoring-beats-${xpack.stack.monitoring.template.version}-*" + ], "version": ${xpack.stack.monitoring.template.release.version}, "template": { "mappings": { @@ -198,6 +200,9 @@ "ratelimit": { "type": "long" }, + "timeout": { + "type": "long" + }, "toolarge": { "type": "long" }, @@ -212,16 +217,6 @@ } } }, - "request": { - "properties": { - "count": { - "type": "long" - } - } - }, - "unset": { - "type": "long" - }, "valid": { "properties": { "accepted": { @@ -239,151 +234,436 @@ } } } + }, + "unset": { + "type": "long" } } }, - "decoder": { + "agentcfg": { "properties": { - "deflate": { - "properties": { - "content-length": { - "type": "long" - }, - "count": { - "type": "long" - } - } - }, - "gzip": { - "properties": { - "content-length": { - "type": "long" - }, - "count": { - "type": "long" - } - } - }, - "missing-content-length": { + "elasticsearch": { "properties": { - "count": { - "type": "long" - } - } - }, - "reader": { - "properties": { - "count": { - "type": "long" - }, - "size": { - "type": "long" - } - } - }, - "uncompressed": { - "properties": { - "content-length": { - "type": "long" + "cache": { + "properties": { + "entries": { + "properties": { + "count": { + "type": "long" + } + } + }, + "refresh": { + "properties": { + "failures": { + "type": "long" + }, + "successes": { + "type": "long" + } + } + } + } }, - "count": { - "type": "long" + "fetch": { + "properties": { + "es": { + "type": "long" + }, + "fallback": { + "type": "long" + }, + "invalid": { + "type": "long" + }, + "unavailable": { + "type": "long" + } + } } } } } }, - "processor": { + "jaeger": { "properties": { - "error": { + "grpc": { "properties": { - "decoding": { + "collect": { "properties": { - "count": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "errors": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": 
"long" + } + } + } + } } } }, - "frames": { - "type": "long" - }, - "spans": { - "type": "long" - }, - "stacktraces": { - "type": "long" - }, - "transformations": { - "type": "long" - }, - "validation": { + "sampling": { "properties": { - "count": { - "type": "long" + "event": { + "properties": { + "received": { + "properties": { + "count": { + "type": "long" + } + } + } + } }, - "errors": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } + }, + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } } } } } - }, - "metric": { + } + } + }, + "otlp": { + "properties": { + "grpc": { "properties": { - "decoding": { + "logs": { "properties": { - "count": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "errors": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } } } }, - "transformations": { - "type": "long" + "metrics": { + "properties": { + "consumer": { + "properties": { + "unsupported_dropped": { + "type": "long" + } + } + }, + "request": { + "properties": { + "count": { + "type": "long" + } + } + }, + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } }, - "validation": { + "traces": { "properties": { - "count": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "errors": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } } } } } }, - "sourcemap": { + "http": { "properties": { - "counter": { - "type": "long" + "logs": { + "properties": { + "request": { + "properties": { + "count": { + "type": "long" + } + } + }, + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } }, - "decoding": { + "metrics": { "properties": { - "count": { - "type": "long" + "consumer": { + "properties": { + "unsupported_dropped": { + "type": "long" + } + } }, - "errors": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } + }, + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { 
+ "count": { + "type": "long" + } + } + } + } } } }, - "validation": { + "traces": { "properties": { - "count": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "errors": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } } } } } + } + } + }, + "processor": { + "properties": { + "error": { + "properties": { + "transformations": { + "type": "long" + } + } + }, + "metric": { + "properties": { + "transformations": { + "type": "long" + } + } }, "span": { "properties": { @@ -392,60 +672,127 @@ } } }, - "transaction": { + "stream": { "properties": { - "decoding": { + "accepted": { + "type": "long" + }, + "errors": { "properties": { - "count": { + "invalid": { "type": "long" }, - "errors": { + "toolarge": { "type": "long" } } - }, - "frames": { - "type": "long" - }, - "spans": { + } + } + }, + "transaction": { + "properties": { + "transformations": { "type": "long" - }, - "stacktraces": { + } + } + } + } + }, + "root": { + "properties": { + "request": { + "properties": { + "count": { "type": "long" - }, - "transactions": { + } + } + }, + "response": { + "properties": { + "count": { "type": "long" }, - "transformations": { - "type": "long" + "errors": { + "properties": { + "closed": { + "type": "long" + }, + "count": { + "type": "long" + }, + "decode": { + "type": "long" + }, + "forbidden": { + "type": "long" + }, + "internal": { + "type": "long" + }, + "invalidquery": { + "type": "long" + }, + "method": { + "type": "long" + }, + "notfound": { + "type": "long" + }, + "queue": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "toolarge": { + "type": "long" + }, + "unauthorized": { + "type": "long" + }, + "unavailable": { + "type": "long" + }, + "validate": { + "type": "long" + } + } }, - "validation": { + "valid": { "properties": { + "accepted": { + "type": "long" + }, "count": { "type": "long" }, - "errors": { + "notmodified": { + "type": "long" + }, + "ok": { "type": "long" } } } } + }, + "unset": { + "type": "long" + } + } + }, + "sampling": { + "properties": { + "transactions_dropped": { + "type": "long" } } }, "server": { "properties": { - "concurrent": { - "properties": { - "wait": { - "properties": { - "ms": { - "type": "long" - } - } - } - } - }, "request": { "properties": { "count": { @@ -478,21 +825,33 @@ "internal": { "type": "long" }, + "invalidquery": { + "type": "long" + }, "method": { "type": "long" }, + "notfound": { + "type": "long" + }, "queue": { "type": "long" }, "ratelimit": { "type": "long" }, + "timeout": { + "type": "long" + }, "toolarge": { "type": "long" }, "unauthorized": { "type": "long" }, + "unavailable": { + "type": "long" + }, "validate": { "type": "long" } @@ -506,12 +865,18 @@ "count": { "type": "long" }, + "notmodified": { + "type": "long" + }, "ok": { "type": "long" } } } } + }, + "unset": { + "type": "long" } } } @@ -918,6 +1283,37 @@ "type": "long" } } + }, + "output": { + "properties": { + "elasticsearch": { + "properties": { + "bulk_requests": { + "properties": { + "available": { + "type": "long" + }, + "completed": { + "type": "long" + } + } + }, + "indexers": { + "properties": { + "active": { + "type": "long" + }, + "created": { + "type": "long" + }, + 
"destroyed": { + "type": "long" + } + } + } + } + } + } } } }, @@ -1135,6 +1531,10 @@ "type": "alias", "path": "beat.stats.apm_server.acm.response.errors.ratelimit" }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.errors.timeout" + }, "toolarge": { "type": "alias", "path": "beat.stats.apm_server.acm.response.errors.toolarge" @@ -1153,18 +1553,6 @@ } } }, - "request": { - "properties": { - "count": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.request.count" - } - } - }, - "unset": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.unset" - }, "valid": { "properties": { "accepted": { @@ -1179,9 +1567,485 @@ "type": "alias", "path": "beat.stats.apm_server.acm.response.valid.notmodified" }, - "ok": { - "type": "alias", - "path": "beat.stats.apm_server.acm.response.valid.ok" + "ok": { + "type": "alias", + "path": "beat.stats.apm_server.acm.response.valid.ok" + } + } + } + } + }, + "unset": { + "type": "alias", + "path": "beat.stats.apm_server.acm.unset" + } + } + }, + "agentcfg": { + "properties": { + "elasticsearch": { + "properties": { + "cache": { + "properties": { + "entries": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.agentcfg.elasticsearch.cache.entries.count" + } + } + }, + "refresh": { + "properties": { + "failures": { + "type": "alias", + "path": "beat.stats.apm_server.agentcfg.elasticsearch.cache.refresh.failures" + }, + "successes": { + "type": "alias", + "path": "beat.stats.apm_server.agentcfg.elasticsearch.cache.refresh.successes" + } + } + } + } + }, + "fetch": { + "properties": { + "es": { + "type": "alias", + "path": "beat.stats.apm_server.agentcfg.elasticsearch.fetch.es" + }, + "fallback": { + "type": "alias", + "path": "beat.stats.apm_server.agentcfg.elasticsearch.fetch.fallback" + }, + "invalid": { + "type": "alias", + "path": "beat.stats.apm_server.agentcfg.elasticsearch.fetch.invalid" + }, + "unavailable": { + "type": "alias", + "path": "beat.stats.apm_server.agentcfg.elasticsearch.fetch.unavailable" + } + } + } + } + } + } + }, + "jaeger": { + "properties": { + "grpc": { + "properties": { + "collect": { + "properties": { + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.collect.request.count" + } + } + }, + "response": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.collect.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.collect.response.errors.count" + }, + "ratelimit": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.collect.response.errors.ratelimit" + }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.collect.response.errors.timeout" + }, + "unauthorized": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.collect.response.errors.unauthorized" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.collect.response.valid.count" + } + } + } + } + } + } + }, + "sampling": { + "properties": { + "event": { + "properties": { + "received": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.sampling.event.received.count" + } + } + } + } + }, + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.sampling.request.count" + } + } + }, + "response": { + 
"properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.sampling.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.sampling.response.errors.count" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.jaeger.grpc.sampling.response.valid.count" + } + } + } + } + } + } + } + } + } + } + }, + "otlp": { + "properties": { + "grpc": { + "properties": { + "logs": { + "properties": { + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.logs.request.count" + } + } + }, + "response": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.logs.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.logs.response.errors.count" + }, + "ratelimit": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.logs.response.errors.ratelimit" + }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.logs.response.errors.timeout" + }, + "unauthorized": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.logs.response.errors.unauthorized" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.logs.response.valid.count" + } + } + } + } + } + } + }, + "metrics": { + "properties": { + "consumer": { + "properties": { + "unsupported_dropped": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.consumer.unsupported_dropped" + } + } + }, + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.request.count" + } + } + }, + "response": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.response.errors.count" + }, + "ratelimit": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.response.errors.ratelimit" + }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.response.errors.timeout" + }, + "unauthorized": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.response.errors.unauthorized" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.metrics.response.valid.count" + } + } + } + } + } + } + }, + "traces": { + "properties": { + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.traces.request.count" + } + } + }, + "response": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.traces.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.traces.response.errors.count" + }, + "ratelimit": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.traces.response.errors.ratelimit" + }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.traces.response.errors.timeout" + }, + "unauthorized": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.grpc.traces.response.errors.unauthorized" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": 
"beat.stats.apm_server.otlp.grpc.traces.response.valid.count" + } + } + } + } + } + } + } + } + }, + "http": { + "properties": { + "logs": { + "properties": { + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.logs.request.count" + } + } + }, + "response": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.logs.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.logs.response.errors.count" + }, + "ratelimit": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.logs.response.errors.ratelimit" + }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.logs.response.errors.timeout" + }, + "unauthorized": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.logs.response.errors.unauthorized" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.logs.response.valid.count" + } + } + } + } + } + } + }, + "metrics": { + "properties": { + "consumer": { + "properties": { + "unsupported_dropped": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.consumer.unsupported_dropped" + } + } + }, + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.request.count" + } + } + }, + "response": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.response.errors.count" + }, + "ratelimit": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.response.errors.ratelimit" + }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.response.errors.timeout" + }, + "unauthorized": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.response.errors.unauthorized" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.metrics.response.valid.count" + } + } + } + } + } + } + }, + "traces": { + "properties": { + "request": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.traces.request.count" + } + } + }, + "response": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.traces.response.count" + }, + "errors": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.traces.response.errors.count" + }, + "ratelimit": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.traces.response.errors.ratelimit" + }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.traces.response.errors.timeout" + }, + "unauthorized": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.traces.response.errors.unauthorized" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "alias", + "path": "beat.stats.apm_server.otlp.http.traces.response.valid.count" + } + } + } + } } } } @@ -1189,248 +2053,180 @@ } } }, - "decoder": { + "processor": { "properties": { - "deflate": { + "error": { "properties": { - "content-length": { - "type": "alias", - "path": "beat.stats.apm_server.decoder.deflate.content-length" - }, - "count": { + "transformations": { "type": "alias", - "path": 
"beat.stats.apm_server.decoder.deflate.count" + "path": "beat.stats.apm_server.processor.error.transformations" } } }, - "gzip": { + "metric": { "properties": { - "content-length": { - "type": "alias", - "path": "beat.stats.apm_server.decoder.gzip.content-length" - }, - "count": { + "transformations": { "type": "alias", - "path": "beat.stats.apm_server.decoder.gzip.count" + "path": "beat.stats.apm_server.processor.metric.transformations" } } }, - "missing-content-length": { + "span": { "properties": { - "count": { + "transformations": { "type": "alias", - "path": "beat.stats.apm_server.decoder.missing-content-length.count" + "path": "beat.stats.apm_server.processor.span.transformations" } } }, - "reader": { + "stream": { "properties": { - "count": { + "accepted": { "type": "alias", - "path": "beat.stats.apm_server.decoder.reader.count" + "path": "beat.stats.apm_server.processor.stream.accepted" }, - "size": { - "type": "alias", - "path": "beat.stats.apm_server.decoder.reader.size" + "errors": { + "properties": { + "invalid": { + "type": "alias", + "path": "beat.stats.apm_server.processor.stream.errors.invalid" + }, + "toolarge": { + "type": "alias", + "path": "beat.stats.apm_server.processor.stream.errors.toolarge" + } + } } } }, - "uncompressed": { + "transaction": { "properties": { - "content-length": { - "type": "alias", - "path": "beat.stats.apm_server.decoder.uncompressed.content-length" - }, - "count": { + "transformations": { "type": "alias", - "path": "beat.stats.apm_server.decoder.uncompressed.count" + "path": "beat.stats.apm_server.processor.transaction.transformations" } } } } }, - "processor": { + "root": { "properties": { - "error": { + "request": { "properties": { - "decoding": { - "properties": { - "count": { - "type": "alias", - "path": "beat.stats.apm_server.processor.error.decoding.count" - }, - "errors": { - "type": "alias", - "path": "beat.stats.apm_server.processor.error.decoding.errors" - } - } - }, - "frames": { - "type": "alias", - "path": "beat.stats.apm_server.processor.error.frames" - }, - "spans": { - "type": "alias", - "path": "beat.stats.apm_server.processor.error.spans" - }, - "stacktraces": { + "count": { "type": "alias", - "path": "beat.stats.apm_server.processor.error.stacktraces" - }, - "transformations": { + "path": "beat.stats.apm_server.root.request.count" + } + } + }, + "response": { + "properties": { + "count": { "type": "alias", - "path": "beat.stats.apm_server.processor.error.transformations" + "path": "beat.stats.apm_server.root.response.count" }, - "validation": { + "errors": { "properties": { + "closed": { + "type": "alias", + "path": "beat.stats.apm_server.root.response.errors.closed" + }, "count": { "type": "alias", - "path": "beat.stats.apm_server.processor.error.validation.count" + "path": "beat.stats.apm_server.root.response.errors.count" }, - "errors": { + "decode": { "type": "alias", - "path": "beat.stats.apm_server.processor.error.validation.errors" - } - } - } - } - }, - "metric": { - "properties": { - "decoding": { - "properties": { - "count": { + "path": "beat.stats.apm_server.root.response.errors.decode" + }, + "forbidden": { "type": "alias", - "path": "beat.stats.apm_server.processor.metric.decoding.count" + "path": "beat.stats.apm_server.root.response.errors.forbidden" }, - "errors": { + "internal": { "type": "alias", - "path": "beat.stats.apm_server.processor.metric.decoding.errors" - } - } - }, - "transformations": { - "type": "alias", - "path": "beat.stats.apm_server.processor.metric.transformations" - }, - "validation": { - 
"properties": { - "count": { + "path": "beat.stats.apm_server.root.response.errors.internal" + }, + "invalidquery": { "type": "alias", - "path": "beat.stats.apm_server.processor.metric.validation.count" + "path": "beat.stats.apm_server.root.response.errors.invalidquery" }, - "errors": { + "method": { "type": "alias", - "path": "beat.stats.apm_server.processor.metric.validation.errors" - } - } - } - } - }, - "sourcemap": { - "properties": { - "counter": { - "type": "alias", - "path": "beat.stats.apm_server.processor.sourcemap.counter" - }, - "decoding": { - "properties": { - "count": { + "path": "beat.stats.apm_server.root.response.errors.method" + }, + "notfound": { "type": "alias", - "path": "beat.stats.apm_server.processor.sourcemap.decoding.count" + "path": "beat.stats.apm_server.root.response.errors.notfound" }, - "errors": { + "queue": { "type": "alias", - "path": "beat.stats.apm_server.processor.sourcemap.decoding.errors" - } - } - }, - "validation": { - "properties": { - "count": { + "path": "beat.stats.apm_server.root.response.errors.queue" + }, + "ratelimit": { "type": "alias", - "path": "beat.stats.apm_server.processor.sourcemap.validation.count" + "path": "beat.stats.apm_server.root.response.errors.ratelimit" }, - "errors": { + "timeout": { "type": "alias", - "path": "beat.stats.apm_server.processor.sourcemap.validation.errors" - } - } - } - } - }, - "span": { - "properties": { - "transformations": { - "type": "alias", - "path": "beat.stats.apm_server.processor.span.transformations" - } - } - }, - "transaction": { - "properties": { - "decoding": { - "properties": { - "count": { + "path": "beat.stats.apm_server.root.response.errors.timeout" + }, + "toolarge": { + "type": "alias", + "path": "beat.stats.apm_server.root.response.errors.toolarge" + }, + "unauthorized": { "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.decoding.count" + "path": "beat.stats.apm_server.root.response.errors.unauthorized" }, - "errors": { + "unavailable": { + "type": "alias", + "path": "beat.stats.apm_server.root.response.errors.unavailable" + }, + "validate": { "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.decoding.errors" + "path": "beat.stats.apm_server.root.response.errors.validate" } } }, - "frames": { - "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.frames" - }, - "spans": { - "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.spans" - }, - "stacktraces": { - "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.stacktraces" - }, - "transactions": { - "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.transactions" - }, - "transformations": { - "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.transformations" - }, - "validation": { + "valid": { "properties": { + "accepted": { + "type": "alias", + "path": "beat.stats.apm_server.root.response.valid.accepted" + }, "count": { "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.validation.count" + "path": "beat.stats.apm_server.root.response.valid.count" }, - "errors": { + "notmodified": { + "type": "alias", + "path": "beat.stats.apm_server.root.response.valid.notmodified" + }, + "ok": { "type": "alias", - "path": "beat.stats.apm_server.processor.transaction.validation.errors" + "path": "beat.stats.apm_server.root.response.valid.ok" } } } } + }, + "unset": { + "type": "alias", + "path": "beat.stats.apm_server.root.unset" + } + } + }, + "sampling": { + "properties": { + 
"transactions_dropped": { + "type": "alias", + "path": "beat.stats.apm_server.sampling.transactions_dropped" } } }, "server": { "properties": { - "concurrent": { - "properties": { - "wait": { - "properties": { - "ms": { - "type": "alias", - "path": "beat.stats.apm_server.server.concurrent.wait.ms" - } - } - } - } - }, "request": { "properties": { "count": { @@ -1471,10 +2267,18 @@ "type": "alias", "path": "beat.stats.apm_server.server.response.errors.internal" }, + "invalidquery": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.errors.invalidquery" + }, "method": { "type": "alias", "path": "beat.stats.apm_server.server.response.errors.method" }, + "notfound": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.errors.notfound" + }, "queue": { "type": "alias", "path": "beat.stats.apm_server.server.response.errors.queue" @@ -1483,6 +2287,10 @@ "type": "alias", "path": "beat.stats.apm_server.server.response.errors.ratelimit" }, + "timeout": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.errors.timeout" + }, "toolarge": { "type": "alias", "path": "beat.stats.apm_server.server.response.errors.toolarge" @@ -1491,6 +2299,10 @@ "type": "alias", "path": "beat.stats.apm_server.server.response.errors.unauthorized" }, + "unavailable": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.errors.unavailable" + }, "validate": { "type": "alias", "path": "beat.stats.apm_server.server.response.errors.validate" @@ -1507,6 +2319,10 @@ "type": "alias", "path": "beat.stats.apm_server.server.response.valid.count" }, + "notmodified": { + "type": "alias", + "path": "beat.stats.apm_server.server.response.valid.notmodified" + }, "ok": { "type": "alias", "path": "beat.stats.apm_server.server.response.valid.ok" @@ -1514,49 +2330,10 @@ } } } - } - } - }, - "sampling": { - "properties": { - "transactions_dropped": { - "type": "long" }, - "tail": { - "properties": { - "dynamic_service_groups": { - "type": "long" - }, - "storage": { - "properties": { - "lsm_size": { - "type": "long" - }, - "value_log_size": { - "type": "long" - } - } - }, - "events": { - "properties": { - "processed": { - "type": "long" - }, - "dropped": { - "type": "long" - }, - "stored": { - "type": "long" - }, - "sampled": { - "type": "long" - }, - "head_unsampled": { - "type": "long" - } - } - } - } + "unset": { + "type": "alias", + "path": "beat.stats.apm_server.server.unset" } } } @@ -1985,6 +2762,42 @@ } } } + }, + "output": { + "properties": { + "elasticsearch": { + "properties": { + "bulk_requests": { + "properties": { + "available": { + "type": "alias", + "path": "beat.stats.output.elasticsearch.bulk_requests.available" + }, + "completed": { + "type": "alias", + "path": "beat.stats.output.elasticsearch.bulk_requests.completed" + } + } + }, + "indexers": { + "properties": { + "active": { + "type": "alias", + "path": "beat.stats.output.elasticsearch.indexers.active" + }, + "created": { + "type": "alias", + "path": "beat.stats.output.elasticsearch.indexers.created" + }, + "destroyed": { + "type": "alias", + "path": "beat.stats.output.elasticsearch.indexers.destroyed" + } + } + } + } + } + } } } }, diff --git a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats.json b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats.json index 6dee05564cc10..d699317c29da3 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats.json +++ 
b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats.json @@ -346,17 +346,11 @@ "response": { "properties": { "count": { - "type": "long" + "type": "long" }, "errors": { "properties": { - "validate": { - "type": "long" - }, - "internal": { - "type": "long" - }, - "queue": { + "closed": { "type": "long" }, "count": { @@ -365,13 +359,13 @@ "decode": { "type": "long" }, - "toolarge": { + "forbidden": { "type": "long" }, - "unavailable": { + "internal": { "type": "long" }, - "forbidden": { + "invalidquery": { "type": "long" }, "method": { @@ -380,125 +374,454 @@ "notfound": { "type": "long" }, - "invalidquery": { + "queue": { "type": "long" }, "ratelimit": { "type": "long" }, - "closed": { + "timeout": { + "type": "long" + }, + "toolarge": { "type": "long" }, "unauthorized": { "type": "long" + }, + "unavailable": { + "type": "long" + }, + "validate": { + "type": "long" } } }, "valid": { "properties": { - "notmodified": { + "accepted": { "type": "long" }, "count": { "type": "long" }, - "ok": { + "notmodified": { "type": "long" }, - "accepted": { - "type": "long" - } - } - }, - "unset": { - "type": "long" - }, - "request": { - "properties": { - "count": { + "ok": { "type": "long" } } } } + }, + "unset": { + "type": "long" } } }, - "server": { + "agentcfg": { "properties": { - "request": { + "elasticsearch": { "properties": { - "count": { - "type": "long" - } - } - }, - "concurrent": { - "properties": { - "wait": { + "cache": { "properties": { - "ms": { - "type": "long" + "entries": { + "properties": { + "count": { + "type": "long" + } + } + }, + "refresh": { + "properties": { + "failures": { + "type": "long" + }, + "successes": { + "type": "long" + } + } } } - } - } - }, - "response": { - "properties": { - "count": { - "type": "long" }, - "errors": { + "fetch": { "properties": { - "count": { + "es": { "type": "long" }, - "toolarge": { + "fallback": { "type": "long" }, - "validate": { + "invalid": { "type": "long" }, - "ratelimit": { + "unavailable": { "type": "long" + } + } + } + } + } + } + }, + "jaeger": { + "properties": { + "grpc": { + "properties": { + "collect": { + "properties": { + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "queue": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } + }, + "sampling": { + "properties": { + "event": { + "properties": { + "received": { + "properties": { + "count": { + "type": "long" + } + } + } + } }, - "closed": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "forbidden": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } + } + } + } + } + }, + "otlp": { + "properties": { + "grpc": { + "properties": { + "logs": { + "properties": { + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "concurrency": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + 
"unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } + }, + "metrics": { + "properties": { + "consumer": { + "properties": { + "unsupported_dropped": { + "type": "long" + } + } }, - "unauthorized": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "internal": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } + }, + "traces": { + "properties": { + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "decode": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } + } + } + }, + "http": { + "properties": { + "logs": { + "properties": { + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "method": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } } } }, - "valid": { + "metrics": { "properties": { - "ok": { - "type": "long" + "consumer": { + "properties": { + "unsupported_dropped": { + "type": "long" + } + } }, - "accepted": { - "type": "long" + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "count": { - "type": "long" + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } + } + } + }, + "traces": { + "properties": { + "request": { + "properties": { + "count": { + "type": "long" + } + } + }, + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "unauthorized": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "count": { + "type": "long" + } + } + } + } } } } @@ -506,195 +829,138 @@ } } }, - "decoder": { + "processor": { "properties": { - "deflate": { + "error": { "properties": { - "content-length": { - "type": "long" - }, - "count": { + "transformations": { "type": "long" } } }, - "gzip": { + "metric": { "properties": { - "content-length": { - "type": "long" - }, - "count": { + "transformations": { "type": "long" } } }, - "uncompressed": { + "span": { "properties": { - "content-length": { - "type": "long" - }, - "count": { + "transformations": { "type": "long" } } }, - "reader": { + "stream": { "properties": { - "size": { + "accepted": { "type": "long" }, - "count": { - "type": "long" + "errors": { + "properties": { + "invalid": { + 
"type": "long" + }, + "toolarge": { + "type": "long" + } + } } } }, - "missing-content-length": { + "transaction": { "properties": { - "count": { + "transformations": { "type": "long" } } } } - }, - "processor": { + "root": { "properties": { - "metric": { + "request": { "properties": { - "decoding": { - "properties": { - "errors": { - "type": "long" - }, - "count": { - "type": "long" - } - } - }, - "validation": { - "properties": { - "errors": { - "type": "long" - }, - "count": { - "type": "long" - } - } - }, - "transformations": { + "count": { "type": "long" } } }, - "sourcemap": { + "response": { "properties": { - "counter": { + "count": { "type": "long" }, - "decoding": { + "errors": { "properties": { - "errors": { + "closed": { "type": "long" }, "count": { "type": "long" - } - } - }, - "validation": { - "properties": { - "errors": { + }, + "decode": { "type": "long" }, - "count": { + "forbidden": { "type": "long" - } - } - } - } - }, - "transaction": { - "properties": { - "decoding": { - "properties": { - "errors": { + }, + "internal": { "type": "long" }, - "count": { + "invalidquery": { "type": "long" - } - } - }, - "validation": { - "properties": { - "errors": { + }, + "method": { "type": "long" }, - "count": { + "notfound": { "type": "long" - } - } - }, - "transformations": { - "type": "long" - }, - "transactions": { - "type": "long" - }, - "spans": { - "type": "long" - }, - "stacktraces": { - "type": "long" - }, - "frames": { - "type": "long" - } - } - }, - "error": { - "properties": { - "decoding": { - "properties": { - "errors": { + }, + "queue": { "type": "long" }, - "count": { + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "toolarge": { + "type": "long" + }, + "unauthorized": { + "type": "long" + }, + "unavailable": { + "type": "long" + }, + "validate": { "type": "long" } } }, - "validation": { + "valid": { "properties": { - "errors": { + "accepted": { "type": "long" }, "count": { "type": "long" + }, + "notmodified": { + "type": "long" + }, + "ok": { + "type": "long" } } - }, - "transformations": { - "type": "long" - }, - "errors": { - "type": "long" - }, - "stacktraces": { - "type": "long" - }, - "frames": { - "type": "long" } } }, - "span": { - "properties": { - "transformations": { - "type": "long" - } - } + "unset": { + "type": "long" } } }, @@ -702,42 +968,95 @@ "properties": { "transactions_dropped": { "type": "long" + } + } + }, + "server": { + "properties": { + "request": { + "properties": { + "count": { + "type": "long" + } + } }, - "tail": { + "response": { "properties": { - "dynamic_service_groups": { + "count": { "type": "long" }, - "storage": { + "errors": { "properties": { - "lsm_size": { + "closed": { "type": "long" }, - "value_log_size": { + "concurrency": { + "type": "long" + }, + "count": { + "type": "long" + }, + "decode": { + "type": "long" + }, + "forbidden": { + "type": "long" + }, + "internal": { + "type": "long" + }, + "invalidquery": { + "type": "long" + }, + "method": { + "type": "long" + }, + "notfound": { + "type": "long" + }, + "queue": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "timeout": { + "type": "long" + }, + "toolarge": { + "type": "long" + }, + "unauthorized": { + "type": "long" + }, + "unavailable": { + "type": "long" + }, + "validate": { "type": "long" } } }, - "events": { + "valid": { "properties": { - "processed": { - "type": "long" - }, - "dropped": { + "accepted": { "type": "long" }, - "stored": { + "count": { "type": "long" }, - "sampled": { + "notmodified": { "type": "long" }, - 
"head_unsampled": { + "ok": { "type": "long" } } } } + }, + "unset": { + "type": "long" } } } @@ -893,6 +1212,37 @@ } } } + }, + "output": { + "properties": { + "elasticsearch": { + "properties": { + "bulk_requests": { + "properties": { + "available": { + "type": "long" + }, + "completed": { + "type": "long" + } + } + }, + "indexers": { + "properties": { + "active": { + "type": "long" + }, + "created": { + "type": "long" + }, + "destroyed": { + "type": "long" + } + } + } + } + } + } } } }, diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json index 3d5e5d0fdc9b7..e58a3cbd39f97 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json @@ -76,6 +76,12 @@ "type": "date", "format": "epoch_millis" }, + "protocol": { + "type": "keyword" + }, + "env_https_proxy": { + "type": "keyword" + }, "config.bpf_log_level": { "type": "long" }, diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-symbols.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-symbols.json index 9271718bd27ed..a23fa60021a05 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-symbols.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-symbols.json @@ -72,11 +72,8 @@ "store": false }, /* - pairs of (32bit PC offset, 32bit line number) followed by 64bit PC range base at the end. - To find line number for a given PC: find lowest offset such as offsetBase+PC >= offset, then read corresponding line number. - offsetBase could seemingly be available from exec_pc_range (it's the first value of the pair), but it's not the case. - Ranges are stored as points, which cannot be retrieve when disabling _source. - See https://www.elastic.co/guide/en/elasticsearch/reference/current/point.html . + To find the line number for a given address: find the first offset in Symbol.linetable.offsets such that offset <= base+address, + then read corresponding line number (at the same index) in Symbol.linetable.lines. 
Linetable: base for offsets (64bit PC range base) */ diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DimensionFieldValueFetcher.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DimensionFieldValueFetcher.java index c6ef43cfdacfa..342b6e57c9e51 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DimensionFieldValueFetcher.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DimensionFieldValueFetcher.java @@ -9,6 +9,7 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.flattened.FlattenedFieldMapper; import org.elasticsearch.index.query.SearchExecutionContext; import java.util.ArrayList; @@ -19,13 +20,12 @@ public class DimensionFieldValueFetcher extends FieldValueFetcher { private final DimensionFieldProducer dimensionFieldProducer = createFieldProducer(); - protected DimensionFieldValueFetcher(final MappedFieldType fieldType, final IndexFieldData fieldData) { - super(fieldType.name(), fieldType, fieldData); + protected DimensionFieldValueFetcher(final String fieldName, final MappedFieldType fieldType, final IndexFieldData fieldData) { + super(fieldName, fieldType, fieldData); } private DimensionFieldProducer createFieldProducer() { - final String filedName = fieldType.name(); - return new DimensionFieldProducer(filedName, new DimensionFieldProducer.Dimension(filedName)); + return new DimensionFieldProducer(name, new DimensionFieldProducer.Dimension(name)); } @Override @@ -42,12 +42,18 @@ static List create(final SearchExecutionContext context, fina MappedFieldType fieldType = context.getFieldType(dimension); assert fieldType != null : "Unknown dimension field type for dimension field: [" + dimension + "]"; - if (context.fieldExistsInIndex(dimension)) { + if (context.fieldExistsInIndex(fieldType.name())) { final IndexFieldData fieldData = context.getForField(fieldType, MappedFieldType.FielddataOperation.SEARCH); - final String fieldName = context.isMultiField(dimension) - ? fieldType.name().substring(0, fieldType.name().lastIndexOf('.')) - : fieldType.name(); - fetchers.add(new DimensionFieldValueFetcher(fieldType, fieldData)); + if (fieldType instanceof FlattenedFieldMapper.KeyedFlattenedFieldType flattenedFieldType) { + // Name of the field type and name of the dimension are different in this case. + var dimensionName = flattenedFieldType.rootName() + '.' + flattenedFieldType.key(); + fetchers.add(new DimensionFieldValueFetcher(dimensionName, fieldType, fieldData)); + } else { + final String fieldName = context.isMultiField(dimension) + ? 
fieldType.name().substring(0, fieldType.name().lastIndexOf('.')) + : fieldType.name(); + fetchers.add(new DimensionFieldValueFetcher(fieldName, fieldType, fieldData)); + } } } return Collections.unmodifiableList(fetchers); diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TimeseriesFieldTypeHelper.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TimeseriesFieldTypeHelper.java index 691279187e1a9..e539722481df8 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TimeseriesFieldTypeHelper.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TimeseriesFieldTypeHelper.java @@ -11,6 +11,7 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.mapper.TimeSeriesParams; +import org.elasticsearch.index.mapper.flattened.FlattenedFieldMapper; import java.io.IOException; import java.util.List; @@ -49,6 +50,19 @@ public boolean isTimeSeriesDimension(final String unused, final Map f return Boolean.TRUE.equals(fieldMapping.get(TIME_SERIES_DIMENSION_PARAM)); } + public List extractFlattenedDimensions(final String field, final Map fieldMapping) { + var mapper = mapperService.mappingLookup().getMapper(field); + if (mapper instanceof FlattenedFieldMapper == false) { + return null; + } + Object dimensions = fieldMapping.get(FlattenedFieldMapper.TIME_SERIES_DIMENSIONS_ARRAY_PARAM); + if (dimensions instanceof List actualList) { + return actualList.stream().map(field_in_flattened -> field + '.' + field_in_flattened).toList(); + } + + return null; + } + static class Builder { private final MapperService mapperService; diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java index e370ab5383fd5..abf629dc9c1fa 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java @@ -310,7 +310,10 @@ protected void masterOperation( request.getDownsampleConfig().getTimestampField() ); MappingVisitor.visitMapping(sourceIndexMappings, (field, mapping) -> { - if (helper.isTimeSeriesDimension(field, mapping)) { + var flattenedDimensions = helper.extractFlattenedDimensions(field, mapping); + if (flattenedDimensions != null) { + dimensionFields.addAll(flattenedDimensions); + } else if (helper.isTimeSeriesDimension(field, mapping)) { dimensionFields.add(field); } else if (helper.isTimeSeriesMetric(field, mapping)) { metricFields.add(field); @@ -338,10 +341,22 @@ protected void masterOperation( delegate.onFailure(e); return; } + + /* + * When creating the downsample index, we copy the index.number_of_shards from source index, + * and we set the index.number_of_replicas to 0, to avoid replicating the index being built. + * Also, we set the index.refresh_interval to -1. + * We will set the correct number of replicas and refresh the index later. + * + * We should note that there is a risk of losing a node during the downsample process. In this + * case downsample will fail. + */ + int minNumReplicas = clusterService.getSettings().getAsInt(Downsample.DOWNSAMPLE_MIN_NUMBER_OF_REPLICAS_NAME, 0); + // 3. 
Create downsample index createDownsampleIndex( - clusterService.getSettings(), downsampleIndexName, + minNumReplicas, sourceIndexMetadata, mapping, request, @@ -350,6 +365,7 @@ protected void masterOperation( performShardDownsampling( request, delegate, + minNumReplicas, sourceIndexMetadata, downsampleIndexName, parentTask, @@ -379,6 +395,7 @@ protected void masterOperation( performShardDownsampling( request, delegate, + minNumReplicas, sourceIndexMetadata, downsampleIndexName, parentTask, @@ -448,6 +465,7 @@ private boolean canShortCircuit( private void performShardDownsampling( DownsampleAction.Request request, ActionListener listener, + int minNumReplicas, IndexMetadata sourceIndexMetadata, String downsampleIndexName, TaskId parentTask, @@ -506,7 +524,15 @@ public void onResponse(PersistentTasksCustomMetadata.PersistentTask listener, + int minNumReplicas, final IndexMetadata sourceIndexMetadata, final String downsampleIndexName, final TaskId parentTask, @@ -561,7 +588,7 @@ private void updateTargetIndexSettingStep( // 4. Make downsample index read-only and set the correct number of replicas final Settings.Builder settings = Settings.builder().put(IndexMetadata.SETTING_BLOCKS_WRITE, true); // Number of replicas had been previously set to 0 to speed up index population - if (sourceIndexMetadata.getNumberOfReplicas() > 0) { + if (sourceIndexMetadata.getNumberOfReplicas() > 0 && minNumReplicas == 0) { settings.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, sourceIndexMetadata.getNumberOfReplicas()); } // Setting index.hidden has been initially set to true. We revert this to the value of the @@ -839,28 +866,18 @@ private static void addDynamicTemplates(final XContentBuilder builder) throws IO } private void createDownsampleIndex( - Settings settings, String downsampleIndexName, + int minNumReplicas, IndexMetadata sourceIndexMetadata, String mapping, DownsampleAction.Request request, ActionListener listener ) { - /* - * When creating the downsample index, we copy the index.number_of_shards from source index, - * and we set the index.number_of_replicas to 0, to avoid replicating the index being built. - * Also, we set the index.refresh_interval to -1. - * We will set the correct number of replicas and refresh the index later. - * - * We should note that there is a risk of losing a node during the downsample process. In this - * case downsample will fail. 
- */ - int numberOfReplicas = settings.getAsInt(Downsample.DOWNSAMPLE_MIN_NUMBER_OF_REPLICAS_NAME, 0); var downsampleInterval = request.getDownsampleConfig().getInterval().toString(); Settings.Builder builder = Settings.builder() .put(IndexMetadata.SETTING_INDEX_HIDDEN, true) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, sourceIndexMetadata.getNumberOfShards()) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, String.valueOf(numberOfReplicas)) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, minNumReplicas) .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "-1") .put(IndexMetadata.INDEX_DOWNSAMPLE_STATUS.getKey(), DownsampleTaskStatus.STARTED) .put(IndexMetadata.INDEX_DOWNSAMPLE_INTERVAL.getKey(), downsampleInterval) diff --git a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java index 80bb0368a1afc..e4db5a253f996 100644 --- a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java +++ b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java @@ -70,7 +70,6 @@ import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.InternalTopHits; -import org.elasticsearch.search.aggregations.metrics.Max; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; @@ -134,6 +133,8 @@ public class DownsampleActionSingleNodeTests extends ESSingleNodeTestCase { public static final String FIELD_TIMESTAMP = "@timestamp"; public static final String FIELD_DIMENSION_1 = "dimension_kw"; public static final String FIELD_DIMENSION_2 = "dimension_long"; + public static final String FIELD_DIMENSION_3 = "dimension_flattened"; + public static final String FIELD_DIMENSION_4 = "dimension_kw_multifield"; public static final String FIELD_NUMERIC_1 = "numeric_1"; public static final String FIELD_NUMERIC_2 = "numeric_2"; public static final String FIELD_AGG_METRIC = "agg_metric_1"; @@ -212,6 +213,19 @@ public void setup() throws IOException { // Dimensions mapping.startObject(FIELD_DIMENSION_1).field("type", "keyword").field("time_series_dimension", true).endObject(); mapping.startObject(FIELD_DIMENSION_2).field("type", "long").field("time_series_dimension", true).endObject(); + mapping.startObject(FIELD_DIMENSION_3) + .field("type", "flattened") + .array("time_series_dimensions", "level1_value", "level1_obj.level2_value") + .endObject(); + mapping.startObject(FIELD_DIMENSION_4) + .field("type", "text") + .startObject("fields") + .startObject("keyword") + .field("type", "keyword") + .field("time_series_dimension", true) + .endObject() + .endObject() + .endObject(); // Metrics mapping.startObject(FIELD_NUMERIC_1).field("type", "long").field("time_series_metric", "gauge").endObject(); @@ -307,6 +321,42 @@ public void testDownsampleIndex() throws Exception { assertDownsampleIndex(sourceIndex, downsampleIndex, config); } + public void testDownsampleIndexWithFlattenedAndMultiFieldDimensions() throws Exception { + DownsampleConfig config = new DownsampleConfig(randomInterval()); + SourceSupplier sourceSupplier = () -> { + String ts = 
randomDateForInterval(config.getInterval()); + double labelDoubleValue = DATE_FORMATTER.parseMillis(ts); + return XContentFactory.jsonBuilder() + .startObject() + .field(FIELD_TIMESTAMP, ts) + .field(FIELD_DIMENSION_1, "dim1") // not important for this test + .startObject(FIELD_DIMENSION_3) + .field("level1_value", randomFrom(dimensionValues)) + .field("level1_othervalue", randomFrom(dimensionValues)) + .startObject("level1_object") + .field("level2_value", randomFrom(dimensionValues)) + .field("level2_othervalue", randomFrom(dimensionValues)) + .endObject() + .endObject() + .field(FIELD_DIMENSION_4, randomFrom(dimensionValues)) + .field(FIELD_NUMERIC_1, randomInt()) + .field(FIELD_NUMERIC_2, DATE_FORMATTER.parseMillis(ts)) + .startObject(FIELD_AGG_METRIC) + .field("min", randomDoubleBetween(-2000, -1001, true)) + .field("max", randomDoubleBetween(-1000, 1000, true)) + .field("sum", randomIntBetween(100, 10000)) + .field("value_count", randomIntBetween(100, 1000)) + .endObject() + .field(FIELD_LABEL_DOUBLE, labelDoubleValue) + .field(FIELD_METRIC_LABEL_DOUBLE, labelDoubleValue) + .endObject(); + }; + bulkIndex(sourceSupplier); + prepareSourceIndex(sourceIndex, true); + downsample(sourceIndex, downsampleIndex, config); + assertDownsampleIndex(sourceIndex, downsampleIndex, config); + } + public void testDownsampleOfDownsample() throws Exception { int intervalMinutes = randomIntBetween(10, 120); DownsampleConfig config = new DownsampleConfig(DateHistogramInterval.minutes(intervalMinutes)); @@ -1103,7 +1153,7 @@ private RolloverResponse rollover(String dataStreamName) throws ExecutionExcepti } private InternalAggregations aggregate(final String index, AggregationBuilder aggregationBuilder) { - var resp = client().prepareSearch(index).addAggregation(aggregationBuilder).get(); + var resp = client().prepareSearch(index).setSize(0).addAggregation(aggregationBuilder).get(); try { return resp.getAggregations(); } finally { @@ -1220,12 +1270,15 @@ private void assertDownsampleIndexAggregations( Map labelFields ) { final AggregationBuilder aggregations = buildAggregations(config, metricFields, labelFields, config.getTimestampField()); - InternalAggregations origResp = aggregate(sourceIndex, aggregations); - InternalAggregations downsampleResp = aggregate(downsampleIndex, aggregations); - assertEquals(origResp.asMap().keySet(), downsampleResp.asMap().keySet()); + List origList = aggregate(sourceIndex, aggregations).asList(); + List downsampleList = aggregate(downsampleIndex, aggregations).asList(); + assertEquals(origList.size(), downsampleList.size()); + for (int i = 0; i < origList.size(); i++) { + assertEquals(origList.get(i).getName(), downsampleList.get(i).getName()); + } - StringTerms originalTsIdTermsAggregation = (StringTerms) origResp.getAsMap().values().stream().toList().get(0); - StringTerms downsampleTsIdTermsAggregation = (StringTerms) downsampleResp.getAsMap().values().stream().toList().get(0); + StringTerms originalTsIdTermsAggregation = (StringTerms) origList.get(0); + StringTerms downsampleTsIdTermsAggregation = (StringTerms) downsampleList.get(0); originalTsIdTermsAggregation.getBuckets().forEach(originalBucket -> { StringTerms.Bucket downsampleBucket = downsampleTsIdTermsAggregation.getBucketByKey(originalBucket.getKeyAsString()); @@ -1268,7 +1321,7 @@ private void assertDownsampleIndexAggregations( .stream() .filter(agg -> agg.getType().equals("top_hits")) .toList(); - assertEquals(topHitsDownsampleAggregations.size(), topHitsDownsampleAggregations.size()); + 
assertEquals(topHitsOriginalAggregations.size(), topHitsDownsampleAggregations.size()); for (int j = 0; j < topHitsDownsampleAggregations.size(); ++j) { InternalTopHits originalTopHits = (InternalTopHits) topHitsOriginalAggregations.get(j); @@ -1304,14 +1357,17 @@ private void assertDownsampleIndexAggregations( originalFieldsList.contains(field) ) ); - Object originalLabelValue = originalHit.getDocumentFields().values().stream().toList().get(0).getValue(); - Object downsampleLabelValue = downsampleHit.getDocumentFields().values().stream().toList().get(0).getValue(); - Optional labelAsMetric = nonTopHitsOriginalAggregations.stream() + String labelName = originalHit.getDocumentFields().values().stream().findFirst().get().getName(); + Object originalLabelValue = originalHit.getDocumentFields().values().stream().findFirst().get().getValue(); + Object downsampleLabelValue = downsampleHit.getDocumentFields().values().stream().findFirst().get().getValue(); + Optional labelAsMetric = topHitsOriginalAggregations.stream() .filter(agg -> agg.getName().equals("metric_" + downsampleTopHits.getName())) .findFirst(); // NOTE: this check is possible only if the label can be indexed as a metric (the label is a numeric field) if (labelAsMetric.isPresent()) { - double metricValue = ((Max) labelAsMetric.get()).value(); + double metricValue = ((InternalTopHits) labelAsMetric.get()).getHits().getHits()[0].field( + "metric_" + labelName + ).getValue(); assertEquals(metricValue, downsampleLabelValue); assertEquals(metricValue, originalLabelValue); } diff --git a/x-pack/plugin/enrich/qa/rest/build.gradle b/x-pack/plugin/enrich/qa/rest/build.gradle index e8473d15ed9ef..fdaddbc1f9290 100644 --- a/x-pack/plugin/enrich/qa/rest/build.gradle +++ b/x-pack/plugin/enrich/qa/rest/build.gradle @@ -8,7 +8,7 @@ import org.elasticsearch.gradle.internal.info.BuildParams restResources { restApi { - include '_common', 'bulk', 'indices', 'index', 'ingest.delete_pipeline', 'ingest.put_pipeline', 'enrich', 'get' + include '_common', 'bulk', 'indices', 'index', 'ingest.delete_pipeline', 'ingest.put_pipeline', 'enrich', 'get', 'capabilities' } restTests { includeXpack 'enrich' diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java index e36707b0b8bc4..35c2071188864 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java @@ -50,10 +50,11 @@ */ public final class EnrichCache { - private final Cache>> cache; + private final Cache cache; private final LongSupplier relativeNanoTimeProvider; private final AtomicLong hitsTimeInNanos = new AtomicLong(0); private final AtomicLong missesTimeInNanos = new AtomicLong(0); + private final AtomicLong sizeInBytes = new AtomicLong(0); private volatile Metadata metadata; EnrichCache(long maxSize) { @@ -63,7 +64,9 @@ public final class EnrichCache { // non-private for unit testing only EnrichCache(long maxSize, LongSupplier relativeNanoTimeProvider) { this.relativeNanoTimeProvider = relativeNanoTimeProvider; - this.cache = CacheBuilder.>>builder().setMaximumWeight(maxSize).build(); + this.cache = CacheBuilder.builder().setMaximumWeight(maxSize).removalListener(notification -> { + sizeInBytes.getAndAdd(-1 * notification.getValue().sizeInBytes); + }).build(); } /** @@ -86,12 +89,11 @@ public void computeIfAbsent( 
hitsTimeInNanos.addAndGet(cacheRequestTime); listener.onResponse(response); } else { - final long retrieveStart = relativeNanoTimeProvider.getAsLong(); searchResponseFetcher.accept(searchRequest, ActionListener.wrap(resp -> { - List> value = toCacheValue(resp); + CacheValue value = toCacheValue(resp); put(searchRequest, value); - List> copy = deepCopy(value, false); + List> copy = deepCopy(value.hits, false); long databaseQueryAndCachePutTime = relativeNanoTimeProvider.getAsLong() - retrieveStart; missesTimeInNanos.addAndGet(cacheRequestTime + databaseQueryAndCachePutTime); listener.onResponse(copy); @@ -104,20 +106,21 @@ public void computeIfAbsent( String enrichIndex = getEnrichIndexKey(searchRequest); CacheKey cacheKey = new CacheKey(enrichIndex, searchRequest); - List> response = cache.get(cacheKey); + CacheValue response = cache.get(cacheKey); if (response != null) { - return deepCopy(response, false); + return deepCopy(response.hits, false); } else { return null; } } // non-private for unit testing only - void put(SearchRequest searchRequest, List> response) { + void put(SearchRequest searchRequest, CacheValue cacheValue) { String enrichIndex = getEnrichIndexKey(searchRequest); CacheKey cacheKey = new CacheKey(enrichIndex, searchRequest); - cache.put(cacheKey, response); + cache.put(cacheKey, cacheValue); + sizeInBytes.addAndGet(cacheValue.sizeInBytes); } void setMetadata(Metadata metadata) { @@ -133,7 +136,8 @@ public EnrichStatsAction.Response.CacheStats getStats(String localNodeId) { cacheStats.getMisses(), cacheStats.getEvictions(), TimeValue.nsecToMSec(hitsTimeInNanos.get()), - TimeValue.nsecToMSec(missesTimeInNanos.get()) + TimeValue.nsecToMSec(missesTimeInNanos.get()), + sizeInBytes.get() ); } @@ -146,12 +150,14 @@ private String getEnrichIndexKey(SearchRequest searchRequest) { return ia.getIndices().get(0).getName(); } - static List> toCacheValue(SearchResponse response) { + static CacheValue toCacheValue(SearchResponse response) { List> result = new ArrayList<>(response.getHits().getHits().length); + long size = 0; for (SearchHit hit : response.getHits()) { result.add(deepCopy(hit.getSourceAsMap(), true)); + size += hit.getSourceRef() != null ? 
hit.getSourceRef().ramBytesUsed() : 0; } - return Collections.unmodifiableList(result); + return new CacheValue(Collections.unmodifiableList(result), size); } @SuppressWarnings("unchecked") @@ -205,4 +211,6 @@ public int hashCode() { } } + // Visibility for testing + record CacheValue(List> hits, Long sizeInBytes) {} } diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java index 5cb9c0cf9c051..ca00f49100279 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java @@ -85,6 +85,8 @@ public class EnrichPolicyRunner implements Runnable { static final String ENRICH_MATCH_FIELD_NAME = "enrich_match_field"; static final String ENRICH_README_FIELD_NAME = "enrich_readme"; + public static final String ENRICH_MIN_NUMBER_OF_REPLICAS_NAME = "enrich.min_number_of_replicas"; + static final String ENRICH_INDEX_README_TEXT = "This index is managed by Elasticsearch and should not be modified in any way."; private final String policyName; @@ -137,7 +139,7 @@ public void run() { // This call does not set the origin to ensure that the user executing the policy has permission to access the source index client.admin().indices().getIndex(getIndexRequest, listener.delegateFailureAndWrap((l, getIndexResponse) -> { validateMappings(getIndexResponse); - prepareAndCreateEnrichIndex(toMappings(getIndexResponse)); + prepareAndCreateEnrichIndex(toMappings(getIndexResponse), clusterService.getSettings()); })); } catch (Exception e) { listener.onFailure(e); @@ -434,10 +436,11 @@ static boolean isIndexableField(MapperService mapperService, String field, Strin } } - private void prepareAndCreateEnrichIndex(List> mappings) { + private void prepareAndCreateEnrichIndex(List> mappings, Settings settings) { + int numberOfReplicas = settings.getAsInt(ENRICH_MIN_NUMBER_OF_REPLICAS_NAME, 0); Settings enrichIndexSettings = Settings.builder() .put("index.number_of_shards", 1) - .put("index.number_of_replicas", 0) + .put("index.number_of_replicas", numberOfReplicas) // No changes will be made to an enrich index after policy execution, so need to enable automatic refresh interval: .put("index.refresh_interval", -1) // This disables eager global ordinals loading for all fields: diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/rest/RestEnrichStatsAction.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/rest/RestEnrichStatsAction.java index 3d64e7c1380fe..2c78556df489d 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/rest/RestEnrichStatsAction.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/rest/RestEnrichStatsAction.java @@ -16,12 +16,15 @@ import org.elasticsearch.xpack.core.enrich.action.EnrichStatsAction; import java.util.List; +import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; @ServerlessScope(Scope.INTERNAL) public class RestEnrichStatsAction extends BaseRestHandler { + private static final Set SUPPORTED_CAPABILITIES = Set.of("size-in-bytes"); + @Override public List routes() { return List.of(new Route(GET, "/_enrich/_stats")); @@ -32,6 +35,11 @@ public String getName() { return "enrich_stats"; } + @Override + public Set supportedCapabilities() { + return SUPPORTED_CAPABILITIES; + } + @Override protected 
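// The size accounting above follows a simple pattern: the caller bumps a counter on every put and
// the cache's removal listener decrements it, so evictions and replacements are captured as well.
// A minimal sketch of that pattern, with illustrative keys and weights rather than the production
// CacheKey/CacheValue types:
//
//     AtomicLong sizeInBytes = new AtomicLong();
//     Cache<String, Long> cache = CacheBuilder.<String, Long>builder()
//         .setMaximumWeight(3)
//         .removalListener(n -> sizeInBytes.addAndGet(-n.getValue()))
//         .build();
//     cache.put("k", 2L);
//     sizeInBytes.addAndGet(2L);   // caller accounts for the insert;
//                                  // the listener subtracts the same amount when the entry goes away
//
// The running total is what getStats() now reports, and it is also why the stats test fixtures
// below gain one more randomNonNegativeLong() argument. Separately, prepareAndCreateEnrichIndex
// now reads ENRICH_MIN_NUMBER_OF_REPLICAS_NAME so an enrich index can be created with replicas
// instead of the previously hard-coded 0, mirroring the downsample change earlier in this patch.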
RestChannelConsumer prepareRequest(final RestRequest restRequest, final NodeClient client) { final var request = new EnrichStatsAction.Request(RestUtils.getMasterNodeTimeout(restRequest)); diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichCacheTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichCacheTests.java index f2f2948db41ee..19af929017a3b 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichCacheTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichCacheTests.java @@ -79,7 +79,7 @@ public void testCaching() { new SearchSourceBuilder().query(new MatchQueryBuilder("match_field", "2")) ); // Emulated search response (content doesn't matter, since it isn't used, it just a cache entry) - List> searchResponse = List.of(Map.of("test", "entry")); + EnrichCache.CacheValue searchResponse = new EnrichCache.CacheValue(List.of(Map.of("test", "entry")), 1L); EnrichCache enrichCache = new EnrichCache(3); enrichCache.setMetadata(metadata); @@ -91,6 +91,7 @@ public void testCaching() { assertThat(cacheStats.hits(), equalTo(0L)); assertThat(cacheStats.misses(), equalTo(0L)); assertThat(cacheStats.evictions(), equalTo(0L)); + assertThat(cacheStats.cacheSizeInBytes(), equalTo(3L)); assertThat(enrichCache.get(searchRequest1), notNullValue()); assertThat(enrichCache.get(searchRequest2), notNullValue()); @@ -101,6 +102,7 @@ public void testCaching() { assertThat(cacheStats.hits(), equalTo(3L)); assertThat(cacheStats.misses(), equalTo(1L)); assertThat(cacheStats.evictions(), equalTo(0L)); + assertThat(cacheStats.cacheSizeInBytes(), equalTo(3L)); enrichCache.put(searchRequest4, searchResponse); cacheStats = enrichCache.getStats("_id"); @@ -108,6 +110,7 @@ public void testCaching() { assertThat(cacheStats.hits(), equalTo(3L)); assertThat(cacheStats.misses(), equalTo(1L)); assertThat(cacheStats.evictions(), equalTo(1L)); + assertThat(cacheStats.cacheSizeInBytes(), equalTo(3L)); // Simulate enrich policy execution, which should make current cache entries unused. 
metadata = Metadata.builder() @@ -149,6 +152,7 @@ public void testCaching() { assertThat(cacheStats.hits(), equalTo(6L)); assertThat(cacheStats.misses(), equalTo(6L)); assertThat(cacheStats.evictions(), equalTo(4L)); + assertThat(cacheStats.cacheSizeInBytes(), equalTo(3L)); } public void testComputeIfAbsent() throws InterruptedException { @@ -331,7 +335,7 @@ public void testEnrichIndexNotExist() { new SearchSourceBuilder().query(new MatchQueryBuilder("test", "query")) ); // Emulated search response (content doesn't matter, since it isn't used, it just a cache entry) - List> searchResponse = List.of(Map.of("test", "entry")); + EnrichCache.CacheValue searchResponse = new EnrichCache.CacheValue(List.of(Map.of("test", "entry")), 1L); EnrichCache enrichCache = new EnrichCache(1); enrichCache.setMetadata(metadata); diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/EnrichStatsResponseTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/EnrichStatsResponseTests.java index 14e3008cda02f..aec184472d41e 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/EnrichStatsResponseTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/EnrichStatsResponseTests.java @@ -51,6 +51,7 @@ protected EnrichStatsAction.Response createTestInstance() { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), + randomNonNegativeLong(), randomNonNegativeLong() ) ); diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/monitoring/collector/enrich/EnrichStatsCollectorTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/monitoring/collector/enrich/EnrichStatsCollectorTests.java index a38b2605c1ff0..2a069eb596760 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/monitoring/collector/enrich/EnrichStatsCollectorTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/monitoring/collector/enrich/EnrichStatsCollectorTests.java @@ -93,6 +93,7 @@ public void testDoCollect() throws Exception { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), + randomNonNegativeLong(), randomNonNegativeLong() ) ); diff --git a/x-pack/plugin/ent-search/qa/rest/build.gradle b/x-pack/plugin/ent-search/qa/rest/build.gradle index c24b0ffd44c65..5b04a326f142c 100644 --- a/x-pack/plugin/ent-search/qa/rest/build.gradle +++ b/x-pack/plugin/ent-search/qa/rest/build.gradle @@ -14,8 +14,7 @@ restResources { 'nodes', 'indices', 'index', - 'query_ruleset', - 'query_rule', + 'query_rules', 'search_application', 'xpack', 'security', @@ -34,3 +33,7 @@ testClusters.configureEach { user username: 'entsearch-user', password: 'entsearch-user-password', role: 'user' user username: 'entsearch-unprivileged', password: 'entsearch-unprivileged-password', role: 'unprivileged' } + +artifacts { + restXpackTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test")) +} diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/20_usage.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/20_usage.yml index 2d7b56bc175eb..81eaa24fd6f5d 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/20_usage.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/20_usage.yml @@ -28,7 +28,7 @@ teardown: --- "xpack usage includes Enterprise Search": 
- do: - xpack.usage: {} + xpack.usage: { } - match: { enterprise_search: { @@ -36,7 +36,7 @@ teardown: available: true, search_applications: { count: 0 }, analytics_collections: { count: 0 }, - query_rulesets: { total_count: 0, total_rule_count: 0, min_rule_count: 0, max_rule_count: 0 } + query_rulesets: { total_count: 0, total_rule_count: 0, min_rule_count: 0, max_rule_count: 0 } } } @@ -53,7 +53,7 @@ teardown: query: "{{query_string}}" - do: - xpack.usage: {} + xpack.usage: { } - match: { enterprise_search: { @@ -61,7 +61,7 @@ teardown: available: true, search_applications: { count: 1 }, analytics_collections: { count: 0 }, - query_rulesets: { total_count: 0, total_rule_count: 0, min_rule_count: 0, max_rule_count: 0 } + query_rulesets: { total_count: 0, total_rule_count: 0, min_rule_count: 0, max_rule_count: 0 } } } @@ -82,7 +82,7 @@ teardown: name: test-analytics-collection - do: - xpack.usage: {} + xpack.usage: { } - match: { enterprise_search: { @@ -90,7 +90,7 @@ teardown: available: true, search_applications: { count: 2 }, analytics_collections: { count: 1 }, - query_rulesets: { total_count: 0, total_rule_count: 0, min_rule_count: 0, max_rule_count: 0 } + query_rulesets: { total_count: 0, total_rule_count: 0, min_rule_count: 0, max_rule_count: 0 } } } @@ -99,7 +99,7 @@ teardown: name: test-search-application-2 - do: - xpack.usage: {} + xpack.usage: { } - match: { enterprise_search: { @@ -107,7 +107,7 @@ teardown: available: true, search_applications: { count: 1 }, analytics_collections: { count: 1 }, - query_rulesets: { total_count: 0, total_rule_count: 0, min_rule_count: 0, max_rule_count: 0 } + query_rulesets: { total_count: 0, total_rule_count: 0, min_rule_count: 0, max_rule_count: 0 } } } @@ -116,7 +116,7 @@ teardown: name: test-analytics-collection - do: - xpack.usage: {} + xpack.usage: { } - match: { enterprise_search: { @@ -124,12 +124,12 @@ teardown: available: true, search_applications: { count: 1 }, analytics_collections: { count: 0 }, - query_rulesets: { total_count: 0, total_rule_count: 0, min_rule_count: 0, max_rule_count: 0 } + query_rulesets: { total_count: 0, total_rule_count: 0, min_rule_count: 0, max_rule_count: 0 } } } - do: - query_ruleset.put: + query_rules.put_ruleset: ruleset_id: test-query-ruleset body: rules: @@ -155,7 +155,7 @@ teardown: - 'id4' - do: - query_ruleset.put: + query_rules.put_ruleset: ruleset_id: test-query-ruleset2 body: rules: @@ -199,12 +199,12 @@ teardown: available: true, search_applications: { count: 1 }, analytics_collections: { count: 0 }, - query_rulesets: { total_count: 2, total_rule_count: 5, min_rule_count: 2, max_rule_count: 3, rule_criteria_total_counts: { exact: 5 } } + query_rulesets: { total_count: 2, total_rule_count: 5, min_rule_count: 2, max_rule_count: 3, rule_criteria_total_counts: { exact: 5 } } } } - do: - query_ruleset.delete: + query_rules.delete_ruleset: ruleset_id: test-query-ruleset2 - do: @@ -216,7 +216,7 @@ teardown: available: true, search_applications: { count: 1 }, analytics_collections: { count: 0 }, - query_rulesets: { total_count: 1, total_rule_count: 2, min_rule_count: 2, max_rule_count: 2, rule_criteria_total_counts: { exact: 2 } } + query_rulesets: { total_count: 1, total_rule_count: 2, min_rule_count: 2, max_rule_count: 2, rule_criteria_total_counts: { exact: 2 } } } } diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/100_connector_update_error.yml 
b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/100_connector_update_error.yml index a58f2399301d3..5943f9208c50f 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/100_connector_update_error.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/100_connector_update_error.yml @@ -29,6 +29,7 @@ setup: connector_id: test-connector - match: { error: "some error" } + - match: { status: error } --- @@ -59,6 +60,7 @@ setup: connector_id: test-connector - match: { error: null } + - match: { status: connected } --- "Update Connector Error - 404 when connector doesn't exist": diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/10_connector_put.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/10_connector_put.yml index 5cfb016e1b6df..b0f850d09f76d 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/10_connector_put.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/10_connector_put.yml @@ -76,6 +76,42 @@ setup: - match: { custom_scheduling: {} } - match: { filtering.0.domain: DEFAULT } + +--- +'Create Connector - Check for missing keys': + - do: + connector.put: + connector_id: test-connector + body: + index_name: search-test + name: my-connector + language: pl + is_native: false + service_type: super-connector + + - match: { result: 'created' } + + - do: + connector.get: + connector_id: test-connector + + - match: { id: test-connector } + - match: { index_name: search-test } + - match: { name: my-connector } + - match: { language: pl } + - match: { is_native: false } + - match: { service_type: super-connector } + + # check keys that are not populated upon connector creation + - is_false: api_key_id + - is_false: api_key_secret_id + - is_false: description + - is_false: error + - is_false: features + - is_false: last_seen + - is_false: sync_cursor + + --- 'Create Connector - Resource already exists': - do: diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/sync_job/70_connector_sync_job_update_stats.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/sync_job/70_connector_sync_job_update_stats.yml index 85156bf800582..31dfea2e01d11 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/sync_job/70_connector_sync_job_update_stats.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/sync_job/70_connector_sync_job_update_stats.yml @@ -184,6 +184,77 @@ setup: - match: { indexed_document_volume: 1000 } - match: { last_seen: 2023-12-04T08:45:50.567149Z } +--- +"Update the ingestion stats for a connector sync job - with optional metadata": + - do: + connector.sync_job_post: + body: + id: test-connector + job_type: full + trigger_method: on_demand + - set: { id: id } + + - do: + connector.sync_job_update_stats: + connector_sync_job_id: $id + body: + deleted_document_count: 10 + indexed_document_count: 20 + indexed_document_volume: 1000 + metadata: { someKey1: test, someKey2: test2 } + + - match: { result: updated } + + - do: + connector.sync_job_get: + connector_sync_job_id: $id + + - 
match: { deleted_document_count: 10 } + - match: { indexed_document_count: 20 } + - match: { indexed_document_volume: 1000 } + - match: { metadata: { someKey1: test, someKey2: test2 } } + + +--- +"Update the ingestion stats for a connector sync job - metadata wrong type string": + - do: + connector.sync_job_post: + body: + id: test-connector + job_type: full + trigger_method: on_demand + - set: { id: id } + + - do: + catch: bad_request + connector.sync_job_update_stats: + connector_sync_job_id: $id + body: + deleted_document_count: 10 + indexed_document_count: 20 + indexed_document_volume: 1000 + metadata: "abc" + +--- +"Update the ingestion stats for a connector sync job - metadata wrong type number": + - do: + connector.sync_job_post: + body: + id: test-connector + job_type: full + trigger_method: on_demand + - set: { id: id } + + - do: + catch: bad_request + connector.sync_job_update_stats: + connector_sync_job_id: $id + body: + deleted_document_count: 10 + indexed_document_count: 20 + indexed_document_volume: 1000 + metadata: 123 + --- "Update the ingestion stats for a Connector Sync Job - Connector Sync Job does not exist": - do: diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/10_query_ruleset_put.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/10_query_ruleset_put.yml index f3f37e41ec756..a1f9eeccf2002 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/10_query_ruleset_put.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/10_query_ruleset_put.yml @@ -6,19 +6,19 @@ setup: --- teardown: - do: - query_ruleset.delete: + query_rules.delete_ruleset: ruleset_id: test-ruleset ignore: 404 - do: - query_ruleset.delete: + query_rules.delete_ruleset: ruleset_id: test-query-ruleset-recreating ignore: 404 --- 'Create Query Ruleset': - do: - query_ruleset.put: + query_rules.put_ruleset: ruleset_id: test-ruleset body: rules: @@ -48,7 +48,7 @@ teardown: - match: { result: 'created' } - do: - query_ruleset.get: + query_rules.get_ruleset: ruleset_id: test-ruleset - match: { ruleset_id: test-ruleset } - match: @@ -79,7 +79,7 @@ teardown: --- 'Create Query Ruleset - Resource already exists': - do: - query_ruleset.put: + query_rules.put_ruleset: ruleset_id: test-query-ruleset-recreating body: rules: @@ -96,7 +96,7 @@ teardown: - match: { result: 'created' } - do: - query_ruleset.put: + query_rules.put_ruleset: ruleset_id: test-query-ruleset-recreating body: rules: @@ -120,7 +120,7 @@ teardown: - do: catch: forbidden headers: { Authorization: "Basic ZW50c2VhcmNoLXVzZXI6ZW50c2VhcmNoLXVzZXItcGFzc3dvcmQ=" } # user - query_ruleset.put: + query_rules.put_ruleset: ruleset_id: forbidden-query-ruleset body: rules: diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/20_query_ruleset_list.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/20_query_ruleset_list.yml index b30f1c2418f4f..f2ced956b5369 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/20_query_ruleset_list.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/20_query_ruleset_list.yml @@ -3,7 +3,7 @@ setup: cluster_features: [ "gte_v8.10.0" ] reason: Introduced in 8.10.0 - do: - query_ruleset.put: + 
query_rules.put_ruleset: ruleset_id: test-query-ruleset-3 body: rules: @@ -29,7 +29,7 @@ setup: - 'id4' - do: - query_ruleset.put: + query_rules.put_ruleset: ruleset_id: test-query-ruleset-1 body: rules: @@ -65,7 +65,7 @@ setup: - 'id6' - do: - query_ruleset.put: + query_rules.put_ruleset: ruleset_id: test-query-ruleset-2 body: rules: @@ -112,29 +112,29 @@ setup: --- teardown: - do: - query_ruleset.delete: + query_rules.delete_ruleset: ruleset_id: test-query-ruleset-1 ignore: 404 - do: - query_ruleset.delete: + query_rules.delete_ruleset: ruleset_id: test-query-ruleset-2 ignore: 404 - do: - query_ruleset.delete: + query_rules.delete_ruleset: ruleset_id: test-query-ruleset-3 ignore: 404 - do: - query_ruleset.delete: + query_rules.delete_ruleset: ruleset_id: a-test-query-ruleset-with-lots-of-criteria ignore: 404 --- "List Query Rulesets": - do: - query_ruleset.list: { } + query_rules.list_rulesets: { } - match: { count: 3 } @@ -154,7 +154,7 @@ teardown: --- "List Query Rulesets - with from": - do: - query_ruleset.list: + query_rules.list_rulesets: from: 1 - match: { count: 3 } @@ -171,7 +171,7 @@ teardown: --- "List Query Rulesets - with size": - do: - query_ruleset.list: + query_rules.list_rulesets: size: 2 - match: { count: 3 } @@ -188,26 +188,26 @@ teardown: --- "List Query Rulesets - empty": - do: - query_ruleset.delete: + query_rules.delete_ruleset: ruleset_id: test-query-ruleset-1 - do: - query_ruleset.delete: + query_rules.delete_ruleset: ruleset_id: test-query-ruleset-2 - do: - query_ruleset.delete: + query_rules.delete_ruleset: ruleset_id: test-query-ruleset-3 - do: - query_ruleset.list: { } + query_rules.list_rulesets: { } - match: { count: 0 } --- "List Query Rulesets with multiple rules": - do: - query_ruleset.put: + query_rules.put_ruleset: ruleset_id: a-test-query-ruleset-with-lots-of-criteria body: rules: @@ -267,7 +267,7 @@ teardown: - 'id10' - do: - query_ruleset.list: + query_rules.list_rulesets: from: 0 size: 1 @@ -293,7 +293,7 @@ teardown: - do: catch: forbidden headers: { Authorization: "Basic ZW50c2VhcmNoLXVzZXI6ZW50c2VhcmNoLXVzZXItcGFzc3dvcmQ=" } # user - query_ruleset.list: { } + query_rules.list_rulesets: { } - match: { error.type: 'security_exception' } diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/30_query_ruleset_delete.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/30_query_ruleset_delete.yml index 81e3e6c8411f7..91dc0581659a6 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/30_query_ruleset_delete.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/30_query_ruleset_delete.yml @@ -3,7 +3,7 @@ setup: cluster_features: [ "gte_v8.10.0" ] reason: Introduced in 8.10.0 - do: - query_ruleset.put: + query_rules.put_ruleset: ruleset_id: test-query-ruleset-to-delete body: rules: @@ -21,21 +21,21 @@ setup: --- "Delete Query Ruleset": - do: - query_ruleset.delete: + query_rules.delete_ruleset: ruleset_id: test-query-ruleset-to-delete - match: { acknowledged: true } - do: catch: "missing" - query_ruleset.get: + query_rules.get_ruleset: ruleset_id: test-query-ruleset-to-delete --- "Delete Query Ruleset - Ruleset does not exist": - do: catch: "missing" - query_ruleset.delete: + query_rules.delete_ruleset: ruleset_id: test-nonexistent-query-ruleset --- @@ -46,7 +46,7 @@ setup: - do: catch: forbidden headers: { Authorization: "Basic 
ZW50c2VhcmNoLXVzZXI6ZW50c2VhcmNoLXVzZXItcGFzc3dvcmQ=" } # user - query_ruleset.delete: + query_rules.delete_ruleset: ruleset_id: test-query-ruleset-to-delete - match: { error.type: 'security_exception' } diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/40_rule_query_search.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/40_rule_query_search.yml index bfd4c5e8a831e..078e24d86f1c8 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/40_rule_query_search.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/40_rule_query_search.yml @@ -40,7 +40,7 @@ setup: - { "text": "observability" } - do: - query_ruleset.put: + query_rules.put_ruleset: ruleset_id: test-ruleset body: rules: @@ -84,7 +84,7 @@ setup: - 'doc7' - do: - query_ruleset.put: + query_rules.put_ruleset: ruleset_id: another-test-ruleset body: rules: @@ -101,17 +101,17 @@ setup: --- teardown: - do: - query_ruleset.delete: + query_rules.delete_ruleset: ruleset_id: test-ruleset ignore: 404 - do: - query_ruleset.delete: + query_rules.delete_ruleset: ruleset_id: another-test-ruleset ignore: 404 - do: - query_ruleset.delete: + query_rules.delete_ruleset: ruleset_id: combined-ruleset ignore: 404 @@ -314,7 +314,7 @@ teardown: "Perform a rule query over a ruleset with combined numeric and text rule matching": - do: - query_ruleset.put: + query_rules.put_ruleset: ruleset_id: combined-ruleset body: rules: @@ -426,7 +426,7 @@ teardown: wait_for: started - do: - query_ruleset.put: + query_rules.put_ruleset: ruleset_id: combined-ruleset body: rules: @@ -455,10 +455,10 @@ teardown: query: rule: organic: - text_expansion: - ml.tokens: - model_id: text_expansion_model - model_text: "octopus comforter smells" + sparse_vector: + field: ml.tokens + inference_id: text_expansion_model + query: "octopus comforter smells" match_criteria: foo: bar ruleset_ids: @@ -473,10 +473,10 @@ teardown: query: rule: organic: - text_expansion: - ml.tokens: - model_id: text_expansion_model - model_text: "octopus comforter smells" + sparse_vector: + field: ml.tokens + inference_id: text_expansion_model + query: "octopus comforter smells" match_criteria: foo: baz ruleset_ids: @@ -491,10 +491,10 @@ teardown: query: rule: organic: - text_expansion: - ml.tokens: - model_id: text_expansion_model - model_text: "octopus comforter smells" + sparse_vector: + field: ml.tokens + inference_id: text_expansion_model + query: "octopus comforter smells" match_criteria: foo: puggle ruleset_ids: diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/50_query_rule_put.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/50_query_rule_put.yml index a89cf7a24c2fa..fb3d7be9d2367 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/50_query_rule_put.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/50_query_rule_put.yml @@ -7,17 +7,17 @@ setup: --- teardown: - do: - query_ruleset.delete: + query_rules.delete_ruleset: ruleset_id: test-ruleset ignore: 404 - do: - query_ruleset.delete: + query_rules.delete_ruleset: ruleset_id: test-query-rule-recreating ignore: 404 - do: - query_ruleset.delete: + query_rules.delete_ruleset: ruleset_id: forbidden-query-ruleset ignore: 404 @@ 
-25,7 +25,7 @@ teardown: --- 'Create query rule with nonexistant ruleset that is also created': - do: - query_rule.put: + query_rules.put_rule: ruleset_id: new-ruleset rule_id: query-rule-id body: @@ -43,7 +43,7 @@ teardown: - match: { result: 'created' } - do: - query_rule.get: + query_rules.get_rule: ruleset_id: new-ruleset rule_id: query-rule-id @@ -55,7 +55,7 @@ teardown: # Update the same rule in place - do: - query_rule.put: + query_rules.put_rule: ruleset_id: new-ruleset rule_id: query-rule-id body: @@ -72,7 +72,7 @@ teardown: - match: { result: 'updated' } - do: - query_rule.get: + query_rules.get_rule: ruleset_id: new-ruleset rule_id: query-rule-id @@ -86,7 +86,7 @@ teardown: 'Create query rule with existing ruleset respecting priority order': # Start with 2 rules, one that specifies priority and one that does not (should go at the end) - do: - query_ruleset.put: + query_rules.put_ruleset: ruleset_id: test-ruleset body: rules: @@ -115,7 +115,7 @@ teardown: - match: { result: 'created' } - do: - query_ruleset.get: + query_rules.get_ruleset: ruleset_id: test-ruleset - match: { ruleset_id: test-ruleset } @@ -145,7 +145,7 @@ teardown: # Next, add a rule with a priority 2 - this should go in the middle - do: - query_rule.put: + query_rules.put_rule: ruleset_id: test-ruleset rule_id: query-rule-id3 body: @@ -162,7 +162,7 @@ teardown: - match: { result: 'created' } - do: - query_ruleset.get: + query_rules.get_ruleset: ruleset_id: test-ruleset - match: { ruleset_id: test-ruleset } @@ -202,7 +202,7 @@ teardown: # Finally, add another single rule with no priority. This should be appended to the ruleset. - do: - query_rule.put: + query_rules.put_rule: ruleset_id: test-ruleset rule_id: query-rule-id4 body: @@ -218,7 +218,7 @@ teardown: - match: { result: 'created' } - do: - query_ruleset.get: + query_rules.get_ruleset: ruleset_id: test-ruleset - match: { ruleset_id: test-ruleset } @@ -269,7 +269,7 @@ teardown: --- 'Create Query Rule - Resource already exists': - do: - query_rule.put: + query_rules.put_rule: ruleset_id: test-query-rule-recreating rule_id: abc body: @@ -286,7 +286,7 @@ teardown: - match: { result: 'created' } - do: - query_rule.put: + query_rules.put_rule: ruleset_id: test-query-rule-recreating rule_id: abc body: @@ -310,7 +310,7 @@ teardown: - do: catch: forbidden headers: { Authorization: "Basic ZW50c2VhcmNoLXVzZXI6ZW50c2VhcmNoLXVzZXItcGFzc3dvcmQ=" } # user - query_rule.put: + query_rules.put_rule: ruleset_id: forbidden-query-ruleset rule_id: abc body: diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/5_query_rulesets_before_setup.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/5_query_rulesets_before_setup.yml index cbe4f98370300..e13c23b6d3c35 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/5_query_rulesets_before_setup.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/5_query_rulesets_before_setup.yml @@ -1,26 +1,26 @@ setup: - - requires: - cluster_features: ["gte_v8.10.0"] - reason: Introduced in 8.10.0 + - requires: + cluster_features: [ "gte_v8.10.0" ] + reason: Introduced in 8.10.0 --- "Get query ruleset returns a 404 when no query rulesets exist": - do: catch: /resource_not_found_exception/ - query_ruleset.get: + query_rules.get_ruleset: ruleset_id: test-i-dont-exist --- "Delete query ruleset returns a 404 when no query rulesets exist": - do: 
catch: /resource_not_found_exception/ - query_ruleset.delete: - ruleset_id: test-i-dont-exist + query_rules.delete_ruleset: + ruleset_id: test-i-dont-exist --- "List query rulesets returns an empty list when no query rulesets exist": - do: - query_ruleset.list: { } + query_rules.list_rulesets: { } - match: { count: 0 } diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/60_query_rule_delete.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/60_query_rule_delete.yml index 63862ba666f41..033cab68e2bea 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/60_query_rule_delete.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/60_query_rule_delete.yml @@ -3,7 +3,7 @@ setup: cluster_features: [ "gte_v8.15.0" ] reason: Introduced in 8.15.0 - do: - query_ruleset.put: + query_rules.put_ruleset: ruleset_id: test-query-ruleset body: rules: @@ -28,7 +28,7 @@ setup: - 'id3' - 'id4' - do: - query_ruleset.put: + query_rules.put_ruleset: ruleset_id: test-query-ruleset-to-delete body: rules: @@ -55,19 +55,19 @@ setup: --- teardown: - do: - query_ruleset.delete: + query_rules.delete_ruleset: ruleset_id: test-query-ruleset ignore: 404 - do: - query_ruleset.delete: + query_rules.delete_ruleset: ruleset_id: test-query-ruleset-to-delete ignore: 404 --- "Delete Query Rule, ruleset still exists": - do: - query_rule.delete: + query_rules.delete_rule: ruleset_id: test-query-ruleset rule_id: query-rule-id1 @@ -75,12 +75,12 @@ teardown: - do: catch: "missing" - query_rule.get: + query_rules.get_rule: ruleset_id: test-query-ruleset rule_id: query-rule-id1 - do: - query_ruleset.get: + query_rules.get_ruleset: ruleset_id: test-query-ruleset - match: { rules.0.rule_id: query-rule-id2 } @@ -88,7 +88,7 @@ teardown: --- "Delete Query Rule, ruleset is also deleted as it is now empty": - do: - query_rule.delete: + query_rules.delete_rule: ruleset_id: test-query-ruleset-to-delete rule_id: query-rule-id1 @@ -96,18 +96,18 @@ teardown: - do: catch: "missing" - query_rule.get: + query_rules.get_rule: ruleset_id: test-query-ruleset-to-delete rule_id: query-rule-id1 - do: - query_ruleset.get: + query_rules.get_ruleset: ruleset_id: test-query-ruleset-to-delete - match: { rules.0.rule_id: query-rule-id2 } - do: - query_rule.delete: + query_rules.delete_rule: ruleset_id: test-query-ruleset-to-delete rule_id: query-rule-id2 @@ -115,20 +115,20 @@ teardown: - do: catch: "missing" - query_rule.get: + query_rules.get_rule: ruleset_id: test-query-ruleset-to-delete rule_id: query-rule-id2 - do: catch: "missing" - query_ruleset.get: + query_rules.get_ruleset: ruleset_id: test-query-ruleset-to-delete --- "Delete Query Rule - Rule does not exist": - do: catch: "missing" - query_rule.delete: + query_rules.delete_rule: ruleset_id: test-query-ruleset rule_id: nonexistent-rule @@ -136,7 +136,7 @@ teardown: "Delete Query Rule - Ruleset does not exist": - do: catch: "missing" - query_rule.delete: + query_rules.delete_rule: ruleset_id: nonexistent-query-ruleset rule_id: nonexistent-rule @@ -148,7 +148,7 @@ teardown: - do: catch: forbidden headers: { Authorization: "Basic ZW50c2VhcmNoLXVzZXI6ZW50c2VhcmNoLXVzZXItcGFzc3dvcmQ=" } # user - query_rule.delete: + query_rules.delete_rule: ruleset_id: test-query-ruleset rule_id: query-rule-id1 diff --git 
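The YAML changes above are a mechanical rename of the REST test helpers from the old query_ruleset.* / query_rule.* namespaces to the consolidated query_rules API: query_rules.put_ruleset, query_rules.get_ruleset, query_rules.delete_ruleset and query_rules.list_rulesets for rulesets, and query_rules.put_rule, query_rules.get_rule and query_rules.delete_rule for individual rules; the qa/rest build.gradle include list switches from 'query_ruleset' and 'query_rule' to 'query_rules' to match. The rule-query search test also rewrites its organic query from the text_expansion syntax to the equivalent sparse_vector form, keeping the same inference model and query text.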
a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/Connector.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/Connector.java index a9c488b024d49..46275bb623b7a 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/Connector.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/Connector.java @@ -377,25 +377,61 @@ public void toInnerXContent(XContentBuilder builder, Params params) throws IOExc if (connectorId != null) { builder.field(ID_FIELD.getPreferredName(), connectorId); } - builder.field(API_KEY_ID_FIELD.getPreferredName(), apiKeyId); - builder.field(API_KEY_SECRET_ID_FIELD.getPreferredName(), apiKeySecretId); - builder.xContentValuesMap(CONFIGURATION_FIELD.getPreferredName(), configuration); - builder.xContentValuesMap(CUSTOM_SCHEDULING_FIELD.getPreferredName(), customScheduling); - builder.field(DESCRIPTION_FIELD.getPreferredName(), description); - builder.field(ERROR_FIELD.getPreferredName(), error); - builder.field(FEATURES_FIELD.getPreferredName(), features); - builder.xContentList(FILTERING_FIELD.getPreferredName(), filtering); - builder.field(INDEX_NAME_FIELD.getPreferredName(), indexName); + if (apiKeyId != null) { + builder.field(API_KEY_ID_FIELD.getPreferredName(), apiKeyId); + } + if (apiKeySecretId != null) { + builder.field(API_KEY_SECRET_ID_FIELD.getPreferredName(), apiKeySecretId); + } + if (configuration != null) { + builder.xContentValuesMap(CONFIGURATION_FIELD.getPreferredName(), configuration); + } + if (customScheduling != null) { + builder.xContentValuesMap(CUSTOM_SCHEDULING_FIELD.getPreferredName(), customScheduling); + } + if (description != null) { + builder.field(DESCRIPTION_FIELD.getPreferredName(), description); + } + if (error != null) { + builder.field(ERROR_FIELD.getPreferredName(), error); + } + if (features != null) { + builder.field(FEATURES_FIELD.getPreferredName(), features); + } + if (filtering != null) { + builder.xContentList(FILTERING_FIELD.getPreferredName(), filtering); + } + if (indexName != null) { + builder.field(INDEX_NAME_FIELD.getPreferredName(), indexName); + } builder.field(IS_NATIVE_FIELD.getPreferredName(), isNative); - builder.field(LANGUAGE_FIELD.getPreferredName(), language); - builder.field(LAST_SEEN_FIELD.getPreferredName(), lastSeen); - syncInfo.toXContent(builder, params); - builder.field(NAME_FIELD.getPreferredName(), name); - builder.field(PIPELINE_FIELD.getPreferredName(), pipeline); - builder.field(SCHEDULING_FIELD.getPreferredName(), scheduling); - builder.field(SERVICE_TYPE_FIELD.getPreferredName(), serviceType); - builder.field(SYNC_CURSOR_FIELD.getPreferredName(), syncCursor); - builder.field(STATUS_FIELD.getPreferredName(), status.toString()); + if (language != null) { + builder.field(LANGUAGE_FIELD.getPreferredName(), language); + } + if (lastSeen != null) { + builder.field(LAST_SEEN_FIELD.getPreferredName(), lastSeen); + } + if (syncInfo != null) { + syncInfo.toXContent(builder, params); + } + if (name != null) { + builder.field(NAME_FIELD.getPreferredName(), name); + } + if (pipeline != null) { + builder.field(PIPELINE_FIELD.getPreferredName(), pipeline); + } + if (scheduling != null) { + builder.field(SCHEDULING_FIELD.getPreferredName(), scheduling); + } + if (serviceType != null) { + builder.field(SERVICE_TYPE_FIELD.getPreferredName(), serviceType); + } + if (syncCursor != null) { + builder.field(SYNC_CURSOR_FIELD.getPreferredName(), syncCursor); + } + 
if (status != null) { + builder.field(STATUS_FIELD.getPreferredName(), status.toString()); + } builder.field(SYNC_NOW_FIELD.getPreferredName(), syncNow); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java index bb03d3c69c74a..cd98b43adc159 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java @@ -467,7 +467,8 @@ else if (configurationValues != null) { } /** - * Updates the error property of a {@link Connector}. + * Updates the error property of a {@link Connector}. If error is non-null the resulting {@link ConnectorStatus} + * is 'error', otherwise it's 'connected'. * * @param connectorId The ID of the {@link Connector} to be updated. * @param error An instance of error property of {@link Connector}, can be reset to [null]. @@ -475,6 +476,9 @@ else if (configurationValues != null) { */ public void updateConnectorError(String connectorId, String error, ActionListener listener) { try { + + ConnectorStatus connectorStatus = Strings.isNullOrEmpty(error) ? ConnectorStatus.CONNECTED : ConnectorStatus.ERROR; + final UpdateRequest updateRequest = new UpdateRequest(CONNECTOR_INDEX_NAME, connectorId).doc( new IndexRequest(CONNECTOR_INDEX_NAME).opType(DocWriteRequest.OpType.INDEX) .id(connectorId) @@ -482,6 +486,7 @@ public void updateConnectorError(String connectorId, String error, ActionListene .source(new HashMap<>() { { put(Connector.ERROR_FIELD.getPreferredName(), error); + put(Connector.STATUS_FIELD.getPreferredName(), connectorStatus.toString()); } }) ); diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorStateMachine.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorStateMachine.java index f722955cc0f9e..87b6c4c3da53f 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorStateMachine.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorStateMachine.java @@ -25,11 +25,11 @@ public class ConnectorStateMachine { ConnectorStatus.NEEDS_CONFIGURATION, EnumSet.of(ConnectorStatus.CONFIGURED, ConnectorStatus.ERROR), ConnectorStatus.CONFIGURED, - EnumSet.of(ConnectorStatus.NEEDS_CONFIGURATION, ConnectorStatus.CONNECTED, ConnectorStatus.ERROR), + EnumSet.of(ConnectorStatus.NEEDS_CONFIGURATION, ConnectorStatus.CONFIGURED, ConnectorStatus.CONNECTED, ConnectorStatus.ERROR), ConnectorStatus.CONNECTED, - EnumSet.of(ConnectorStatus.CONFIGURED, ConnectorStatus.ERROR), + EnumSet.of(ConnectorStatus.CONNECTED, ConnectorStatus.CONFIGURED, ConnectorStatus.ERROR), ConnectorStatus.ERROR, - EnumSet.of(ConnectorStatus.CONNECTED, ConnectorStatus.CONFIGURED) + EnumSet.of(ConnectorStatus.CONNECTED, ConnectorStatus.CONFIGURED, ConnectorStatus.ERROR) ); /** diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PostConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PostConnectorAction.java index 26371ffbed159..fad349cd31877 100644 --- 
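Taken together, the connector changes above make the stored document leaner and the status handling consistent: toInnerXContent now omits any key that was never populated (which is what the new "Check for missing keys" YAML test asserts), updateConnectorError derives the persisted status from the error value, so a non-null error moves the connector to error and clearing the error moves it to connected (matching the status assertions added to 100_connector_update_error.yml), and ConnectorStateMachine now lists CONFIGURED→CONFIGURED, CONNECTED→CONNECTED and ERROR→ERROR as valid transitions so such repeated updates pass validation.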
a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PostConnectorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PostConnectorAction.java @@ -7,21 +7,16 @@ package org.elasticsearch.xpack.application.connector.action; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; import java.io.IOException; import java.util.Objects; @@ -95,14 +90,6 @@ public Request(StreamInput in) throws IOException { PARSER.declareString(optionalConstructorArg(), new ParseField("service_type")); } - public static Request fromXContentBytes(BytesReference source, XContentType xContentType) { - try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { - return Request.fromXContent(parser); - } catch (IOException e) { - throw new ElasticsearchParseException("Failed to parse: " + source.utf8ToString(), e); - } - } - public static Request fromXContent(XContentParser parser) throws IOException { return PARSER.parse(parser, null); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PutConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PutConnectorAction.java index 96ef483236823..687a801ab8fd6 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PutConnectorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PutConnectorAction.java @@ -7,23 +7,18 @@ package org.elasticsearch.xpack.application.connector.action; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; import java.io.IOException; import java.util.Objects; @@ -110,14 +105,6 @@ public Request(StreamInput in) throws IOException { PARSER.declareString(optionalConstructorArg(), new ParseField("service_type")); } - public static 
Request fromXContentBytes(String connectorId, BytesReference source, XContentType xContentType) { - try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { - return Request.fromXContent(parser, connectorId); - } catch (IOException e) { - throw new ElasticsearchParseException("Failed to parse: " + source.utf8ToString(), e); - } - } - public boolean isConnectorIdNullOrEmpty() { return Strings.isNullOrEmpty(connectorId); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestDeleteConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestDeleteConnectorAction.java index d945930d9ee32..8d4a6dccd95fe 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestDeleteConnectorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestDeleteConnectorAction.java @@ -23,6 +23,8 @@ @ServerlessScope(Scope.PUBLIC) public class RestDeleteConnectorAction extends BaseRestHandler { + private static final String CONNECTOR_ID_PARAM = "connector_id"; + @Override public String getName() { return "connector_delete_action"; @@ -30,13 +32,13 @@ public String getName() { @Override public List routes() { - return List.of(new Route(DELETE, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{connector_id}")); + return List.of(new Route(DELETE, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{" + CONNECTOR_ID_PARAM + "}")); } @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { - String connectorId = restRequest.param("connector_id"); + String connectorId = restRequest.param(CONNECTOR_ID_PARAM); boolean shouldDeleteSyncJobs = restRequest.paramAsBoolean("delete_sync_jobs", false); DeleteConnectorAction.Request request = new DeleteConnectorAction.Request(connectorId, shouldDeleteSyncJobs); diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestGetConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestGetConnectorAction.java index 79922755e67ef..8d3d5914ca695 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestGetConnectorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestGetConnectorAction.java @@ -22,6 +22,8 @@ @ServerlessScope(Scope.PUBLIC) public class RestGetConnectorAction extends BaseRestHandler { + private static final String CONNECTOR_ID_PARAM = "connector_id"; + @Override public String getName() { return "connector_get_action"; @@ -29,12 +31,12 @@ public String getName() { @Override public List routes() { - return List.of(new Route(GET, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{connector_id}")); + return List.of(new Route(GET, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{" + CONNECTOR_ID_PARAM + "}")); } @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { - GetConnectorAction.Request request = new GetConnectorAction.Request(restRequest.param("connector_id")); + GetConnectorAction.Request request = new GetConnectorAction.Request(restRequest.param(CONNECTOR_ID_PARAM)); return channel -> client.execute(GetConnectorAction.INSTANCE, request, new 
RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestPostConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestPostConnectorAction.java index 51ddcac3cd58c..99bd2e7ed536d 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestPostConnectorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestPostConnectorAction.java @@ -15,6 +15,7 @@ import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; +import java.io.IOException; import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; @@ -33,11 +34,10 @@ public List routes() { } @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { PostConnectorAction.Request request; - // Handle empty REST request body if (restRequest.hasContent()) { - request = PostConnectorAction.Request.fromXContentBytes(restRequest.content(), restRequest.getXContentType()); + request = PostConnectorAction.Request.fromXContent(restRequest.contentParser()); } else { request = new PostConnectorAction.Request(); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestPutConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestPutConnectorAction.java index fcd292eefc531..feedad45dd890 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestPutConnectorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestPutConnectorAction.java @@ -15,6 +15,7 @@ import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; +import java.io.IOException; import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; @@ -22,6 +23,8 @@ @ServerlessScope(Scope.PUBLIC) public class RestPutConnectorAction extends BaseRestHandler { + private static final String CONNECTOR_ID_PARAM = "connector_id"; + @Override public String getName() { return "connector_put_action"; @@ -30,18 +33,17 @@ public String getName() { @Override public List routes() { return List.of( - new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{connector_id}"), + new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{" + CONNECTOR_ID_PARAM + "}"), new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT) ); } @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { - String connectorId = restRequest.param("connector_id"); + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String connectorId = restRequest.param(CONNECTOR_ID_PARAM); PutConnectorAction.Request request; - // Handle empty REST request body if (restRequest.hasContent()) { - request = PutConnectorAction.Request.fromXContentBytes(connectorId, restRequest.content(), restRequest.getXContentType()); + request = PutConnectorAction.Request.fromXContent(restRequest.contentParser(), connectorId); } else { request = new 
PutConnectorAction.Request(connectorId); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorActiveFilteringAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorActiveFilteringAction.java index fbf44487651cf..4bc58e3b5d52a 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorActiveFilteringAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorActiveFilteringAction.java @@ -22,6 +22,8 @@ @ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorActiveFilteringAction extends BaseRestHandler { + private static final String CONNECTOR_ID_PARAM = "connector_id"; + @Override public String getName() { return "connector_update_active_filtering_action"; @@ -29,13 +31,15 @@ public String getName() { @Override public List routes() { - return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{connector_id}/_filtering/_activate")); + return List.of( + new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{" + CONNECTOR_ID_PARAM + "}/_filtering/_activate") + ); } @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { UpdateConnectorActiveFilteringAction.Request request = new UpdateConnectorActiveFilteringAction.Request( - restRequest.param("connector_id") + restRequest.param(CONNECTOR_ID_PARAM) ); return channel -> client.execute( UpdateConnectorActiveFilteringAction.INSTANCE, diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorApiKeyIdAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorApiKeyIdAction.java index 0cb42f6f448a2..093fa0936c817 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorApiKeyIdAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorApiKeyIdAction.java @@ -13,8 +13,10 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.application.EnterpriseSearch; +import java.io.IOException; import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; @@ -22,6 +24,8 @@ @ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorApiKeyIdAction extends BaseRestHandler { + private static final String CONNECTOR_ID_PARAM = "connector_id"; + @Override public String getName() { return "connector_update_api_key_id_action"; @@ -29,20 +33,21 @@ public String getName() { @Override public List routes() { - return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{connector_id}/_api_key_id")); + return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{" + CONNECTOR_ID_PARAM + "}/_api_key_id")); } @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { - UpdateConnectorApiKeyIdAction.Request request = UpdateConnectorApiKeyIdAction.Request.fromXContentBytes( - restRequest.param("connector_id"), - restRequest.content(), - restRequest.getXContentType() - ); - return 
channel -> client.execute( - UpdateConnectorApiKeyIdAction.INSTANCE, - request, - new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) - ); + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + try (XContentParser parser = restRequest.contentParser()) { + UpdateConnectorApiKeyIdAction.Request request = UpdateConnectorApiKeyIdAction.Request.fromXContent( + parser, + restRequest.param(CONNECTOR_ID_PARAM) + ); + return channel -> client.execute( + UpdateConnectorApiKeyIdAction.INSTANCE, + request, + new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) + ); + } } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorConfigurationAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorConfigurationAction.java index f4cc47da2f109..7f2447abfdc34 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorConfigurationAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorConfigurationAction.java @@ -13,8 +13,10 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.application.EnterpriseSearch; +import java.io.IOException; import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; @@ -22,6 +24,8 @@ @ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorConfigurationAction extends BaseRestHandler { + private static final String CONNECTOR_ID_PARAM = "connector_id"; + @Override public String getName() { return "connector_update_configuration_action"; @@ -29,20 +33,22 @@ public String getName() { @Override public List routes() { - return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{connector_id}/_configuration")); + return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{" + CONNECTOR_ID_PARAM + "}/_configuration")); } @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { - UpdateConnectorConfigurationAction.Request request = UpdateConnectorConfigurationAction.Request.fromXContentBytes( - restRequest.param("connector_id"), - restRequest.content(), - restRequest.getXContentType() - ); - return channel -> client.execute( - UpdateConnectorConfigurationAction.INSTANCE, - request, - new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) - ); + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + try (XContentParser parser = restRequest.contentParser()) { + UpdateConnectorConfigurationAction.Request request = UpdateConnectorConfigurationAction.Request.fromXContent( + parser, + restRequest.param(CONNECTOR_ID_PARAM) + ); + return channel -> client.execute( + UpdateConnectorConfigurationAction.INSTANCE, + request, + new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) + ); + + } } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorErrorAction.java 
b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorErrorAction.java index df56f5825f84e..85f94682f0825 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorErrorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorErrorAction.java @@ -13,8 +13,10 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.application.EnterpriseSearch; +import java.io.IOException; import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; @@ -22,6 +24,8 @@ @ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorErrorAction extends BaseRestHandler { + private static final String CONNECTOR_ID_PARAM = "connector_id"; + @Override public String getName() { return "connector_update_error_action"; @@ -29,20 +33,21 @@ public String getName() { @Override public List routes() { - return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{connector_id}/_error")); + return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{" + CONNECTOR_ID_PARAM + "}/_error")); } @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { - UpdateConnectorErrorAction.Request request = UpdateConnectorErrorAction.Request.fromXContentBytes( - restRequest.param("connector_id"), - restRequest.content(), - restRequest.getXContentType() - ); - return channel -> client.execute( - UpdateConnectorErrorAction.INSTANCE, - request, - new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) - ); + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + try (XContentParser parser = restRequest.contentParser()) { + UpdateConnectorErrorAction.Request request = UpdateConnectorErrorAction.Request.fromXContent( + parser, + restRequest.param(CONNECTOR_ID_PARAM) + ); + return channel -> client.execute( + UpdateConnectorErrorAction.INSTANCE, + request, + new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) + ); + } } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFeaturesAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFeaturesAction.java index 48bf87b114548..c26dcba52b705 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFeaturesAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFeaturesAction.java @@ -13,8 +13,10 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.application.EnterpriseSearch; +import java.io.IOException; import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; @@ -22,6 +24,8 @@ @ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorFeaturesAction extends BaseRestHandler { + private static final String CONNECTOR_ID_PARAM = "connector_id"; + 
@Override public String getName() { return "connector_update_features_action"; @@ -29,20 +33,21 @@ public String getName() { @Override public List routes() { - return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{connector_id}/_features")); + return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{" + CONNECTOR_ID_PARAM + "}/_features")); } @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { - UpdateConnectorFeaturesAction.Request request = UpdateConnectorFeaturesAction.Request.fromXContentBytes( - restRequest.param("connector_id"), - restRequest.content(), - restRequest.getXContentType() - ); - return channel -> client.execute( - UpdateConnectorFeaturesAction.INSTANCE, - request, - new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) - ); + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + try (XContentParser parser = restRequest.contentParser()) { + UpdateConnectorFeaturesAction.Request request = UpdateConnectorFeaturesAction.Request.fromXContent( + parser, + restRequest.param(CONNECTOR_ID_PARAM) + ); + return channel -> client.execute( + UpdateConnectorFeaturesAction.INSTANCE, + request, + new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) + ); + } } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFilteringAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFilteringAction.java index ae294dfebd111..0ee665561b888 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFilteringAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFilteringAction.java @@ -13,8 +13,10 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.application.EnterpriseSearch; +import java.io.IOException; import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; @@ -22,6 +24,8 @@ @ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorFilteringAction extends BaseRestHandler { + private static final String CONNECTOR_ID_PARAM = "connector_id"; + @Override public String getName() { return "connector_update_filtering_action"; @@ -29,20 +33,22 @@ public String getName() { @Override public List routes() { - return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{connector_id}/_filtering")); + return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{" + CONNECTOR_ID_PARAM + "}/_filtering")); } @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { - UpdateConnectorFilteringAction.Request request = UpdateConnectorFilteringAction.Request.fromXContentBytes( - restRequest.param("connector_id"), - restRequest.content(), - restRequest.getXContentType() - ); - return channel -> client.execute( - UpdateConnectorFilteringAction.INSTANCE, - request, - new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) - ); + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws 
IOException { + try (XContentParser parser = restRequest.contentParser()) { + UpdateConnectorFilteringAction.Request request = UpdateConnectorFilteringAction.Request.fromXContent( + parser, + restRequest.param(CONNECTOR_ID_PARAM) + ); + return channel -> client.execute( + UpdateConnectorFilteringAction.INSTANCE, + request, + new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) + ); + } + } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFilteringValidationAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFilteringValidationAction.java index 32020eea4b8b9..697cf95b984ef 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFilteringValidationAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFilteringValidationAction.java @@ -13,8 +13,10 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.application.EnterpriseSearch; +import java.io.IOException; import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; @@ -22,6 +24,8 @@ @ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorFilteringValidationAction extends BaseRestHandler { + private static final String CONNECTOR_ID_PARAM = "connector_id"; + @Override public String getName() { return "connector_update_filtering_validation_action"; @@ -29,20 +33,23 @@ public String getName() { @Override public List routes() { - return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{connector_id}/_filtering/_validation")); + return List.of( + new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{" + CONNECTOR_ID_PARAM + "}/_filtering/_validation") + ); } @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { - UpdateConnectorFilteringValidationAction.Request request = UpdateConnectorFilteringValidationAction.Request.fromXContentBytes( - restRequest.param("connector_id"), - restRequest.content(), - restRequest.getXContentType() - ); - return channel -> client.execute( - UpdateConnectorFilteringValidationAction.INSTANCE, - request, - new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) - ); + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + try (XContentParser parser = restRequest.contentParser()) { + UpdateConnectorFilteringValidationAction.Request request = UpdateConnectorFilteringValidationAction.Request.fromXContent( + parser, + restRequest.param(CONNECTOR_ID_PARAM) + ); + return channel -> client.execute( + UpdateConnectorFilteringValidationAction.INSTANCE, + request, + new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) + ); + } } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorIndexNameAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorIndexNameAction.java index ce6dd0a5ba24f..89870643901b9 100644 --- 
a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorIndexNameAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorIndexNameAction.java @@ -13,8 +13,10 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.application.EnterpriseSearch; +import java.io.IOException; import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; @@ -22,6 +24,8 @@ @ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorIndexNameAction extends BaseRestHandler { + private static final String CONNECTOR_ID_PARAM = "connector_id"; + @Override public String getName() { return "connector_update_index_name_action"; @@ -29,20 +33,21 @@ public String getName() { @Override public List routes() { - return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{connector_id}/_index_name")); + return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{" + CONNECTOR_ID_PARAM + "}/_index_name")); } @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { - UpdateConnectorIndexNameAction.Request request = UpdateConnectorIndexNameAction.Request.fromXContentBytes( - restRequest.param("connector_id"), - restRequest.content(), - restRequest.getXContentType() - ); - return channel -> client.execute( - UpdateConnectorIndexNameAction.INSTANCE, - request, - new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) - ); + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + try (XContentParser parser = restRequest.contentParser()) { + UpdateConnectorIndexNameAction.Request request = UpdateConnectorIndexNameAction.Request.fromXContent( + parser, + restRequest.param(CONNECTOR_ID_PARAM) + ); + return channel -> client.execute( + UpdateConnectorIndexNameAction.INSTANCE, + request, + new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) + ); + } } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorLastSeenAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorLastSeenAction.java index bef6c357fdda3..6f76e70971a9f 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorLastSeenAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorLastSeenAction.java @@ -22,6 +22,8 @@ @ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorLastSeenAction extends BaseRestHandler { + private static final String CONNECTOR_ID_PARAM = "connector_id"; + @Override public String getName() { return "connector_update_last_seen_action"; @@ -29,12 +31,12 @@ public String getName() { @Override public List routes() { - return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{connector_id}/_check_in")); + return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{" + CONNECTOR_ID_PARAM + "}/_check_in")); } @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { - 
UpdateConnectorLastSeenAction.Request request = new UpdateConnectorLastSeenAction.Request(restRequest.param("connector_id")); + UpdateConnectorLastSeenAction.Request request = new UpdateConnectorLastSeenAction.Request(restRequest.param(CONNECTOR_ID_PARAM)); return channel -> client.execute( UpdateConnectorLastSeenAction.INSTANCE, request, diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorLastSyncStatsAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorLastSyncStatsAction.java index 6275e84a28952..804b792810ffd 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorLastSyncStatsAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorLastSyncStatsAction.java @@ -13,8 +13,10 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.application.EnterpriseSearch; +import java.io.IOException; import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; @@ -22,6 +24,8 @@ @ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorLastSyncStatsAction extends BaseRestHandler { + private static final String CONNECTOR_ID_PARAM = "connector_id"; + @Override public String getName() { return "connector_update_last_sync_stats_action"; @@ -29,20 +33,21 @@ public String getName() { @Override public List routes() { - return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{connector_id}/_last_sync")); + return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{" + CONNECTOR_ID_PARAM + "}/_last_sync")); } @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { - UpdateConnectorLastSyncStatsAction.Request request = UpdateConnectorLastSyncStatsAction.Request.fromXContentBytes( - restRequest.param("connector_id"), - restRequest.content(), - restRequest.getXContentType() - ); - return channel -> client.execute( - UpdateConnectorLastSyncStatsAction.INSTANCE, - request, - new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) - ); + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + try (XContentParser parser = restRequest.contentParser()) { + UpdateConnectorLastSyncStatsAction.Request request = UpdateConnectorLastSyncStatsAction.Request.fromXContent( + parser, + restRequest.param(CONNECTOR_ID_PARAM) + ); + return channel -> client.execute( + UpdateConnectorLastSyncStatsAction.INSTANCE, + request, + new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) + ); + } } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorNameAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorNameAction.java index 7fbd42cbff272..21d7d74166b7a 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorNameAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorNameAction.java @@ -13,8 
+13,10 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.application.EnterpriseSearch; +import java.io.IOException; import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; @@ -22,6 +24,8 @@ @ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorNameAction extends BaseRestHandler { + private static final String CONNECTOR_ID_PARAM = "connector_id"; + @Override public String getName() { return "connector_update_name_action"; @@ -29,20 +33,21 @@ public String getName() { @Override public List routes() { - return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{connector_id}/_name")); + return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{" + CONNECTOR_ID_PARAM + "}/_name")); } @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { - UpdateConnectorNameAction.Request request = UpdateConnectorNameAction.Request.fromXContentBytes( - restRequest.param("connector_id"), - restRequest.content(), - restRequest.getXContentType() - ); - return channel -> client.execute( - UpdateConnectorNameAction.INSTANCE, - request, - new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) - ); + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + try (XContentParser parser = restRequest.contentParser()) { + UpdateConnectorNameAction.Request request = UpdateConnectorNameAction.Request.fromXContent( + parser, + restRequest.param(CONNECTOR_ID_PARAM) + ); + return channel -> client.execute( + UpdateConnectorNameAction.INSTANCE, + request, + new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) + ); + } } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorNativeAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorNativeAction.java index 464d682567043..e2f9730df723d 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorNativeAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorNativeAction.java @@ -13,8 +13,10 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.application.EnterpriseSearch; +import java.io.IOException; import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; @@ -22,6 +24,8 @@ @ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorNativeAction extends BaseRestHandler { + private static final String CONNECTOR_ID_PARAM = "connector_id"; + @Override public String getName() { return "connector_update_native_action"; @@ -29,20 +33,21 @@ public String getName() { @Override public List routes() { - return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{connector_id}/_native")); + return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{" + CONNECTOR_ID_PARAM + "}/_native")); } @Override - protected RestChannelConsumer prepareRequest(RestRequest 
restRequest, NodeClient client) { - UpdateConnectorNativeAction.Request request = UpdateConnectorNativeAction.Request.fromXContentBytes( - restRequest.param("connector_id"), - restRequest.content(), - restRequest.getXContentType() - ); - return channel -> client.execute( - UpdateConnectorNativeAction.INSTANCE, - request, - new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) - ); + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + try (XContentParser parser = restRequest.contentParser()) { + UpdateConnectorNativeAction.Request request = UpdateConnectorNativeAction.Request.fromXContent( + parser, + restRequest.param(CONNECTOR_ID_PARAM) + ); + return channel -> client.execute( + UpdateConnectorNativeAction.INSTANCE, + request, + new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) + ); + } } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorPipelineAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorPipelineAction.java index 465414491bb95..24502d0def1df 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorPipelineAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorPipelineAction.java @@ -13,8 +13,10 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.application.EnterpriseSearch; +import java.io.IOException; import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; @@ -22,6 +24,8 @@ @ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorPipelineAction extends BaseRestHandler { + private static final String CONNECTOR_ID_PARAM = "connector_id"; + @Override public String getName() { return "connector_update_pipeline_action"; @@ -29,20 +33,21 @@ public String getName() { @Override public List routes() { - return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{connector_id}/_pipeline")); + return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{" + CONNECTOR_ID_PARAM + "}/_pipeline")); } @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { - UpdateConnectorPipelineAction.Request request = UpdateConnectorPipelineAction.Request.fromXContentBytes( - restRequest.param("connector_id"), - restRequest.content(), - restRequest.getXContentType() - ); - return channel -> client.execute( - UpdateConnectorPipelineAction.INSTANCE, - request, - new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) - ); + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + try (XContentParser parser = restRequest.contentParser()) { + UpdateConnectorPipelineAction.Request request = UpdateConnectorPipelineAction.Request.fromXContent( + parser, + restRequest.param(CONNECTOR_ID_PARAM) + ); + return channel -> client.execute( + UpdateConnectorPipelineAction.INSTANCE, + request, + new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) + ); + } } } diff --git 
a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorSchedulingAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorSchedulingAction.java index dfc12659d394b..191def3a8af52 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorSchedulingAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorSchedulingAction.java @@ -13,8 +13,10 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.application.EnterpriseSearch; +import java.io.IOException; import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; @@ -22,6 +24,8 @@ @ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorSchedulingAction extends BaseRestHandler { + private static final String CONNECTOR_ID_PARAM = "connector_id"; + @Override public String getName() { return "connector_update_scheduling_action"; @@ -29,20 +33,21 @@ public String getName() { @Override public List routes() { - return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{connector_id}/_scheduling")); + return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{" + CONNECTOR_ID_PARAM + "}/_scheduling")); } @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { - UpdateConnectorSchedulingAction.Request request = UpdateConnectorSchedulingAction.Request.fromXContentBytes( - restRequest.param("connector_id"), - restRequest.content(), - restRequest.getXContentType() - ); - return channel -> client.execute( - UpdateConnectorSchedulingAction.INSTANCE, - request, - new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) - ); + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + try (XContentParser parser = restRequest.contentParser()) { + UpdateConnectorSchedulingAction.Request request = UpdateConnectorSchedulingAction.Request.fromXContent( + parser, + restRequest.param(CONNECTOR_ID_PARAM) + ); + return channel -> client.execute( + UpdateConnectorSchedulingAction.INSTANCE, + request, + new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) + ); + } } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorServiceTypeAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorServiceTypeAction.java index 89c3303f8cc94..9375c338d64b4 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorServiceTypeAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorServiceTypeAction.java @@ -13,8 +13,10 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.application.EnterpriseSearch; +import java.io.IOException; import java.util.List; import static 
org.elasticsearch.rest.RestRequest.Method.PUT; @@ -22,6 +24,8 @@ @ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorServiceTypeAction extends BaseRestHandler { + private static final String CONNECTOR_ID_PARAM = "connector_id"; + @Override public String getName() { return "connector_update_service_type_action"; @@ -29,20 +33,21 @@ public String getName() { @Override public List routes() { - return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{connector_id}/_service_type")); + return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{" + CONNECTOR_ID_PARAM + "}/_service_type")); } @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { - UpdateConnectorServiceTypeAction.Request request = UpdateConnectorServiceTypeAction.Request.fromXContentBytes( - restRequest.param("connector_id"), - restRequest.content(), - restRequest.getXContentType() - ); - return channel -> client.execute( - UpdateConnectorServiceTypeAction.INSTANCE, - request, - new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) - ); + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + try (XContentParser parser = restRequest.contentParser()) { + UpdateConnectorServiceTypeAction.Request request = UpdateConnectorServiceTypeAction.Request.fromXContent( + parser, + restRequest.param(CONNECTOR_ID_PARAM) + ); + return channel -> client.execute( + UpdateConnectorServiceTypeAction.INSTANCE, + request, + new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) + ); + } } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorStatusAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorStatusAction.java index 9770a051ce4fc..cb741fa8301a9 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorStatusAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorStatusAction.java @@ -13,8 +13,10 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.application.EnterpriseSearch; +import java.io.IOException; import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; @@ -22,6 +24,8 @@ @ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorStatusAction extends BaseRestHandler { + private static final String CONNECTOR_ID_PARAM = "connector_id"; + @Override public String getName() { return "connector_update_status_action"; @@ -29,20 +33,21 @@ public String getName() { @Override public List routes() { - return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{connector_id}/_status")); + return List.of(new Route(PUT, "/" + EnterpriseSearch.CONNECTOR_API_ENDPOINT + "/{" + CONNECTOR_ID_PARAM + "}/_status")); } @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { - UpdateConnectorStatusAction.Request request = UpdateConnectorStatusAction.Request.fromXContentBytes( - restRequest.param("connector_id"), - restRequest.content(), - restRequest.getXContentType() - ); - return channel -> client.execute( 
- UpdateConnectorStatusAction.INSTANCE, - request, - new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) - ); + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + try (XContentParser parser = restRequest.contentParser()) { + UpdateConnectorStatusAction.Request request = UpdateConnectorStatusAction.Request.fromXContent( + parser, + restRequest.param(CONNECTOR_ID_PARAM) + ); + return channel -> client.execute( + UpdateConnectorStatusAction.INSTANCE, + request, + new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) + ); + } } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorApiKeyIdAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorApiKeyIdAction.java index b7f07fc4a34cb..7f726f21ce225 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorApiKeyIdAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorApiKeyIdAction.java @@ -7,21 +7,16 @@ package org.elasticsearch.xpack.application.connector.action; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.application.connector.Connector; import java.io.IOException; @@ -100,18 +95,6 @@ public ActionRequestValidationException validate() { PARSER.declareStringOrNull(optionalConstructorArg(), Connector.API_KEY_SECRET_ID_FIELD); } - public static UpdateConnectorApiKeyIdAction.Request fromXContentBytes( - String connectorId, - BytesReference source, - XContentType xContentType - ) { - try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { - return UpdateConnectorApiKeyIdAction.Request.fromXContent(parser, connectorId); - } catch (IOException e) { - throw new ElasticsearchParseException("Failed to parse: " + source.utf8ToString(), e); - } - } - public static UpdateConnectorApiKeyIdAction.Request fromXContent(XContentParser parser, String connectorId) throws IOException { return PARSER.parse(parser, connectorId); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorConfigurationAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorConfigurationAction.java index 9069f832e1c44..5d36c5f886ea0 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorConfigurationAction.java +++ 
b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorConfigurationAction.java @@ -7,22 +7,17 @@ package org.elasticsearch.xpack.application.connector.action; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.application.connector.Connector; import org.elasticsearch.xpack.application.connector.ConnectorConfiguration; @@ -123,18 +118,6 @@ public ActionRequestValidationException validate() { PARSER.declareField(optionalConstructorArg(), (p, c) -> p.map(), VALUES_FIELD, ObjectParser.ValueType.VALUE_OBJECT_ARRAY); } - public static UpdateConnectorConfigurationAction.Request fromXContentBytes( - String connectorId, - BytesReference source, - XContentType xContentType - ) { - try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { - return UpdateConnectorConfigurationAction.Request.fromXContent(parser, connectorId); - } catch (IOException e) { - throw new ElasticsearchParseException("Failed to parse connector configuration.", e); - } - } - public static UpdateConnectorConfigurationAction.Request fromXContent(XContentParser parser, String connectorId) throws IOException { return PARSER.parse(parser, connectorId); diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorErrorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorErrorAction.java index ae86c1fc98df1..3e506fc835f65 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorErrorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorErrorAction.java @@ -7,21 +7,16 @@ package org.elasticsearch.xpack.application.connector.action; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; import 
org.elasticsearch.xpack.application.connector.Connector; import java.io.IOException; @@ -84,18 +79,6 @@ public ActionRequestValidationException validate() { PARSER.declareStringOrNull(constructorArg(), Connector.ERROR_FIELD); } - public static UpdateConnectorErrorAction.Request fromXContentBytes( - String connectorId, - BytesReference source, - XContentType xContentType - ) { - try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { - return UpdateConnectorErrorAction.Request.fromXContent(parser, connectorId); - } catch (IOException e) { - throw new ElasticsearchParseException("Failed to parse: " + source.utf8ToString(), e); - } - } - public static UpdateConnectorErrorAction.Request fromXContent(XContentParser parser, String connectorId) throws IOException { return PARSER.parse(parser, connectorId); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorFeaturesAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorFeaturesAction.java index c1f62c0efe6e8..56656855583aa 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorFeaturesAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorFeaturesAction.java @@ -7,20 +7,15 @@ package org.elasticsearch.xpack.application.connector.action; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.application.connector.Connector; import org.elasticsearch.xpack.application.connector.ConnectorFeatures; @@ -83,18 +78,6 @@ public ActionRequestValidationException validate() { PARSER.declareObject(optionalConstructorArg(), (p, c) -> ConnectorFeatures.fromXContent(p), Connector.FEATURES_FIELD); } - public static UpdateConnectorFeaturesAction.Request fromXContentBytes( - String connectorId, - BytesReference source, - XContentType xContentType - ) { - try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { - return UpdateConnectorFeaturesAction.Request.fromXContent(parser, connectorId); - } catch (IOException e) { - throw new ElasticsearchParseException("Failed to parse: " + source.utf8ToString(), e); - } - } - public static UpdateConnectorFeaturesAction.Request fromXContent(XContentParser parser, String connectorId) throws IOException { return PARSER.parse(parser, connectorId); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorFilteringAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorFilteringAction.java index 
54c9a6e6417dc..660956b2e9d7f 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorFilteringAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorFilteringAction.java @@ -7,21 +7,16 @@ package org.elasticsearch.xpack.application.connector.action; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.application.connector.Connector; import org.elasticsearch.xpack.application.connector.ConnectorFiltering; import org.elasticsearch.xpack.application.connector.filtering.FilteringAdvancedSnippet; @@ -138,18 +133,6 @@ public ActionRequestValidationException validate() { PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> FilteringRule.fromXContent(p), FilteringRules.RULES_FIELD); } - public static UpdateConnectorFilteringAction.Request fromXContentBytes( - String connectorId, - BytesReference source, - XContentType xContentType - ) { - try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { - return UpdateConnectorFilteringAction.Request.fromXContent(parser, connectorId); - } catch (IOException e) { - throw new ElasticsearchParseException("Failed to parse: " + source.utf8ToString(), e); - } - } - public static UpdateConnectorFilteringAction.Request fromXContent(XContentParser parser, String connectorId) throws IOException { return PARSER.parse(parser, connectorId); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorFilteringValidationAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorFilteringValidationAction.java index 2164019c62ba3..92291506d0719 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorFilteringValidationAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorFilteringValidationAction.java @@ -7,20 +7,15 @@ package org.elasticsearch.xpack.application.connector.action; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ToXContentObject; import 
org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.application.connector.filtering.FilteringRules; import org.elasticsearch.xpack.application.connector.filtering.FilteringValidationInfo; @@ -91,18 +86,6 @@ public ActionRequestValidationException validate() { PARSER.declareObject(constructorArg(), (p, c) -> FilteringValidationInfo.fromXContent(p), FilteringRules.VALIDATION_FIELD); } - public static UpdateConnectorFilteringValidationAction.Request fromXContentBytes( - String connectorId, - BytesReference source, - XContentType xContentType - ) { - try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { - return UpdateConnectorFilteringValidationAction.Request.fromXContent(parser, connectorId); - } catch (IOException e) { - throw new ElasticsearchParseException("Failed to parse: " + source.utf8ToString(), e); - } - } - public static UpdateConnectorFilteringValidationAction.Request fromXContent(XContentParser parser, String connectorId) throws IOException { return PARSER.parse(parser, connectorId); diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorIndexNameAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorIndexNameAction.java index c6cb18089ad06..e7840e1f84fad 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorIndexNameAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorIndexNameAction.java @@ -7,21 +7,16 @@ package org.elasticsearch.xpack.application.connector.action; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.application.connector.Connector; import java.io.IOException; @@ -73,18 +68,6 @@ public String getIndexName() { PARSER.declareStringOrNull(constructorArg(), Connector.INDEX_NAME_FIELD); } - public static UpdateConnectorIndexNameAction.Request fromXContentBytes( - String connectorId, - BytesReference source, - XContentType xContentType - ) { - try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { - return UpdateConnectorIndexNameAction.Request.fromXContent(parser, connectorId); - } catch (IOException e) { - throw new ElasticsearchParseException("Failed to parse: " + source.utf8ToString(), e); - } - } - public static UpdateConnectorIndexNameAction.Request fromXContent(XContentParser parser, String connectorId) throws IOException { return 
PARSER.parse(parser, connectorId); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorLastSyncStatsAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorLastSyncStatsAction.java index 1628a493cbec5..ae3be3801786c 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorLastSyncStatsAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorLastSyncStatsAction.java @@ -7,22 +7,17 @@ package org.elasticsearch.xpack.application.connector.action; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.application.connector.Connector; import org.elasticsearch.xpack.application.connector.ConnectorSyncInfo; import org.elasticsearch.xpack.application.connector.ConnectorSyncStatus; @@ -157,18 +152,6 @@ public ActionRequestValidationException validate() { PARSER.declareObjectOrNull(optionalConstructorArg(), (p, c) -> p.map(), null, Connector.SYNC_CURSOR_FIELD); } - public static UpdateConnectorLastSyncStatsAction.Request fromXContentBytes( - String connectorId, - BytesReference source, - XContentType xContentType - ) { - try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { - return UpdateConnectorLastSyncStatsAction.Request.fromXContent(parser, connectorId); - } catch (IOException e) { - throw new ElasticsearchParseException("Failed to parse: " + source.utf8ToString(), e); - } - } - public static UpdateConnectorLastSyncStatsAction.Request fromXContent(XContentParser parser, String connectorId) throws IOException { return PARSER.parse(parser, connectorId); diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorNameAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorNameAction.java index 1aa10f0b7dd45..bbc1f992b48e2 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorNameAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorNameAction.java @@ -7,21 +7,16 @@ package org.elasticsearch.xpack.application.connector.action; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.application.connector.Connector; import java.io.IOException; @@ -101,18 +96,6 @@ public ActionRequestValidationException validate() { PARSER.declareStringOrNull(optionalConstructorArg(), Connector.DESCRIPTION_FIELD); } - public static UpdateConnectorNameAction.Request fromXContentBytes( - String connectorId, - BytesReference source, - XContentType xContentType - ) { - try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { - return UpdateConnectorNameAction.Request.fromXContent(parser, connectorId); - } catch (IOException e) { - throw new ElasticsearchParseException("Failed to parse: " + source.utf8ToString(), e); - } - } - public static UpdateConnectorNameAction.Request fromXContent(XContentParser parser, String connectorId) throws IOException { return PARSER.parse(parser, connectorId); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorNativeAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorNativeAction.java index 9b539d055ef7e..7b3f2e4577f4e 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorNativeAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorNativeAction.java @@ -7,20 +7,15 @@ package org.elasticsearch.xpack.application.connector.action; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.application.connector.Connector; import java.io.IOException; @@ -70,18 +65,6 @@ public boolean isNative() { PARSER.declareBoolean(constructorArg(), Connector.IS_NATIVE_FIELD); } - public static UpdateConnectorNativeAction.Request fromXContentBytes( - String connectorId, - BytesReference source, - XContentType xContentType - ) { - try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { - return UpdateConnectorNativeAction.Request.fromXContent(parser, connectorId); - } catch (IOException e) { - throw new ElasticsearchParseException("Failed to parse: " + source.utf8ToString(), e); - } - } - public static 
UpdateConnectorNativeAction.Request fromXContent(XContentParser parser, String connectorId) throws IOException { return PARSER.parse(parser, connectorId); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorPipelineAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorPipelineAction.java index ee1f24ea6d20d..e58d614f4ef21 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorPipelineAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorPipelineAction.java @@ -7,20 +7,15 @@ package org.elasticsearch.xpack.application.connector.action; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.application.connector.Connector; import org.elasticsearch.xpack.application.connector.ConnectorIngestPipeline; @@ -87,18 +82,6 @@ public ActionRequestValidationException validate() { PARSER.declareObject(constructorArg(), (p, c) -> ConnectorIngestPipeline.fromXContent(p), Connector.PIPELINE_FIELD); } - public static UpdateConnectorPipelineAction.Request fromXContentBytes( - String connectorId, - BytesReference source, - XContentType xContentType - ) { - try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { - return UpdateConnectorPipelineAction.Request.fromXContent(parser, connectorId); - } catch (IOException e) { - throw new ElasticsearchParseException("Failed to parse: " + source.utf8ToString(), e); - } - } - public static UpdateConnectorPipelineAction.Request fromXContent(XContentParser parser, String connectorId) throws IOException { return PARSER.parse(parser, connectorId); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorServiceTypeAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorServiceTypeAction.java index 68aec9624d30f..de07a6db21bab 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorServiceTypeAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorServiceTypeAction.java @@ -7,20 +7,15 @@ package org.elasticsearch.xpack.application.connector.action; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.application.connector.Connector; import java.io.IOException; @@ -71,18 +66,6 @@ public String getServiceType() { PARSER.declareString(constructorArg(), Connector.SERVICE_TYPE_FIELD); } - public static UpdateConnectorServiceTypeAction.Request fromXContentBytes( - String connectorId, - BytesReference source, - XContentType xContentType - ) { - try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { - return UpdateConnectorServiceTypeAction.Request.fromXContent(parser, connectorId); - } catch (IOException e) { - throw new ElasticsearchParseException("Failed to parse: " + source.utf8ToString(), e); - } - } - public static UpdateConnectorServiceTypeAction.Request fromXContent(XContentParser parser, String connectorId) throws IOException { return PARSER.parse(parser, connectorId); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorStatusAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorStatusAction.java index cd8b36df2e148..aebaa0afb9052 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorStatusAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/UpdateConnectorStatusAction.java @@ -7,21 +7,16 @@ package org.elasticsearch.xpack.application.connector.action; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.application.connector.Connector; import org.elasticsearch.xpack.application.connector.ConnectorStatus; @@ -77,18 +72,6 @@ public ConnectorStatus getStatus() { ); } - public static UpdateConnectorStatusAction.Request fromXContentBytes( - String connectorId, - BytesReference source, - XContentType xContentType - ) { - try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { - return UpdateConnectorStatusAction.Request.fromXContent(parser, connectorId); - } catch (IOException e) { - throw new ElasticsearchParseException("Failed to parse: " + source.utf8ToString(), e); - } - } - public static UpdateConnectorStatusAction.Request fromXContent(XContentParser parser, String connectorId) throws 
IOException { return PARSER.parse(parser, connectorId); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJob.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJob.java index b72bffab81e1f..4aabb9e1af663 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJob.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJob.java @@ -91,7 +91,7 @@ public class ConnectorSyncJob implements Writeable, ToXContentObject { public static final ParseField LAST_SEEN_FIELD = new ParseField("last_seen"); - static final ParseField METADATA_FIELD = new ParseField("metadata"); + public static final ParseField METADATA_FIELD = new ParseField("metadata"); static final ParseField STARTED_AT_FIELD = new ParseField("started_at"); diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java index 72ca1f1d8499b..9ef895a3a5786 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java @@ -464,6 +464,11 @@ public void updateConnectorSyncJobIngestionStats( Instant lastSeen = Objects.nonNull(request.getLastSeen()) ? request.getLastSeen() : Instant.now(); fieldsToUpdate.put(ConnectorSyncJob.LAST_SEEN_FIELD.getPreferredName(), lastSeen); + Map metadata = request.getMetadata(); + if (Objects.nonNull(metadata)) { + fieldsToUpdate.put(ConnectorSyncJob.METADATA_FIELD.getPreferredName(), metadata); + } + final UpdateRequest updateRequest = new UpdateRequest(CONNECTOR_SYNC_JOB_INDEX_NAME, syncJobId).setRefreshPolicy( WriteRequest.RefreshPolicy.IMMEDIATE ).doc(fieldsToUpdate); diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobStateMachine.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobStateMachine.java index dc624b5bf8ba1..952cd12d1ee7c 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobStateMachine.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobStateMachine.java @@ -45,7 +45,7 @@ public class ConnectorSyncJobStateMachine { * @param next The proposed next {link ConnectorSyncStatus} of the {@link ConnectorSyncJob}. 
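Across the hunks above, every connector update action in this diff (error, features, filtering, filtering validation, index name, last sync stats, name, native, pipeline, service type, status) drops the same fromXContentBytes(String, BytesReference, XContentType) helper, which created its own parser with XContentHelper.createParser and rethrew IOException as ElasticsearchParseException. Body parsing now happens in the REST layer, so each action keeps only fromXContent(XContentParser, String). A minimal sketch of the handler-side pattern, copied in shape from the sync-job REST handler hunks later in this diff (the listener wiring differs per action):

    // The RestRequest supplies and validates the body parser; the action only parses.
    @Override
    protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
        try (XContentParser parser = restRequest.contentParser()) {
            ClaimConnectorSyncJobAction.Request request = ClaimConnectorSyncJobAction.Request.fromXContent(
                parser,
                restRequest.param(CONNECTOR_SYNC_JOB_ID_PARAM)
            );
            return channel -> client.execute(ClaimConnectorSyncJobAction.INSTANCE, request, new RestToXContentListener<>(channel));
        }
    }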
*/ public static boolean isValidTransition(ConnectorSyncStatus current, ConnectorSyncStatus next) { - return VALID_TRANSITIONS.getOrDefault(current, Collections.emptySet()).contains(next); + return validNextStates(current).contains(next); } /** @@ -60,4 +60,8 @@ public static void assertValidStateTransition(ConnectorSyncStatus current, Conne if (isValidTransition(current, next)) return; throw new ConnectorSyncJobInvalidStatusTransitionException(current, next); } + + public static Set validNextStates(ConnectorSyncStatus current) { + return VALID_TRANSITIONS.getOrDefault(current, Collections.emptySet()); + } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/ClaimConnectorSyncJobAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/ClaimConnectorSyncJobAction.java index 74a7e1bdd0282..b108116a5e68c 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/ClaimConnectorSyncJobAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/ClaimConnectorSyncJobAction.java @@ -7,21 +7,16 @@ package org.elasticsearch.xpack.application.connector.syncjob.action; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.application.connector.Connector; import org.elasticsearch.xpack.application.connector.action.ConnectorUpdateActionResponse; import org.elasticsearch.xpack.application.connector.syncjob.ConnectorSyncJob; @@ -105,14 +100,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public static Request fromXContentBytes(String connectorSyncJobId, BytesReference source, XContentType xContentType) { - try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { - return fromXContent(parser, connectorSyncJobId); - } catch (IOException e) { - throw new ElasticsearchParseException("Failed to parse request" + source.utf8ToString()); - } - } - @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/PostConnectorSyncJobAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/PostConnectorSyncJobAction.java index 5e898d9524d0b..8c1d24e466daa 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/PostConnectorSyncJobAction.java +++ 
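ConnectorSyncJobStateMachine (hunk above) now exposes a public validNextStates(current) accessor and reimplements isValidTransition in terms of it, so callers can see the full set of legal successors instead of a plain boolean. A hedged usage sketch, assuming the accessor returns Set<ConnectorSyncStatus> backed by the existing VALID_TRANSITIONS map; the describeTransition helper below is illustrative only and not part of this change:

    import java.util.Set;

    import org.elasticsearch.xpack.application.connector.ConnectorSyncStatus;
    import org.elasticsearch.xpack.application.connector.syncjob.ConnectorSyncJobStateMachine;

    final class SyncJobTransitionMessages {
        // Turn a rejected transition into an actionable message using the new accessor.
        static String describeTransition(ConnectorSyncStatus current, ConnectorSyncStatus next) {
            Set<ConnectorSyncStatus> allowed = ConnectorSyncJobStateMachine.validNextStates(current);
            if (allowed.contains(next)) {
                return "[" + current + "] -> [" + next + "] is a valid transition";
            }
            return "Cannot move sync job from [" + current + "] to [" + next + "]; valid next states: " + allowed;
        }
    }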
b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/PostConnectorSyncJobAction.java @@ -7,21 +7,16 @@ package org.elasticsearch.xpack.application.connector.syncjob.action; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.application.connector.Connector; import org.elasticsearch.xpack.application.connector.ConnectorTemplateRegistry; import org.elasticsearch.xpack.application.connector.syncjob.ConnectorSyncJob; @@ -98,14 +93,6 @@ public ConnectorSyncJobTriggerMethod getTriggerMethod() { return triggerMethod; } - public static Request fromXContentBytes(BytesReference source, XContentType xContentType) { - try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { - return Request.fromXContent(parser); - } catch (IOException e) { - throw new ElasticsearchParseException("Failed to parse: " + source.utf8ToString(), e); - } - } - public static Request fromXContent(XContentParser parser) throws IOException { return PARSER.parse(parser, null); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestClaimConnectorSyncJobAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestClaimConnectorSyncJobAction.java index c048f43b6baa6..bea26e77ca531 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestClaimConnectorSyncJobAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestClaimConnectorSyncJobAction.java @@ -13,6 +13,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.application.EnterpriseSearch; import java.io.IOException; @@ -41,12 +42,13 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { - ClaimConnectorSyncJobAction.Request request = ClaimConnectorSyncJobAction.Request.fromXContentBytes( - restRequest.param(CONNECTOR_SYNC_JOB_ID_PARAM), - restRequest.content(), - restRequest.getXContentType() - ); - - return channel -> client.execute(ClaimConnectorSyncJobAction.INSTANCE, request, new RestToXContentListener<>(channel)); + try (XContentParser parser = restRequest.contentParser()) { + ClaimConnectorSyncJobAction.Request request = ClaimConnectorSyncJobAction.Request.fromXContent( + parser, + restRequest.param(CONNECTOR_SYNC_JOB_ID_PARAM) + ); + + return channel -> 
client.execute(ClaimConnectorSyncJobAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestPostConnectorSyncJobAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestPostConnectorSyncJobAction.java index eac645ab3dc77..66a620d22f753 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestPostConnectorSyncJobAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestPostConnectorSyncJobAction.java @@ -14,6 +14,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.application.EnterpriseSearch; import java.io.IOException; @@ -36,15 +37,13 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { - PostConnectorSyncJobAction.Request request = PostConnectorSyncJobAction.Request.fromXContentBytes( - restRequest.content(), - restRequest.getXContentType() - ); - - return channel -> client.execute( - PostConnectorSyncJobAction.INSTANCE, - request, - new RestToXContentListener<>(channel, r -> RestStatus.CREATED, r -> null) - ); + try (XContentParser parser = restRequest.contentParser()) { + PostConnectorSyncJobAction.Request request = PostConnectorSyncJobAction.Request.fromXContent(parser); + return channel -> client.execute( + PostConnectorSyncJobAction.INSTANCE, + request, + new RestToXContentListener<>(channel, r -> RestStatus.CREATED, r -> null) + ); + } } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestUpdateConnectorSyncJobErrorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestUpdateConnectorSyncJobErrorAction.java index 720bfdf416827..a158191a705ef 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestUpdateConnectorSyncJobErrorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestUpdateConnectorSyncJobErrorAction.java @@ -13,6 +13,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.application.EnterpriseSearch; import org.elasticsearch.xpack.application.connector.action.ConnectorUpdateActionResponse; @@ -41,16 +42,16 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { - UpdateConnectorSyncJobErrorAction.Request request = UpdateConnectorSyncJobErrorAction.Request.fromXContentBytes( - restRequest.param(CONNECTOR_SYNC_JOB_ID_PARAM), - restRequest.content(), - restRequest.getXContentType() - ); - - return restChannel -> client.execute( - UpdateConnectorSyncJobErrorAction.INSTANCE, - request, - new RestToXContentListener<>(restChannel, ConnectorUpdateActionResponse::status) - ); + try (XContentParser parser = restRequest.contentParser()) { + UpdateConnectorSyncJobErrorAction.Request 
request = UpdateConnectorSyncJobErrorAction.Request.fromXContent( + parser, + restRequest.param(CONNECTOR_SYNC_JOB_ID_PARAM) + ); + return restChannel -> client.execute( + UpdateConnectorSyncJobErrorAction.INSTANCE, + request, + new RestToXContentListener<>(restChannel, ConnectorUpdateActionResponse::status) + ); + } } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestUpdateConnectorSyncJobIngestionStatsAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestUpdateConnectorSyncJobIngestionStatsAction.java index d55d3ba87d1df..500da2a216b1e 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestUpdateConnectorSyncJobIngestionStatsAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestUpdateConnectorSyncJobIngestionStatsAction.java @@ -13,6 +13,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.application.EnterpriseSearch; import org.elasticsearch.xpack.application.connector.action.ConnectorUpdateActionResponse; @@ -40,16 +41,17 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { - UpdateConnectorSyncJobIngestionStatsAction.Request request = UpdateConnectorSyncJobIngestionStatsAction.Request.fromXContentBytes( - restRequest.param(CONNECTOR_SYNC_JOB_ID_PARAM), - restRequest.content(), - restRequest.getXContentType() - ); - - return channel -> client.execute( - UpdateConnectorSyncJobIngestionStatsAction.INSTANCE, - request, - new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) - ); + try (XContentParser parser = restRequest.contentParser()) { + UpdateConnectorSyncJobIngestionStatsAction.Request request = UpdateConnectorSyncJobIngestionStatsAction.Request.fromXContent( + parser, + restRequest.param(CONNECTOR_SYNC_JOB_ID_PARAM) + ); + + return channel -> client.execute( + UpdateConnectorSyncJobIngestionStatsAction.INSTANCE, + request, + new RestToXContentListener<>(channel, ConnectorUpdateActionResponse::status) + ); + } } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/UpdateConnectorSyncJobErrorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/UpdateConnectorSyncJobErrorAction.java index 3ce5d61e95fdb..2235ba7cfe720 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/UpdateConnectorSyncJobErrorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/UpdateConnectorSyncJobErrorAction.java @@ -7,20 +7,15 @@ package org.elasticsearch.xpack.application.connector.syncjob.action; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentHelper; 
import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.application.connector.action.ConnectorUpdateActionResponse; import org.elasticsearch.xpack.application.connector.syncjob.ConnectorSyncJob; import org.elasticsearch.xpack.application.connector.syncjob.ConnectorSyncJobConstants; @@ -66,14 +61,6 @@ public Request(String connectorSyncJobId, String error) { this.error = error; } - public static Request fromXContentBytes(String connectorSyncJobId, BytesReference source, XContentType xContentType) { - try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { - return UpdateConnectorSyncJobErrorAction.Request.fromXContent(parser, connectorSyncJobId); - } catch (IOException e) { - throw new ElasticsearchParseException("Failed to parse: " + source.utf8ToString()); - } - } - public static UpdateConnectorSyncJobErrorAction.Request fromXContent(XContentParser parser, String connectorSyncJobId) throws IOException { return PARSER.parse(parser, connectorSyncJobId); diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/UpdateConnectorSyncJobIngestionStatsAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/UpdateConnectorSyncJobIngestionStatsAction.java index d76f2c3b788fc..0fd9b6dec8184 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/UpdateConnectorSyncJobIngestionStatsAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/UpdateConnectorSyncJobIngestionStatsAction.java @@ -7,22 +7,17 @@ package org.elasticsearch.xpack.application.connector.syncjob.action; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.application.connector.Connector; import org.elasticsearch.xpack.application.connector.ConnectorUtils; import org.elasticsearch.xpack.application.connector.action.ConnectorUpdateActionResponse; @@ -30,6 +25,7 @@ import java.io.IOException; import java.time.Instant; +import java.util.Map; import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -57,6 +53,7 @@ public static class Request extends ConnectorSyncJobActionRequest implements ToX private final Long indexedDocumentVolume; private final Long totalDocumentCount; private final Instant 
lastSeen; + private final Map metadata; public Request(StreamInput in) throws IOException { super(in); @@ -66,6 +63,7 @@ public Request(StreamInput in) throws IOException { this.indexedDocumentVolume = in.readLong(); this.totalDocumentCount = in.readOptionalLong(); this.lastSeen = in.readOptionalInstant(); + this.metadata = in.readGenericMap(); } public Request( @@ -74,7 +72,8 @@ public Request( Long indexedDocumentCount, Long indexedDocumentVolume, Long totalDocumentCount, - Instant lastSeen + Instant lastSeen, + Map metadata ) { this.connectorSyncJobId = connectorSyncJobId; this.deletedDocumentCount = deletedDocumentCount; @@ -82,6 +81,7 @@ public Request( this.indexedDocumentVolume = indexedDocumentVolume; this.totalDocumentCount = totalDocumentCount; this.lastSeen = lastSeen; + this.metadata = metadata; } public String getConnectorSyncJobId() { @@ -108,6 +108,10 @@ public Instant getLastSeen() { return lastSeen; } + public Map getMetadata() { + return metadata; + } + @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; @@ -135,6 +139,7 @@ public ActionRequestValidationException validate() { return validationException; } + @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("connector_sync_job_update_ingestion_stats", false, (args, connectorSyncJobId) -> { Long deletedDocumentCount = (Long) args[0]; @@ -143,6 +148,7 @@ public ActionRequestValidationException validate() { Long totalDocumentVolume = args[3] != null ? (Long) args[3] : null; Instant lastSeen = args[4] != null ? (Instant) args[4] : null; + Map metadata = (Map) args[5]; return new Request( connectorSyncJobId, @@ -150,7 +156,8 @@ public ActionRequestValidationException validate() { indexedDocumentCount, indexedDocumentVolume, totalDocumentVolume, - lastSeen + lastSeen, + metadata ); }); @@ -165,18 +172,7 @@ public ActionRequestValidationException validate() { ConnectorSyncJob.LAST_SEEN_FIELD, ObjectParser.ValueType.OBJECT_OR_STRING ); - } - - public static UpdateConnectorSyncJobIngestionStatsAction.Request fromXContentBytes( - String connectorSyncJobId, - BytesReference source, - XContentType xContentType - ) { - try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { - return UpdateConnectorSyncJobIngestionStatsAction.Request.fromXContent(parser, connectorSyncJobId); - } catch (IOException e) { - throw new ElasticsearchParseException("Failed to parse: " + source.utf8ToString()); - } + PARSER.declareObject(optionalConstructorArg(), (p, c) -> p.map(), ConnectorSyncJob.METADATA_FIELD); } public static Request fromXContent(XContentParser parser, String connectorSyncJobId) throws IOException { @@ -192,6 +188,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(ConnectorSyncJob.INDEXED_DOCUMENT_VOLUME_FIELD.getPreferredName(), indexedDocumentVolume); builder.field(ConnectorSyncJob.TOTAL_DOCUMENT_COUNT_FIELD.getPreferredName(), totalDocumentCount); builder.field(ConnectorSyncJob.LAST_SEEN_FIELD.getPreferredName(), lastSeen); + builder.field(ConnectorSyncJob.METADATA_FIELD.getPreferredName(), metadata); } builder.endObject(); return builder; @@ -206,6 +203,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(indexedDocumentVolume); out.writeOptionalLong(totalDocumentCount); out.writeOptionalInstant(lastSeen); + out.writeGenericMap(metadata); } @Override @@ -218,7 +216,8 
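UpdateConnectorSyncJobIngestionStatsAction.Request (hunks above) now carries an optional metadata map end to end: it is read with in.readGenericMap(), written with out.writeGenericMap(metadata), declared on the parser as an optional object, and rendered in toXContent; ConnectorSyncJob.METADATA_FIELD was widened to public so this action package can reference it, and ConnectorSyncJobIndexService only adds the field to the update doc when the map is non-null. A construction sketch following the argument order shown in this hunk; syncJobId and the metadata keys are placeholders:

    // Hypothetical caller attaching connector-reported metadata alongside the ingestion counters.
    // Assumes java.time.Instant and java.util.Map are imported.
    Map<String, Object> metadata = Map.of("source", "sharepoint", "cursor_docs_seen", 42);
    UpdateConnectorSyncJobIngestionStatsAction.Request request = new UpdateConnectorSyncJobIngestionStatsAction.Request(
        syncJobId,     // connector sync job id
        0L,            // deletedDocumentCount
        100L,          // indexedDocumentCount
        2048L,         // indexedDocumentVolume
        100L,          // totalDocumentCount
        Instant.now(), // lastSeen
        metadata       // new in this change; passing null leaves the stored metadata untouched
    );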
@@ public boolean equals(Object o) { && Objects.equals(indexedDocumentCount, request.indexedDocumentCount) && Objects.equals(indexedDocumentVolume, request.indexedDocumentVolume) && Objects.equals(totalDocumentCount, request.totalDocumentCount) - && Objects.equals(lastSeen, request.lastSeen); + && Objects.equals(lastSeen, request.lastSeen) + && Objects.equals(metadata, request.metadata); } @Override @@ -229,7 +228,8 @@ public int hashCode() { indexedDocumentCount, indexedDocumentVolume, totalDocumentCount, - lastSeen + lastSeen, + metadata ); } } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorIndexServiceTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorIndexServiceTests.java index a696c6e6dde54..12abca3a78591 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorIndexServiceTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorIndexServiceTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.xpack.application.connector.action.ConnectorCreateActionResponse; import org.elasticsearch.xpack.application.connector.action.UpdateConnectorApiKeyIdAction; import org.elasticsearch.xpack.application.connector.action.UpdateConnectorConfigurationAction; -import org.elasticsearch.xpack.application.connector.action.UpdateConnectorErrorAction; import org.elasticsearch.xpack.application.connector.action.UpdateConnectorIndexNameAction; import org.elasticsearch.xpack.application.connector.action.UpdateConnectorLastSeenAction; import org.elasticsearch.xpack.application.connector.action.UpdateConnectorLastSyncStatsAction; @@ -712,17 +711,14 @@ public void testUpdateConnectorError() throws Exception { String connectorId = randomUUID(); ConnectorCreateActionResponse resp = awaitCreateConnector(connectorId, connector); assertThat(resp.status(), anyOf(equalTo(RestStatus.CREATED), equalTo(RestStatus.OK))); + String error = randomAlphaOfLengthBetween(5, 15); - UpdateConnectorErrorAction.Request updateErrorRequest = new UpdateConnectorErrorAction.Request( - connectorId, - randomAlphaOfLengthBetween(5, 15) - ); - - DocWriteResponse updateResponse = awaitUpdateConnectorError(updateErrorRequest); + DocWriteResponse updateResponse = awaitUpdateConnectorError(connectorId, error); assertThat(updateResponse.status(), equalTo(RestStatus.OK)); Connector indexedConnector = awaitGetConnector(connectorId); - assertThat(updateErrorRequest.getError(), equalTo(indexedConnector.getError())); + assertThat(indexedConnector.getError(), equalTo(error)); + assertThat(indexedConnector.getStatus(), equalTo(ConnectorStatus.ERROR)); } public void testUpdateConnectorError_resetWithNull() throws Exception { @@ -731,13 +727,12 @@ public void testUpdateConnectorError_resetWithNull() throws Exception { ConnectorCreateActionResponse resp = awaitCreateConnector(connectorId, connector); assertThat(resp.status(), anyOf(equalTo(RestStatus.CREATED), equalTo(RestStatus.OK))); - UpdateConnectorErrorAction.Request updateErrorRequest = new UpdateConnectorErrorAction.Request(connectorId, null); - - DocWriteResponse updateResponse = awaitUpdateConnectorError(updateErrorRequest); + DocWriteResponse updateResponse = awaitUpdateConnectorError(connectorId, null); assertThat(updateResponse.status(), equalTo(RestStatus.OK)); Connector indexedConnector = awaitGetConnector(connectorId); - assertThat(updateErrorRequest.getError(), 
equalTo(indexedConnector.getError())); + assertNull(indexedConnector.getError()); + assertThat(indexedConnector.getStatus(), equalTo(ConnectorStatus.CONNECTED)); } public void testUpdateConnectorNameOrDescription() throws Exception { @@ -802,7 +797,7 @@ public void testUpdateConnectorStatus_WithInvalidStatus() throws Exception { Connector connector = ConnectorTestUtils.getRandomConnector(); String connectorId = randomUUID(); - ConnectorCreateActionResponse resp = awaitCreateConnector(connectorId, connector); + awaitCreateConnector(connectorId, connector); Connector indexedConnector = awaitGetConnector(connectorId); ConnectorStatus newInvalidStatus = ConnectorTestUtils.getRandomInvalidConnectorNextStatus(indexedConnector.getStatus()); @@ -1347,11 +1342,11 @@ public void onFailure(Exception e) { return resp.get(); } - private UpdateResponse awaitUpdateConnectorError(UpdateConnectorErrorAction.Request updatedError) throws Exception { + private UpdateResponse awaitUpdateConnectorError(String connectorId, String error) throws Exception { CountDownLatch latch = new CountDownLatch(1); final AtomicReference resp = new AtomicReference<>(null); final AtomicReference exc = new AtomicReference<>(null); - connectorIndexService.updateConnectorError(updatedError.getConnectorId(), updatedError.getError(), new ActionListener<>() { + connectorIndexService.updateConnectorError(connectorId, error, new ActionListener<>() { @Override public void onResponse(UpdateResponse indexResponse) { resp.set(indexResponse); diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorStateMachineTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorStateMachineTests.java index d1f08f80d02f2..739ad44fd6c4c 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorStateMachineTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorStateMachineTests.java @@ -17,6 +17,7 @@ public void testValidTransitionFromCreated() { } public void testInvalidTransitionFromCreated() { + assertFalse(ConnectorStateMachine.isValidTransition(ConnectorStatus.CREATED, ConnectorStatus.CREATED)); assertFalse(ConnectorStateMachine.isValidTransition(ConnectorStatus.CREATED, ConnectorStatus.CONFIGURED)); assertFalse(ConnectorStateMachine.isValidTransition(ConnectorStatus.CREATED, ConnectorStatus.CONNECTED)); } @@ -28,12 +29,14 @@ public void testValidTransitionFromNeedsConfiguration() { public void testInvalidTransitionFromNeedsConfiguration() { assertFalse(ConnectorStateMachine.isValidTransition(ConnectorStatus.NEEDS_CONFIGURATION, ConnectorStatus.CREATED)); assertFalse(ConnectorStateMachine.isValidTransition(ConnectorStatus.NEEDS_CONFIGURATION, ConnectorStatus.CONNECTED)); + assertFalse(ConnectorStateMachine.isValidTransition(ConnectorStatus.NEEDS_CONFIGURATION, ConnectorStatus.NEEDS_CONFIGURATION)); } public void testValidTransitionFromConfigured() { assertTrue(ConnectorStateMachine.isValidTransition(ConnectorStatus.CONFIGURED, ConnectorStatus.NEEDS_CONFIGURATION)); assertTrue(ConnectorStateMachine.isValidTransition(ConnectorStatus.CONFIGURED, ConnectorStatus.CONNECTED)); assertTrue(ConnectorStateMachine.isValidTransition(ConnectorStatus.CONFIGURED, ConnectorStatus.ERROR)); + assertTrue(ConnectorStateMachine.isValidTransition(ConnectorStatus.CONFIGURED, ConnectorStatus.CONFIGURED)); } public void testInvalidTransitionFromConfigured() { @@ -43,6 +46,7 
@@ public void testInvalidTransitionFromConfigured() { public void testValidTransitionFromConnected() { assertTrue(ConnectorStateMachine.isValidTransition(ConnectorStatus.CONNECTED, ConnectorStatus.CONFIGURED)); assertTrue(ConnectorStateMachine.isValidTransition(ConnectorStatus.CONNECTED, ConnectorStatus.ERROR)); + assertTrue(ConnectorStateMachine.isValidTransition(ConnectorStatus.CONNECTED, ConnectorStatus.CONNECTED)); } public void testInvalidTransitionFromConnected() { @@ -53,6 +57,7 @@ public void testInvalidTransitionFromConnected() { public void testValidTransitionFromError() { assertTrue(ConnectorStateMachine.isValidTransition(ConnectorStatus.ERROR, ConnectorStatus.CONNECTED)); assertTrue(ConnectorStateMachine.isValidTransition(ConnectorStatus.ERROR, ConnectorStatus.CONFIGURED)); + assertTrue(ConnectorStateMachine.isValidTransition(ConnectorStatus.ERROR, ConnectorStatus.ERROR)); } public void testInvalidTransitionFromError() { @@ -60,12 +65,6 @@ public void testInvalidTransitionFromError() { assertFalse(ConnectorStateMachine.isValidTransition(ConnectorStatus.ERROR, ConnectorStatus.NEEDS_CONFIGURATION)); } - public void testTransitionToSameState() { - for (ConnectorStatus state : ConnectorStatus.values()) { - assertFalse("Transition from " + state + " to itself should be invalid", ConnectorStateMachine.isValidTransition(state, state)); - } - } - public void testAssertValidStateTransition_ExpectExceptionOnInvalidTransition() { assertThrows( ConnectorInvalidStatusTransitionException.class, diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorApiKeyIdActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorApiKeyIdActionTests.java new file mode 100644 index 0000000000000..53d5de565f9f8 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorApiKeyIdActionTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
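Two behavioral changes are pinned down by the test hunks above. In ConnectorIndexServiceTests, updating the connector error now also checks the resulting status: a non-null error leaves the connector in ConnectorStatus.ERROR, and resetting the error to null returns it to ConnectorStatus.CONNECTED. In ConnectorStateMachineTests, testTransitionToSameState (which required every self-transition to be invalid) is removed and the allowed self-transitions are asserted explicitly. Restated from those assertions (this is the connector state machine, not the sync-job one):

    // Self-transitions per the updated ConnectorStateMachineTests:
    assertTrue(ConnectorStateMachine.isValidTransition(ConnectorStatus.CONFIGURED, ConnectorStatus.CONFIGURED));
    assertTrue(ConnectorStateMachine.isValidTransition(ConnectorStatus.CONNECTED, ConnectorStatus.CONNECTED));
    assertTrue(ConnectorStateMachine.isValidTransition(ConnectorStatus.ERROR, ConnectorStatus.ERROR));
    assertFalse(ConnectorStateMachine.isValidTransition(ConnectorStatus.CREATED, ConnectorStatus.CREATED));
    assertFalse(ConnectorStateMachine.isValidTransition(ConnectorStatus.NEEDS_CONFIGURATION, ConnectorStatus.NEEDS_CONFIGURATION));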
+ */ + +package org.elasticsearch.xpack.application.connector.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; +import static org.mockito.Mockito.mock; + +public class RestUpdateConnectorApiKeyIdActionTests extends ESTestCase { + + private RestUpdateConnectorApiKeyIdAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + action = new RestUpdateConnectorApiKeyIdAction(); + } + + public void testPrepareRequest_emptyPayload_badRequestError() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) + .withPath("/_connector/123/_api_key_id") + .build(); + + final ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat(e, hasToString(containsString("request body is required"))); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorConfigurationActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorConfigurationActionTests.java new file mode 100644 index 0000000000000..6712d98dc1c69 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorConfigurationActionTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
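The new Rest*ActionTests files added from here on (api key id, configuration, error, features, filtering, filtering validation) are identical apart from the handler under test and the request path: each builds an empty-body PUT with FakeRestRequest and expects an ElasticsearchParseException whose message contains "request body is required". That is the failure the handlers now surface up front, since restRequest.contentParser() requires a body before any action-level parsing runs.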
+ */ + +package org.elasticsearch.xpack.application.connector.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; +import static org.mockito.Mockito.mock; + +public class RestUpdateConnectorConfigurationActionTests extends ESTestCase { + + private RestUpdateConnectorConfigurationAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + action = new RestUpdateConnectorConfigurationAction(); + } + + public void testPrepareRequest_emptyPayload_badRequestError() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) + .withPath("/_connector/123/_configuration") + .build(); + + final ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat(e, hasToString(containsString("request body is required"))); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorErrorActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorErrorActionTests.java new file mode 100644 index 0000000000000..71a49488d5d21 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorErrorActionTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; +import static org.mockito.Mockito.mock; + +public class RestUpdateConnectorErrorActionTests extends ESTestCase { + + private RestUpdateConnectorErrorAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + action = new RestUpdateConnectorErrorAction(); + } + + public void testPrepareRequest_emptyPayload_badRequestError() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) + .withPath("/_connector/123/_error") + .build(); + + final ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat(e, hasToString(containsString("request body is required"))); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFeaturesActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFeaturesActionTests.java new file mode 100644 index 0000000000000..8728efbc23fea --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFeaturesActionTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; +import static org.mockito.Mockito.mock; + +public class RestUpdateConnectorFeaturesActionTests extends ESTestCase { + + private RestUpdateConnectorFeaturesAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + action = new RestUpdateConnectorFeaturesAction(); + } + + public void testPrepareRequest_emptyPayload_badRequestError() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) + .withPath("/_connector/123/_features") + .build(); + + final ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat(e, hasToString(containsString("request body is required"))); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFilteringActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFilteringActionTests.java new file mode 100644 index 0000000000000..3a2e009758625 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFilteringActionTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; +import static org.mockito.Mockito.mock; + +public class RestUpdateConnectorFilteringActionTests extends ESTestCase { + + private RestUpdateConnectorFilteringAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + action = new RestUpdateConnectorFilteringAction(); + } + + public void testPrepareRequest_emptyPayload_badRequestError() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) + .withPath("/_connector/123/_filtering") + .build(); + + final ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat(e, hasToString(containsString("request body is required"))); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFilteringValidationActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFilteringValidationActionTests.java new file mode 100644 index 0000000000000..812f2a7c5ab5e --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFilteringValidationActionTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; +import static org.mockito.Mockito.mock; + +public class RestUpdateConnectorFilteringValidationActionTests extends ESTestCase { + + private RestUpdateConnectorFilteringValidationAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + action = new RestUpdateConnectorFilteringValidationAction(); + } + + public void testPrepareRequest_emptyPayload_badRequestError() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) + .withPath("/_connector/123/_filtering/_validation") + .build(); + + final ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat(e, hasToString(containsString("request body is required"))); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorIndexNameActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorIndexNameActionTests.java new file mode 100644 index 0000000000000..18839e02dd7a3 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorIndexNameActionTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; +import static org.mockito.Mockito.mock; + +public class RestUpdateConnectorIndexNameActionTests extends ESTestCase { + + private RestUpdateConnectorIndexNameAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + action = new RestUpdateConnectorIndexNameAction(); + } + + public void testPrepareRequest_emptyPayload_badRequestError() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) + .withPath("/_connector/123/_index_name") + .build(); + + final ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat(e, hasToString(containsString("request body is required"))); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorLastSyncStatsActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorLastSyncStatsActionTests.java new file mode 100644 index 0000000000000..49f0c239debab --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorLastSyncStatsActionTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; +import static org.mockito.Mockito.mock; + +public class RestUpdateConnectorLastSyncStatsActionTests extends ESTestCase { + + private RestUpdateConnectorLastSyncStatsAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + action = new RestUpdateConnectorLastSyncStatsAction(); + } + + public void testPrepareRequest_emptyPayload_badRequestError() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) + .withPath("/_connector/123/_last_sync") + .build(); + + final ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat(e, hasToString(containsString("request body is required"))); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorNameActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorNameActionTests.java new file mode 100644 index 0000000000000..10b0a7cebf94e --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorNameActionTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; +import static org.mockito.Mockito.mock; + +public class RestUpdateConnectorNameActionTests extends ESTestCase { + + private RestUpdateConnectorNameAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + action = new RestUpdateConnectorNameAction(); + } + + public void testPrepareRequest_emptyPayload_badRequestError() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) + .withPath("/_connector/123/_name") + .build(); + + final ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat(e, hasToString(containsString("request body is required"))); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorNativeActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorNativeActionTests.java new file mode 100644 index 0000000000000..8b65aad215efe --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorNativeActionTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; +import static org.mockito.Mockito.mock; + +public class RestUpdateConnectorNativeActionTests extends ESTestCase { + + private RestUpdateConnectorNativeAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + action = new RestUpdateConnectorNativeAction(); + } + + public void testPrepareRequest_emptyPayload_badRequestError() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) + .withPath("/_connector/123/_native") + .build(); + + final ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat(e, hasToString(containsString("request body is required"))); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorPipelineActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorPipelineActionTests.java new file mode 100644 index 0000000000000..a5bb4e0689696 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorPipelineActionTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; +import static org.mockito.Mockito.mock; + +public class RestUpdateConnectorPipelineActionTests extends ESTestCase { + + private RestUpdateConnectorPipelineAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + action = new RestUpdateConnectorPipelineAction(); + } + + public void testPrepareRequest_emptyPayload_badRequestError() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) + .withPath("/_connector/123/_pipeline") + .build(); + + final ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat(e, hasToString(containsString("request body is required"))); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorSchedulingActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorSchedulingActionTests.java new file mode 100644 index 0000000000000..91b26f1d9ada8 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorSchedulingActionTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; +import static org.mockito.Mockito.mock; + +public class RestUpdateConnectorSchedulingActionTests extends ESTestCase { + + private RestUpdateConnectorSchedulingAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + action = new RestUpdateConnectorSchedulingAction(); + } + + public void testPrepareRequest_emptyPayload_badRequestError() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) + .withPath("/_connector/123/_scheduling") + .build(); + + final ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat(e, hasToString(containsString("request body is required"))); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorServiceTypeActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorServiceTypeActionTests.java new file mode 100644 index 0000000000000..16657f17e5d27 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorServiceTypeActionTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; +import static org.mockito.Mockito.mock; + +public class RestUpdateConnectorServiceTypeActionTests extends ESTestCase { + + private RestUpdateConnectorServiceTypeAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + action = new RestUpdateConnectorServiceTypeAction(); + } + + public void testPrepareRequest_emptyPayload_badRequestError() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) + .withPath("/_connector/123/_service_type") + .build(); + + final ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat(e, hasToString(containsString("request body is required"))); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorStatusActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorStatusActionTests.java new file mode 100644 index 0000000000000..fc083ede81395 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorStatusActionTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; +import static org.mockito.Mockito.mock; + +public class RestUpdateConnectorStatusActionTests extends ESTestCase { + + private RestUpdateConnectorStatusAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + action = new RestUpdateConnectorStatusAction(); + } + + public void testPrepareRequest_emptyPayload_badRequestError() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) + .withPath("/_connector/123/_status") + .build(); + + final ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat(e, hasToString(containsString("request body is required"))); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexServiceTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexServiceTests.java index b9a77adc12a3c..f6c0a54f107b4 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexServiceTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexServiceTests.java @@ -784,6 +784,7 @@ public void testUpdateConnectorSyncJobError_WithStatusPending_ExpectStatusExcept assertThrows(ElasticsearchStatusException.class, () -> awaitUpdateConnectorSyncJob(syncJobId, "some error")); } + @SuppressWarnings("unchecked") public void testUpdateConnectorSyncJobIngestionStats() throws Exception { PostConnectorSyncJobAction.Request syncJobRequest = ConnectorSyncJobTestUtils.getRandomPostConnectorSyncJobActionRequest( connectorOneId @@ -802,6 +803,7 @@ public void testUpdateConnectorSyncJobIngestionStats() throws Exception { Long requestIndexedDocumentVolume = request.getIndexedDocumentVolume(); Long requestTotalDocumentCount = request.getTotalDocumentCount(); Instant requestLastSeen = request.getLastSeen(); + Map metadata = request.getMetadata(); Long deletedDocumentCountAfterUpdate = (Long) syncJobSourceAfterUpdate.get( ConnectorSyncJob.DELETED_DOCUMENT_COUNT_FIELD.getPreferredName() @@ -818,6 +820,9 @@ public void testUpdateConnectorSyncJobIngestionStats() throws Exception { Instant lastSeenAfterUpdate = Instant.parse( (String) syncJobSourceAfterUpdate.get(ConnectorSyncJob.LAST_SEEN_FIELD.getPreferredName()) ); + Map metadataAfterUpdate = (Map) syncJobSourceAfterUpdate.get( + ConnectorSyncJob.METADATA_FIELD.getPreferredName() + ); assertThat(updateResponse.status(), equalTo(RestStatus.OK)); assertThat(deletedDocumentCountAfterUpdate, equalTo(requestDeletedDocumentCount)); @@ -825,6 +830,7 @@ public void testUpdateConnectorSyncJobIngestionStats() throws Exception { assertThat(indexedDocumentVolumeAfterUpdate, equalTo(requestIndexedDocumentVolume)); assertThat(totalDocumentCountAfterUpdate, equalTo(requestTotalDocumentCount)); assertThat(lastSeenAfterUpdate, equalTo(requestLastSeen)); + 
assertThat(metadataAfterUpdate, equalTo(metadata)); assertFieldsExceptAllIngestionStatsDidNotUpdate(syncJobSourceBeforeUpdate, syncJobSourceAfterUpdate); } @@ -838,12 +844,14 @@ public void testUpdateConnectorSyncJobIngestionStats_WithoutLastSeen_ExpectUpdat Instant lastSeenBeforeUpdate = Instant.parse( (String) syncJobSourceBeforeUpdate.get(ConnectorSyncJob.LAST_SEEN_FIELD.getPreferredName()) ); + UpdateConnectorSyncJobIngestionStatsAction.Request request = new UpdateConnectorSyncJobIngestionStatsAction.Request( syncJobId, 10L, 20L, 100L, 10L, + null, null ); @@ -866,7 +874,7 @@ public void testUpdateConnectorSyncJobIngestionStats_WithMissingSyncJobId_Expect expectThrows( ResourceNotFoundException.class, () -> awaitUpdateConnectorSyncJobIngestionStats( - new UpdateConnectorSyncJobIngestionStatsAction.Request(NON_EXISTING_SYNC_JOB_ID, 0L, 0L, 0L, 0L, Instant.now()) + new UpdateConnectorSyncJobIngestionStatsAction.Request(NON_EXISTING_SYNC_JOB_ID, 0L, 0L, 0L, 0L, Instant.now(), null) ) ); } @@ -1067,7 +1075,8 @@ private static void assertFieldsExceptAllIngestionStatsDidNotUpdate( ConnectorSyncJob.INDEXED_DOCUMENT_COUNT_FIELD, ConnectorSyncJob.INDEXED_DOCUMENT_VOLUME_FIELD, ConnectorSyncJob.TOTAL_DOCUMENT_COUNT_FIELD, - ConnectorSyncJob.LAST_SEEN_FIELD + ConnectorSyncJob.LAST_SEEN_FIELD, + ConnectorSyncJob.METADATA_FIELD ) ); } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTestUtils.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTestUtils.java index a4ff76e6f2cf9..e72bf04fb7e55 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTestUtils.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTestUtils.java @@ -160,7 +160,8 @@ public static UpdateConnectorSyncJobIngestionStatsAction.Request getRandomUpdate randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), - randomInstantBetween(lowerBoundInstant, upperBoundInstant) + randomInstantBetween(lowerBoundInstant, upperBoundInstant), + randomMap(2, 3, () -> new Tuple<>(randomAlphaOfLength(4), randomAlphaOfLength(4))) ); } @@ -176,7 +177,8 @@ public static UpdateConnectorSyncJobIngestionStatsAction.Request getRandomUpdate randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), - randomInstantBetween(lowerBoundInstant, upperBoundInstant) + randomInstantBetween(lowerBoundInstant, upperBoundInstant), + randomMap(2, 3, () -> new Tuple<>(randomAlphaOfLength(4), randomAlphaOfLength(4))) ); } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestClaimConnectorSyncJobActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestClaimConnectorSyncJobActionTests.java new file mode 100644 index 0000000000000..567fe803a250e --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestClaimConnectorSyncJobActionTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.syncjob.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; +import static org.mockito.Mockito.mock; + +public class RestClaimConnectorSyncJobActionTests extends ESTestCase { + + private RestClaimConnectorSyncJobAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + action = new RestClaimConnectorSyncJobAction(); + } + + public void testPrepareRequest_emptyPayload_badRequestError() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) + .withPath("/_connector/_sync_job/456/_claim") + .build(); + + final ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat(e, hasToString(containsString("request body is required"))); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestPostConnectorSyncJobActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestPostConnectorSyncJobActionTests.java new file mode 100644 index 0000000000000..231e0d2b14144 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestPostConnectorSyncJobActionTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.syncjob.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; +import static org.mockito.Mockito.mock; + +public class RestPostConnectorSyncJobActionTests extends ESTestCase { + + private RestPostConnectorSyncJobAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + action = new RestPostConnectorSyncJobAction(); + } + + public void testPrepareRequest_emptyPayload_badRequestError() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) + .withPath("/_connector/_sync_job") + .build(); + + final ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat(e, hasToString(containsString("request body is required"))); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestUpdateConnectorSyncJobErrorActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestUpdateConnectorSyncJobErrorActionTests.java new file mode 100644 index 0000000000000..19fd2df9dc3b6 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestUpdateConnectorSyncJobErrorActionTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.syncjob.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; +import static org.mockito.Mockito.mock; + +public class RestUpdateConnectorSyncJobErrorActionTests extends ESTestCase { + + private RestUpdateConnectorSyncJobErrorAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + action = new RestUpdateConnectorSyncJobErrorAction(); + } + + public void testPrepareRequest_emptyPayload_badRequestError() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) + .withPath("/_connector/_sync_job/456/_error") + .build(); + + final ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat(e, hasToString(containsString("request body is required"))); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestUpdateConnectorSyncJobIngestionStatsActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestUpdateConnectorSyncJobIngestionStatsActionTests.java new file mode 100644 index 0000000000000..edb0250524049 --- /dev/null +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestUpdateConnectorSyncJobIngestionStatsActionTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.application.connector.syncjob.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; +import static org.mockito.Mockito.mock; + +public class RestUpdateConnectorSyncJobIngestionStatsActionTests extends ESTestCase { + + private RestUpdateConnectorSyncJobIngestionStatsAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + action = new RestUpdateConnectorSyncJobIngestionStatsAction(); + } + + public void testPrepareRequest_emptyPayload_badRequestError() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) + .withPath("/_connector/_sync_job/456/_stats") + .build(); + + final ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat(e, hasToString(containsString("request body is required"))); + } +} diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/UpdateConnectorSyncJobIngestionStatsActionRequestBWCSerializingTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/UpdateConnectorSyncJobIngestionStatsActionRequestBWCSerializingTests.java index 6e2178d8341cf..ff586ae28109a 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/UpdateConnectorSyncJobIngestionStatsActionRequestBWCSerializingTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/UpdateConnectorSyncJobIngestionStatsActionRequestBWCSerializingTests.java @@ -55,7 +55,8 @@ protected UpdateConnectorSyncJobIngestionStatsAction.Request mutateInstanceForVe instance.getIndexedDocumentCount(), instance.getIndexedDocumentVolume(), instance.getTotalDocumentCount(), - instance.getLastSeen() + instance.getLastSeen(), + instance.getMetadata() ); } } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/UpdateConnectorSyncJobIngestionStatsActionRequestTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/UpdateConnectorSyncJobIngestionStatsActionRequestTests.java index 4f78ad3ffa7e7..1f3ca480ee1c8 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/UpdateConnectorSyncJobIngestionStatsActionRequestTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/action/UpdateConnectorSyncJobIngestionStatsActionRequestTests.java @@ -8,22 +8,46 @@ package org.elasticsearch.xpack.application.connector.syncjob.action; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentParseException; +import 
org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.application.connector.syncjob.ConnectorSyncJobTestUtils; +import org.junit.Before; +import java.io.IOException; import java.time.Instant; +import java.util.List; +import java.util.Map; +import static java.util.Collections.emptyList; import static org.elasticsearch.xpack.application.connector.syncjob.ConnectorSyncJobConstants.EMPTY_CONNECTOR_SYNC_JOB_ID_ERROR_MESSAGE; import static org.elasticsearch.xpack.application.connector.syncjob.action.UpdateConnectorSyncJobIngestionStatsAction.Request.DELETED_DOCUMENT_COUNT_NEGATIVE_ERROR_MESSAGE; import static org.elasticsearch.xpack.application.connector.syncjob.action.UpdateConnectorSyncJobIngestionStatsAction.Request.INDEXED_DOCUMENT_COUNT_NEGATIVE_ERROR_MESSAGE; import static org.elasticsearch.xpack.application.connector.syncjob.action.UpdateConnectorSyncJobIngestionStatsAction.Request.INDEXED_DOCUMENT_VOLUME_NEGATIVE_ERROR_MESSAGE; import static org.elasticsearch.xpack.application.connector.syncjob.action.UpdateConnectorSyncJobIngestionStatsAction.Request.TOTAL_DOCUMENT_COUNT_NEGATIVE_ERROR_MESSAGE; +import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; public class UpdateConnectorSyncJobIngestionStatsActionRequestTests extends ESTestCase { + private NamedWriteableRegistry namedWriteableRegistry; + + @Before + public void registerNamedObjects() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList()); + + List namedWriteables = searchModule.getNamedWriteables(); + namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables); + } + public void testValidate_WhenRequestIsValid_ExpectNoValidationError() { UpdateConnectorSyncJobIngestionStatsAction.Request request = ConnectorSyncJobTestUtils .getRandomUpdateConnectorSyncJobIngestionStatsActionRequest(); @@ -39,7 +63,8 @@ public void testValidate_WhenConnectorSyncJobIdIsEmpty_ExpectValidationError() { 0L, 0L, 0L, - Instant.now() + Instant.now(), + null ); ActionRequestValidationException exception = request.validate(); @@ -54,7 +79,8 @@ public void testValidate_WhenConnectorSyncJobIdIsNull_ExpectValidationError() { 0L, 0L, 0L, - Instant.now() + Instant.now(), + null ); ActionRequestValidationException exception = request.validate(); @@ -69,7 +95,8 @@ public void testValidate_WhenDeletedDocumentCountIsNegative_ExpectValidationErro 0L, 0L, 0L, - Instant.now() + Instant.now(), + null ); ActionRequestValidationException exception = request.validate(); @@ -84,7 +111,8 @@ public void testValidate_WhenIndexedDocumentCountIsNegative_ExpectValidationErro -10L, 0L, 0L, - Instant.now() + Instant.now(), + null ); ActionRequestValidationException exception = request.validate(); @@ -99,7 +127,8 @@ public void testValidate_WhenIndexedDocumentVolumeIsNegative_ExpectValidationErr 0L, -10L, 0L, - Instant.now() + Instant.now(), + null ); ActionRequestValidationException exception = request.validate(); @@ -114,11 +143,92 @@ public void testValidate_WhenTotalDocumentCountIsNegative_ExpectValidationError( 0L, 0L, -10L, - Instant.now() + Instant.now(), + null ); ActionRequestValidationException exception = request.validate(); assertThat(exception, notNullValue()); assertThat(exception.getMessage(), containsString(TOTAL_DOCUMENT_COUNT_NEGATIVE_ERROR_MESSAGE)); } + + public void 
testParseRequest_requiredFields_validRequest() throws IOException { + String requestPayload = XContentHelper.stripWhitespace(""" + { + "deleted_document_count": 10, + "indexed_document_count": 20, + "indexed_document_volume": 1000 + } + """); + + UpdateConnectorSyncJobIngestionStatsAction.Request request = UpdateConnectorSyncJobIngestionStatsAction.Request.fromXContent( + XContentHelper.createParser(XContentParserConfiguration.EMPTY, new BytesArray(requestPayload), XContentType.JSON), + randomUUID() + ); + + assertThat(request.getDeletedDocumentCount(), equalTo(10L)); + assertThat(request.getIndexedDocumentCount(), equalTo(20L)); + assertThat(request.getIndexedDocumentVolume(), equalTo(1000L)); + } + + public void testParseRequest_allFieldsWithoutLastSeen_validRequest() throws IOException { + String requestPayload = XContentHelper.stripWhitespace(""" + { + "deleted_document_count": 10, + "indexed_document_count": 20, + "indexed_document_volume": 1000, + "total_document_count": 55, + "metadata": {"key1": 1, "key2": 2} + } + """); + + UpdateConnectorSyncJobIngestionStatsAction.Request request = UpdateConnectorSyncJobIngestionStatsAction.Request.fromXContent( + XContentHelper.createParser(XContentParserConfiguration.EMPTY, new BytesArray(requestPayload), XContentType.JSON), + randomUUID() + ); + + assertThat(request.getDeletedDocumentCount(), equalTo(10L)); + assertThat(request.getIndexedDocumentCount(), equalTo(20L)); + assertThat(request.getIndexedDocumentVolume(), equalTo(1000L)); + assertThat(request.getTotalDocumentCount(), equalTo(55L)); + assertThat(request.getMetadata(), equalTo(Map.of("key1", 1, "key2", 2))); + } + + public void testParseRequest_metadataTypeInt_invalidRequest() throws IOException { + String requestPayload = XContentHelper.stripWhitespace(""" + { + "deleted_document_count": 10, + "indexed_document_count": 20, + "indexed_document_volume": 1000, + "metadata": 42 + } + """); + + expectThrows( + XContentParseException.class, + () -> UpdateConnectorSyncJobIngestionStatsAction.Request.fromXContent( + XContentHelper.createParser(XContentParserConfiguration.EMPTY, new BytesArray(requestPayload), XContentType.JSON), + randomUUID() + ) + ); + } + + public void testParseRequest_metadataTypeString_invalidRequest() throws IOException { + String requestPayload = XContentHelper.stripWhitespace(""" + { + "deleted_document_count": 10, + "indexed_document_count": 20, + "indexed_document_volume": 1000, + "metadata": "I'm a wrong metadata type" + } + """); + + expectThrows( + XContentParseException.class, + () -> UpdateConnectorSyncJobIngestionStatsAction.Request.fromXContent( + XContentHelper.createParser(XContentParserConfiguration.EMPTY, new BytesArray(requestPayload), XContentType.JSON), + randomUUID() + ) + ); + } } diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java index 9293ffc40ec53..c001b312d5578 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java @@ -245,7 +245,9 @@ public void testEqlCBCleanedUp_on_ParentCBBreak() { final int searchRequestsExpectedCount = 2; // let the parent circuit breaker fail, setting its limit to zero - Settings settings = 
Settings.builder().put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), 0).build(); + Settings settings = Settings.builder() + .put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "0%") + .build(); try ( CircuitBreakerService service = new HierarchyCircuitBreakerService( @@ -277,6 +279,7 @@ public void testEqlCBCleanedUp_on_ParentCBBreak() { TumblingWindow window = new TumblingWindow(eqlClient, criteria, null, matcher, Collections.emptyList()); window.execute(wrap(p -> fail(), ex -> assertTrue(ex instanceof CircuitBreakingException))); } + assertCriticalWarnings("[indices.breaker.total.limit] setting of [0%] is below the recommended minimum of 50.0% of the heap"); } private List breakerSettings() { diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/analyzer/VerifierChecks.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/analyzer/VerifierChecks.java deleted file mode 100644 index 36ce187d8600c..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/analyzer/VerifierChecks.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.core.analyzer; - -import org.elasticsearch.xpack.esql.core.common.Failure; -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.plan.logical.Filter; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; - -import java.util.Set; - -import static org.elasticsearch.xpack.esql.core.common.Failure.fail; -import static org.elasticsearch.xpack.esql.core.type.DataType.BOOLEAN; - -public final class VerifierChecks { - - public static void checkFilterConditionType(LogicalPlan p, Set localFailures) { - if (p instanceof Filter) { - Expression condition = ((Filter) p).condition(); - if (condition.dataType() != BOOLEAN) { - localFailures.add(fail(condition, "Condition expression needs to be boolean, found [{}]", condition.dataType())); - } - } - } - -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/async/QlStatusResponse.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/async/QlStatusResponse.java deleted file mode 100644 index 8c28f08e8d882..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/async/QlStatusResponse.java +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ -package org.elasticsearch.xpack.esql.core.async; - -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.async.StoredAsyncResponse; -import org.elasticsearch.xpack.core.search.action.SearchStatusResponse; - -import java.io.IOException; -import java.util.Objects; - -/** - * A response for *QL search status request - */ -public class QlStatusResponse extends ActionResponse implements SearchStatusResponse, ToXContentObject { - private final String id; - private final boolean isRunning; - private final boolean isPartial; - private final Long startTimeMillis; - private final long expirationTimeMillis; - private final RestStatus completionStatus; - - public interface AsyncStatus { - String id(); - - boolean isRunning(); - - boolean isPartial(); - } - - public QlStatusResponse( - String id, - boolean isRunning, - boolean isPartial, - Long startTimeMillis, - long expirationTimeMillis, - RestStatus completionStatus - ) { - this.id = id; - this.isRunning = isRunning; - this.isPartial = isPartial; - this.startTimeMillis = startTimeMillis; - this.expirationTimeMillis = expirationTimeMillis; - this.completionStatus = completionStatus; - } - - /** - * Get status from the stored Ql search response - * @param storedResponse - a response from a stored search - * @param expirationTimeMillis – expiration time in milliseconds - * @param id – encoded async search id - * @return a status response - */ - public static QlStatusResponse getStatusFromStoredSearch( - StoredAsyncResponse storedResponse, - long expirationTimeMillis, - String id - ) { - S searchResponse = storedResponse.getResponse(); - if (searchResponse != null) { - assert searchResponse.isRunning() == false : "Stored Ql search response must have a completed status!"; - return new QlStatusResponse( - searchResponse.id(), - false, - searchResponse.isPartial(), - null, // we don't store in the index the start time for completed response - expirationTimeMillis, - RestStatus.OK - ); - } else { - Exception exc = storedResponse.getException(); - assert exc != null : "Stored Ql response must either have a search response or an exception!"; - return new QlStatusResponse( - id, - false, - false, - null, // we don't store in the index the start time for completed response - expirationTimeMillis, - ExceptionsHelper.status(exc) - ); - } - } - - public QlStatusResponse(StreamInput in) throws IOException { - this.id = in.readString(); - this.isRunning = in.readBoolean(); - this.isPartial = in.readBoolean(); - this.startTimeMillis = in.readOptionalLong(); - this.expirationTimeMillis = in.readLong(); - this.completionStatus = (this.isRunning == false) ? 
RestStatus.readFrom(in) : null; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(id); - out.writeBoolean(isRunning); - out.writeBoolean(isPartial); - out.writeOptionalLong(startTimeMillis); - out.writeLong(expirationTimeMillis); - if (isRunning == false) { - RestStatus.writeTo(out, completionStatus); - } - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - { - builder.field("id", id); - builder.field("is_running", isRunning); - builder.field("is_partial", isPartial); - if (startTimeMillis != null) { // start time is available only for a running eql search - builder.timeField("start_time_in_millis", "start_time", startTimeMillis); - } - builder.timeField("expiration_time_in_millis", "expiration_time", expirationTimeMillis); - if (isRunning == false) { // completion status is available only for a completed eql search - builder.field("completion_status", completionStatus.getStatus()); - } - } - builder.endObject(); - return builder; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null || getClass() != obj.getClass()) return false; - QlStatusResponse other = (QlStatusResponse) obj; - return id.equals(other.id) - && isRunning == other.isRunning - && isPartial == other.isPartial - && Objects.equals(startTimeMillis, other.startTimeMillis) - && expirationTimeMillis == other.expirationTimeMillis - && Objects.equals(completionStatus, other.completionStatus); - } - - @Override - public int hashCode() { - return Objects.hash(id, isRunning, isPartial, startTimeMillis, expirationTimeMillis, completionStatus); - } - - /** - * Returns the id of the eql search status request. - */ - public String getId() { - return id; - } - - /** - * Returns {@code true} if the eql search is still running in the cluster, - * or {@code false} if the search has been completed. - */ - public boolean isRunning() { - return isRunning; - } - - /** - * Returns {@code true} if the eql search results are partial. - * This could be either because eql search hasn't finished yet, - * or if it finished and some shards have failed or timed out. - */ - public boolean isPartial() { - return isPartial; - } - - /** - * Returns a timestamp when the eql search task started, in milliseconds since epoch. - * For a completed eql search returns {@code null}, as we don't store start time for completed searches. - */ - public Long getStartTime() { - return startTimeMillis; - } - - /** - * Returns a timestamp when the eql search will be expired, in milliseconds since epoch. - */ - @Override - public long getExpirationTime() { - return expirationTimeMillis; - } - - /** - * For a completed eql search returns the completion status. - * For a still running eql search returns {@code null}. - */ - public RestStatus getCompletionStatus() { - return completionStatus; - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/AggRef.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/AggRef.java deleted file mode 100644 index 54e44f55c96ab..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/AggRef.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.execution.search; - -/** - * Reference to a ES aggregation (which can be either a GROUP BY or Metric agg). - */ -public abstract class AggRef implements FieldExtraction { - - @Override - public void collectFields(QlSourceBuilder sourceBuilder) { - // Aggregations do not need any special fields - } - - @Override - public boolean supportedByAggsOnlyQuery() { - return true; - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/FieldExtraction.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/FieldExtraction.java deleted file mode 100644 index 6751a8412153b..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/FieldExtraction.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.execution.search; - -import org.elasticsearch.search.builder.SearchSourceBuilder; - -/** - * An interface for something that needs to extract field(s) from a result. - */ -public interface FieldExtraction { - - /** - * Add whatever is necessary to the {@link SearchSourceBuilder} - * in order to fetch the field. This can include tracking the score, - * {@code _source} fields, doc values fields, and script fields. - */ - void collectFields(QlSourceBuilder sourceBuilder); - - /** - * Is this aggregation supported in an "aggregation only" query - * ({@code true}) or should it force a scroll query ({@code false})? - */ - boolean supportedByAggsOnlyQuery(); -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/QlSourceBuilder.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/QlSourceBuilder.java deleted file mode 100644 index a8a0198400027..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/QlSourceBuilder.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.execution.search; - -import org.elasticsearch.script.Script; -import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.fetch.subphase.FieldAndFormat; - -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import java.util.Map; -import java.util.Set; - -/** - * A {@code SqlSourceBuilder} is a builder object passed to objects implementing - * {@link FieldExtraction} that can "build" whatever needs to be extracted from - * the resulting ES document as a field. 
- */ -public class QlSourceBuilder { - // The LinkedHashMaps preserve the order of the fields in the response - private final Set fetchFields = new LinkedHashSet<>(); - private final Map scriptFields = new LinkedHashMap<>(); - - boolean trackScores = false; - - public QlSourceBuilder() {} - - /** - * Turns on returning the {@code _score} for documents. - */ - public void trackScores() { - this.trackScores = true; - } - - /** - * Retrieve the requested field using the "fields" API - */ - public void addFetchField(String field, String format) { - fetchFields.add(new FieldAndFormat(field, format)); - } - - /** - * Return the given field as a script field with the supplied script - */ - public void addScriptField(String name, Script script) { - scriptFields.put(name, script); - } - - /** - * Collect the necessary fields, modifying the {@code SearchSourceBuilder} - * to retrieve them from the document. - */ - public void build(SearchSourceBuilder sourceBuilder) { - sourceBuilder.trackScores(this.trackScores); - fetchFields.forEach(field -> sourceBuilder.fetchField(new FieldAndFormat(field.field, field.format, null))); - scriptFields.forEach(sourceBuilder::scriptField); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/AbstractFieldHitExtractor.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/AbstractFieldHitExtractor.java deleted file mode 100644 index 9f7155a78e66f..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/AbstractFieldHitExtractor.java +++ /dev/null @@ -1,269 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.execution.search.extractor; - -import org.elasticsearch.TransportVersions; -import org.elasticsearch.common.document.DocumentField; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.xpack.esql.core.InvalidArgumentException; -import org.elasticsearch.xpack.esql.core.QlIllegalArgumentException; -import org.elasticsearch.xpack.esql.core.type.DataType; - -import java.io.IOException; -import java.time.ZoneId; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Objects; - -/** - * Extractor for ES fields. Works for both 'normal' fields but also nested ones (which require hitName to be set). - * The latter is used as metadata in assembling the results in the tabular response. 
- */ -public abstract class AbstractFieldHitExtractor implements HitExtractor { - - private final String fieldName, hitName; - private final DataType dataType; - private final ZoneId zoneId; - - protected MultiValueSupport multiValueSupport; - - public enum MultiValueSupport { - NONE, - LENIENT, - FULL - } - - protected AbstractFieldHitExtractor(String name, DataType dataType, ZoneId zoneId) { - this(name, dataType, zoneId, null, MultiValueSupport.NONE); - } - - protected AbstractFieldHitExtractor(String name, DataType dataType, ZoneId zoneId, MultiValueSupport multiValueSupport) { - this(name, dataType, zoneId, null, multiValueSupport); - } - - protected AbstractFieldHitExtractor( - String name, - DataType dataType, - ZoneId zoneId, - String hitName, - MultiValueSupport multiValueSupport - ) { - this.fieldName = name; - this.dataType = dataType; - this.zoneId = zoneId; - this.multiValueSupport = multiValueSupport; - this.hitName = hitName; - - if (hitName != null) { - if (name.contains(hitName) == false) { - throw new QlIllegalArgumentException("Hitname [{}] specified but not part of the name [{}]", hitName, name); - } - } - } - - @SuppressWarnings("this-escape") - protected AbstractFieldHitExtractor(StreamInput in) throws IOException { - fieldName = in.readString(); - String typeName = in.readOptionalString(); - dataType = typeName != null ? loadTypeFromName(typeName) : null; - hitName = in.readOptionalString(); - if (in.getTransportVersion().before(TransportVersions.V_8_6_0)) { - this.multiValueSupport = in.readBoolean() ? MultiValueSupport.LENIENT : MultiValueSupport.NONE; - } else { - this.multiValueSupport = in.readEnum(MultiValueSupport.class); - } - zoneId = readZoneId(in); - } - - protected DataType loadTypeFromName(String typeName) { - return DataType.fromTypeName(typeName); - } - - protected abstract ZoneId readZoneId(StreamInput in) throws IOException; - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(fieldName); - out.writeOptionalString(dataType == null ? null : dataType.typeName()); - out.writeOptionalString(hitName); - if (out.getTransportVersion().before(TransportVersions.V_8_6_0)) { - out.writeBoolean(multiValueSupport != MultiValueSupport.NONE); - } else { - out.writeEnum(multiValueSupport); - } - - } - - @Override - public Object extract(SearchHit hit) { - Object value = null; - DocumentField field = null; - if (hitName != null) { - value = unwrapFieldsMultiValue(extractNestedField(hit)); - } else { - field = hit.field(fieldName); - if (field != null) { - value = unwrapFieldsMultiValue(field.getValues()); - } - } - return value; - } - - /* - * For a path of fields like root.nested1.nested2.leaf where nested1 and nested2 are nested field types, - * fieldName is root.nested1.nested2.leaf, while hitName is root.nested1.nested2 - * We first look for root.nested1.nested2 or root.nested1 or root in the SearchHit until we find something. - * If the DocumentField lives under "root.nested1" the remaining path to search for (in the DocumentField itself) is nested2. - * After this step is done, what remains to be done is just getting the leaf values. 
- */ - @SuppressWarnings("unchecked") - private Object extractNestedField(SearchHit hit) { - Object value; - DocumentField field; - String tempHitname = hitName; - List remainingPath = new ArrayList<>(); - // first, search for the "root" DocumentField under which the remaining path of nested document values is - while ((field = hit.field(tempHitname)) == null) { - int indexOfDot = tempHitname.lastIndexOf('.'); - if (indexOfDot < 0) {// there is no such field in the hit - return null; - } - remainingPath.add(0, tempHitname.substring(indexOfDot + 1)); - tempHitname = tempHitname.substring(0, indexOfDot); - } - // then dig into DocumentField's structure until we reach the field we are interested into - if (remainingPath.size() > 0) { - List values = field.getValues(); - Iterator pathIterator = remainingPath.iterator(); - while (pathIterator.hasNext()) { - String pathElement = pathIterator.next(); - Map> elements = (Map>) values.get(0); - values = elements.get(pathElement); - /* - * if this path is not found it means we hit another nested document (inner_root_1.inner_root_2.nested_field_2) - * something like this - * "root_field_1.root_field_2.nested_field_1" : [ - * { - * "inner_root_1.inner_root_2.nested_field_2" : [ - * { - * "leaf_field" : [ - * "abc2" - * ] - * So, start re-building the path until the right one is found, ie inner_root_1.inner_root_2...... - */ - while (values == null) { - pathElement += "." + pathIterator.next(); - values = elements.get(pathElement); - } - } - value = ((Map) values.get(0)).get(fieldName.substring(hitName.length() + 1)); - } else { - value = field.getValues(); - } - return value; - } - - protected Object unwrapFieldsMultiValue(Object values) { - if (values == null) { - return null; - } - if (values instanceof Map && hitName != null) { - // extract the sub-field from a nested field (dep.dep_name -> dep_name) - return unwrapFieldsMultiValue(((Map) values).get(fieldName.substring(hitName.length() + 1))); - } - if (values instanceof List list) { - if (list.isEmpty()) { - return null; - } else { - if (isPrimitive(list) == false) { - if (list.size() == 1 || multiValueSupport == MultiValueSupport.LENIENT) { - return unwrapFieldsMultiValue(list.get(0)); - } else if (multiValueSupport == MultiValueSupport.FULL) { - List unwrappedValues = new ArrayList<>(); - for (Object value : list) { - unwrappedValues.add(unwrapFieldsMultiValue(value)); - } - values = unwrappedValues; - } else { - // missing `field_multi_value_leniency` setting - throw new InvalidArgumentException("Arrays (returned by [{}]) are not supported", fieldName); - } - } - } - } - - Object unwrapped = unwrapCustomValue(values); - if (unwrapped != null && isListOfNulls(unwrapped) == false) { - return unwrapped; - } - - return values; - } - - private static boolean isListOfNulls(Object unwrapped) { - if (unwrapped instanceof List list) { - if (list.size() == 0) { - return false; - } - for (Object o : list) { - if (o != null) { - return false; - } - } - return true; - } - return false; - } - - protected abstract Object unwrapCustomValue(Object values); - - protected abstract boolean isPrimitive(List list); - - @Override - public String hitName() { - return hitName; - } - - public String fieldName() { - return fieldName; - } - - public ZoneId zoneId() { - return zoneId; - } - - public DataType dataType() { - return dataType; - } - - public MultiValueSupport multiValueSupport() { - return multiValueSupport; - } - - @Override - public String toString() { - return fieldName + "@" + hitName + "@" + zoneId; - } - 
- @Override - public boolean equals(Object obj) { - if (obj == null || obj.getClass() != getClass()) { - return false; - } - AbstractFieldHitExtractor other = (AbstractFieldHitExtractor) obj; - return fieldName.equals(other.fieldName) && hitName.equals(other.hitName) && multiValueSupport == other.multiValueSupport; - } - - @Override - public int hashCode() { - return Objects.hash(fieldName, hitName, multiValueSupport); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/BucketExtractor.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/BucketExtractor.java deleted file mode 100644 index a25482d92ecce..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/BucketExtractor.java +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.execution.search.extractor; - -import org.elasticsearch.common.io.stream.NamedWriteable; -import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; - -/** - * Extracts an aggregation value from a {@link Bucket}. - */ -public interface BucketExtractor extends NamedWriteable { - - Object extract(Bucket bucket); -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/BucketExtractors.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/BucketExtractors.java deleted file mode 100644 index fa7443e190d31..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/BucketExtractors.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.execution.search.extractor; - -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; - -import java.util.ArrayList; -import java.util.List; - -public final class BucketExtractors { - - private BucketExtractors() {} - - /** - * All of the named writeables needed to deserialize the instances of - * {@linkplain BucketExtractor}s. 
- */ - public static List getNamedWriteables() { - List entries = new ArrayList<>(); - entries.add(new Entry(BucketExtractor.class, ComputingExtractor.NAME, ComputingExtractor::new)); - entries.add(new Entry(BucketExtractor.class, ConstantExtractor.NAME, ConstantExtractor::new)); - return entries; - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/ComputingExtractor.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/ComputingExtractor.java deleted file mode 100644 index 1116a43022da2..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/ComputingExtractor.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.execution.search.extractor; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.HitExtractorProcessor; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.Processor; - -import java.io.IOException; -import java.util.Objects; - -/** - * Hit/BucketExtractor that delegates to a processor. The difference between this class - * and {@link HitExtractorProcessor} is that the latter is used inside a - * {@link Processor} tree as a leaf (and thus can effectively parse the - * {@link SearchHit} while this class is used when scrolling and passing down - * the results. - * - * In the future, the processor might be used across the board for all columns - * to reduce API complexity (and keep the {@link HitExtractor} only as an - * internal implementation detail). - */ -public class ComputingExtractor implements HitExtractor, BucketExtractor { - /** - * Stands for {@code comPuting}. We try to use short names for {@link HitExtractor}s - * to save a few bytes when when we send them back to the user. 
- */ - static final String NAME = "p"; - private final Processor processor; - private final String hitName; - - public ComputingExtractor(Processor processor) { - this(processor, null); - } - - public ComputingExtractor(Processor processor, String hitName) { - this.processor = processor; - this.hitName = hitName; - } - - // Visibility required for tests - public ComputingExtractor(StreamInput in) throws IOException { - processor = in.readNamedWriteable(Processor.class); - hitName = in.readOptionalString(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeNamedWriteable(processor); - out.writeOptionalString(hitName); - } - - @Override - public String getWriteableName() { - return NAME; - } - - public Processor processor() { - return processor; - } - - public Object extract(Object input) { - return processor.process(input); - } - - @Override - public Object extract(Bucket bucket) { - return processor.process(bucket); - } - - @Override - public Object extract(SearchHit hit) { - return processor.process(hit); - } - - @Override - public String hitName() { - return hitName; - } - - @Override - public boolean equals(Object obj) { - if (obj == null || obj.getClass() != getClass()) { - return false; - } - ComputingExtractor other = (ComputingExtractor) obj; - return Objects.equals(processor, other.processor) && Objects.equals(hitName, other.hitName); - } - - @Override - public int hashCode() { - return Objects.hash(processor, hitName); - } - - @Override - public String toString() { - return processor.toString(); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/ConstantExtractor.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/ConstantExtractor.java deleted file mode 100644 index bba311a085ed2..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/ConstantExtractor.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.execution.search.extractor; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; - -import java.io.IOException; -import java.util.Objects; - -/** - * Returns the a constant for every search hit against which it is run. - */ -public class ConstantExtractor implements HitExtractor, BucketExtractor { - /** - * Stands for {@code constant}. We try to use short names for {@link HitExtractor}s - * to save a few bytes when when we send them back to the user. 
- */ - static final String NAME = "c"; - private final Object constant; - - public ConstantExtractor(Object constant) { - this.constant = constant; - } - - ConstantExtractor(StreamInput in) throws IOException { - constant = in.readGenericValue(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeGenericValue(constant); - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - public Object extract(SearchHit hit) { - return constant; - } - - @Override - public Object extract(Bucket bucket) { - return constant; - } - - @Override - public String hitName() { - return null; - } - - @Override - public boolean equals(Object obj) { - if (obj == null || obj.getClass() != getClass()) { - return false; - } - ConstantExtractor other = (ConstantExtractor) obj; - return Objects.equals(constant, other.constant); - } - - @Override - public int hashCode() { - return Objects.hashCode(constant); - } - - @Override - public String toString() { - return "^" + constant; - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/HitExtractor.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/HitExtractor.java deleted file mode 100644 index 38b72c5e8cd7e..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/HitExtractor.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.execution.search.extractor; - -import org.elasticsearch.common.io.stream.NamedWriteable; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.search.SearchHit; - -/** - * Extracts a column value from a {@link SearchHit}. - */ -public interface HitExtractor extends NamedWriteable { - /** - * Extract the value from a hit. - */ - Object extract(SearchHit hit); - - /** - * Name of the inner hit needed by this extractor if it needs one, {@code null} otherwise. - */ - @Nullable - String hitName(); -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/HitExtractors.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/HitExtractors.java deleted file mode 100644 index 743856d41f8d5..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/HitExtractors.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.execution.search.extractor; - -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; - -import java.util.ArrayList; -import java.util.List; - -public final class HitExtractors { - - private HitExtractors() {} - - /** - * All of the named writeables needed to deserialize the instances of - * {@linkplain HitExtractor}. 
- */ - public static List getNamedWriteables() { - List entries = new ArrayList<>(); - entries.add(new Entry(HitExtractor.class, ConstantExtractor.NAME, ConstantExtractor::new)); - entries.add(new Entry(HitExtractor.class, ComputingExtractor.NAME, ComputingExtractor::new)); - return entries; - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/TotalHitsExtractor.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/TotalHitsExtractor.java deleted file mode 100644 index 52a9116619024..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/TotalHitsExtractor.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.core.execution.search.extractor; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; -import org.elasticsearch.xpack.esql.core.QlIllegalArgumentException; - -import java.io.IOException; - -public class TotalHitsExtractor extends ConstantExtractor { - - public TotalHitsExtractor(Long constant) { - super(constant); - } - - TotalHitsExtractor(StreamInput in) throws IOException { - super(in); - } - - @Override - public Object extract(MultiBucketsAggregation.Bucket bucket) { - return validate(super.extract(bucket)); - } - - @Override - public Object extract(SearchHit hit) { - return validate(super.extract(hit)); - } - - private static Object validate(Object value) { - if (Number.class.isInstance(value) == false) { - throw new QlIllegalArgumentException( - "Inconsistent total hits count handling, expected a numeric value but found a {}: {}", - value == null ? 
null : value.getClass().getSimpleName(), - value - ); - } - if (((Number) value).longValue() < 0) { - throw new QlIllegalArgumentException( - "Inconsistent total hits count handling, expected a non-negative value but found {}", - value - ); - } - return value; - } - -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Alias.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Alias.java index d9f99b6d92318..01cc716a20547 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Alias.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Alias.java @@ -13,7 +13,6 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.PlanStreamInput; -import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput; import java.io.IOException; import java.util.List; @@ -64,7 +63,7 @@ public Alias(StreamInput in) throws IOException { Source.readFrom((StreamInput & PlanStreamInput) in), in.readString(), in.readOptionalString(), - ((PlanStreamInput) in).readExpression(), + in.readNamedWriteable(Expression.class), NameId.readFrom((StreamInput & PlanStreamInput) in), in.readBoolean() ); @@ -75,7 +74,7 @@ public void writeTo(StreamOutput out) throws IOException { Source.EMPTY.writeTo(out); out.writeString(name()); out.writeOptionalString(qualifier()); - ((PlanStreamOutput) out).writeExpression(child()); + out.writeNamedWriteable(child()); id().writeTo(out); out.writeBoolean(synthetic()); } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Expression.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Expression.java index ee7e0aa81f81e..df8b6732ac0d4 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Expression.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Expression.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.esql.core.expression; import org.elasticsearch.common.io.stream.NamedWriteable; -import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.QlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.capabilities.Resolvable; import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; @@ -16,7 +16,7 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.StringUtils; -import java.io.IOException; +import java.util.ArrayList; import java.util.List; import java.util.function.Supplier; @@ -30,6 +30,14 @@ * (which is a type of expression) with a single child, c. 
*/ public abstract class Expression extends Node implements Resolvable, NamedWriteable { + public static List getNamedWriteables() { + List entries = new ArrayList<>(); + for (NamedWriteableRegistry.Entry e : NamedExpression.getNamedWriteables()) { + entries.add(new NamedWriteableRegistry.Entry(Expression.class, e.name, in -> (NamedExpression) e.reader.read(in))); + } + entries.add(Literal.ENTRY); + return entries; + } public static class TypeResolution { private final boolean failed; @@ -81,18 +89,6 @@ public Expression(Source source, List children) { super(source, children); } - @Override - public void writeTo(StreamOutput out) throws IOException { - // TODO remove this function entirely once all subclasses implement it - throw new UnsupportedOperationException("todo unsupported"); - } - - @Override - public String getWriteableName() { - // TODO remove this function entirely once all subclasses implement it - throw new UnsupportedOperationException("todo unsupported"); - } - // whether the expression can be evaluated statically (folded) or not public boolean foldable() { return false; diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/MetadataAttribute.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/MetadataAttribute.java index eac3586cf139d..fd7382b0098c9 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/MetadataAttribute.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/MetadataAttribute.java @@ -13,6 +13,7 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.IgnoredFieldMapper; +import org.elasticsearch.index.mapper.IndexModeFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -26,6 +27,9 @@ import static org.elasticsearch.core.Tuple.tuple; public class MetadataAttribute extends TypedAttribute { + public static final String TIMESTAMP_FIELD = "@timestamp"; + public static final String TSID_FIELD = "_tsid"; + static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( Attribute.class, "MetadataAttribute", @@ -42,7 +46,9 @@ public class MetadataAttribute extends TypedAttribute { IgnoredFieldMapper.NAME, tuple(DataType.KEYWORD, true), SourceFieldMapper.NAME, - tuple(DataType.SOURCE, false) + tuple(DataType.SOURCE, false), + IndexModeFieldMapper.NAME, + tuple(DataType.KEYWORD, true) ); private final boolean searchable; diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Order.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Order.java index a7377aab369b7..f73051fd9662a 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Order.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Order.java @@ -6,10 +6,12 @@ */ package org.elasticsearch.xpack.esql.core.expression; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; +import java.io.IOException; import java.util.List; import java.util.Objects; @@ -46,6 +48,16 @@ public Order(Source source, Expression child, 
OrderDirection direction, NullsPos this.nulls = nulls == null ? NullsPosition.ANY : nulls; } + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + @Override protected NodeInfo info() { return NodeInfo.create(this, Order::new, child, direction, nulls); diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/TypeResolutions.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/TypeResolutions.java index 588b0a2af55d3..c3593e91c537e 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/TypeResolutions.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/TypeResolutions.java @@ -50,8 +50,8 @@ public static TypeResolution isBoolean(Expression e, String operationName, Param return isType(e, dt -> dt == BOOLEAN, operationName, paramOrd, "boolean"); } - public static TypeResolution isInteger(Expression e, String operationName, ParamOrdinal paramOrd) { - return isType(e, DataType::isInteger, operationName, paramOrd, "integer"); + public static TypeResolution isWholeNumber(Expression e, String operationName, ParamOrdinal paramOrd) { + return isType(e, DataType::isWholeNumber, operationName, paramOrd, "integer"); } public static TypeResolution isNumeric(Expression e, String operationName, ParamOrdinal paramOrd) { @@ -132,6 +132,28 @@ public static TypeResolution isFoldable(Expression e, String operationName, Para return TypeResolution.TYPE_RESOLVED; } + public static TypeResolution isNotNullAndFoldable(Expression e, String operationName, ParamOrdinal paramOrd) { + TypeResolution resolution = isFoldable(e, operationName, paramOrd); + + if (resolution.unresolved()) { + return resolution; + } + + if (e.dataType() == DataType.NULL || e.fold() == null) { + resolution = new TypeResolution( + format( + null, + "{}argument of [{}] cannot be null, received [{}]", + paramOrd == null || paramOrd == DEFAULT ? "" : paramOrd.name().toLowerCase(Locale.ROOT) + " ", + operationName, + Expressions.name(e) + ) + ); + } + + return resolution; + } + public static TypeResolution isNotFoldable(Expression e, String operationName, ParamOrdinal paramOrd) { if (e.foldable()) { return new TypeResolution( diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/FunctionRegistry.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/FunctionRegistry.java deleted file mode 100644 index 48da08b915220..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/FunctionRegistry.java +++ /dev/null @@ -1,463 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ -package org.elasticsearch.xpack.esql.core.expression.function; - -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.util.CollectionUtils; -import org.elasticsearch.xpack.esql.core.ParsingException; -import org.elasticsearch.xpack.esql.core.QlIllegalArgumentException; -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.session.Configuration; -import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.core.util.Check; - -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Map.Entry; -import java.util.function.BiFunction; -import java.util.regex.Pattern; -import java.util.stream.Collectors; - -import static java.util.Collections.emptyList; -import static java.util.Collections.unmodifiableList; -import static java.util.stream.Collectors.toList; - -public class FunctionRegistry { - - // Translation table for error messaging in the following function - private static final String[] NUM_NAMES = { "zero", "one", "two", "three", "four", "five", }; - - // list of functions grouped by type of functions (aggregate, statistics, math etc) and ordered alphabetically inside each group - // a single function will have one entry for itself with its name associated to its instance and, also, one entry for each alias - // it has with the alias name associated to the FunctionDefinition instance - private final Map defs = new LinkedHashMap<>(); - private final Map aliases = new HashMap<>(); - - public FunctionRegistry() {} - - /** - * Register the given function definitions with this registry. - */ - @SuppressWarnings("this-escape") - public FunctionRegistry(FunctionDefinition... functions) { - register(functions); - } - - @SuppressWarnings("this-escape") - public FunctionRegistry(FunctionDefinition[]... groupFunctions) { - register(groupFunctions); - } - - protected void register(FunctionDefinition[]... groupFunctions) { - for (FunctionDefinition[] group : groupFunctions) { - register(group); - } - } - - protected void register(FunctionDefinition... functions) { - // temporary map to hold [function_name/alias_name : function instance] - Map batchMap = new HashMap<>(); - for (FunctionDefinition f : functions) { - batchMap.put(f.name(), f); - for (String alias : f.aliases()) { - Object old = batchMap.put(alias, f); - if (old != null || defs.containsKey(alias)) { - throw new QlIllegalArgumentException( - "alias [" - + alias - + "] is used by " - + "[" - + (old != null ? 
old : defs.get(alias).name()) - + "] and [" - + f.name() - + "]" - ); - } - aliases.put(alias, f.name()); - } - } - // sort the temporary map by key name and add it to the global map of functions - defs.putAll( - batchMap.entrySet() - .stream() - .sorted(Map.Entry.comparingByKey()) - .collect( - Collectors.< - Entry, - String, - FunctionDefinition, - LinkedHashMap>toMap( - Map.Entry::getKey, - Map.Entry::getValue, - (oldValue, newValue) -> oldValue, - LinkedHashMap::new - ) - ) - ); - } - - public FunctionDefinition resolveFunction(String functionName) { - FunctionDefinition def = defs.get(functionName); - if (def == null) { - throw new QlIllegalArgumentException("Cannot find function {}; this should have been caught during analysis", functionName); - } - return def; - } - - protected String normalize(String name) { - return name.toUpperCase(Locale.ROOT); - } - - public String resolveAlias(String alias) { - String normalized = normalize(alias); - return aliases.getOrDefault(normalized, normalized); - } - - public boolean functionExists(String functionName) { - return defs.containsKey(functionName); - } - - public Collection listFunctions() { - // It is worth double checking if we need this copy. These are immutable anyway. - return defs.values(); - } - - public Collection listFunctions(String pattern) { - // It is worth double checking if we need this copy. These are immutable anyway. - Pattern p = Strings.hasText(pattern) ? Pattern.compile(normalize(pattern)) : null; - return defs.entrySet() - .stream() - .filter(e -> p == null || p.matcher(e.getKey()).matches()) - .map(e -> cloneDefinition(e.getKey(), e.getValue())) - .collect(toList()); - } - - protected FunctionDefinition cloneDefinition(String name, FunctionDefinition definition) { - return new FunctionDefinition(name, emptyList(), definition.clazz(), definition.builder()); - } - - protected interface FunctionBuilder { - Function build(Source source, List children, Configuration cfg); - } - - /** - * Main method to register a function. - * - * @param names Must always have at least one entry which is the method's primary name - */ - @SuppressWarnings("overloads") - protected static FunctionDefinition def(Class function, FunctionBuilder builder, String... names) { - Check.isTrue(names.length > 0, "At least one name must be provided for the function"); - String primaryName = names[0]; - List aliases = Arrays.asList(names).subList(1, names.length); - FunctionDefinition.Builder realBuilder = (uf, cfg, extras) -> { - if (CollectionUtils.isEmpty(extras) == false) { - throw new ParsingException( - uf.source(), - "Unused parameters {} detected when building [{}]", - Arrays.toString(extras), - primaryName - ); - } - try { - return builder.build(uf.source(), uf.children(), cfg); - } catch (QlIllegalArgumentException e) { - throw new ParsingException(e, uf.source(), "error building [{}]: {}", primaryName, e.getMessage()); - } - }; - return new FunctionDefinition(primaryName, unmodifiableList(aliases), function, realBuilder); - } - - /** - * Build a {@linkplain FunctionDefinition} for a no-argument function. - */ - protected static FunctionDefinition def( - Class function, - java.util.function.Function ctorRef, - String... names - ) { - FunctionBuilder builder = (source, children, cfg) -> { - if (false == children.isEmpty()) { - throw new QlIllegalArgumentException("expects no arguments"); - } - return ctorRef.apply(source); - }; - return def(function, builder, names); - } - - /** - * Build a {@linkplain FunctionDefinition} for a unary function. 
- */ - @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do - public static FunctionDefinition def( - Class function, - BiFunction ctorRef, - String... names - ) { - FunctionBuilder builder = (source, children, cfg) -> { - if (children.size() != 1) { - throw new QlIllegalArgumentException("expects exactly one argument"); - } - return ctorRef.apply(source, children.get(0)); - }; - return def(function, builder, names); - } - - /** - * Build a {@linkplain FunctionDefinition} for multi-arg/n-ary function. - */ - @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do - protected FunctionDefinition def(Class function, NaryBuilder ctorRef, String... names) { - FunctionBuilder builder = (source, children, cfg) -> { return ctorRef.build(source, children); }; - return def(function, builder, names); - } - - protected interface NaryBuilder { - T build(Source source, List children); - } - - /** - * Build a {@linkplain FunctionDefinition} for a binary function. - */ - @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do - protected static FunctionDefinition def(Class function, BinaryBuilder ctorRef, String... names) { - FunctionBuilder builder = (source, children, cfg) -> { - boolean isBinaryOptionalParamFunction = OptionalArgument.class.isAssignableFrom(function); - if (isBinaryOptionalParamFunction && (children.size() > 2 || children.size() < 1)) { - throw new QlIllegalArgumentException("expects one or two arguments"); - } else if (isBinaryOptionalParamFunction == false && children.size() != 2) { - throw new QlIllegalArgumentException("expects exactly two arguments"); - } - - return ctorRef.build(source, children.get(0), children.size() == 2 ? children.get(1) : null); - }; - return def(function, builder, names); - } - - protected interface BinaryBuilder { - T build(Source source, Expression left, Expression right); - } - - /** - * Build a {@linkplain FunctionDefinition} for a ternary function. - */ - @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do - protected static FunctionDefinition def(Class function, TernaryBuilder ctorRef, String... names) { - FunctionBuilder builder = (source, children, cfg) -> { - boolean hasMinimumTwo = OptionalArgument.class.isAssignableFrom(function); - if (hasMinimumTwo && (children.size() > 3 || children.size() < 2)) { - throw new QlIllegalArgumentException("expects two or three arguments"); - } else if (hasMinimumTwo == false && children.size() != 3) { - throw new QlIllegalArgumentException("expects exactly three arguments"); - } - return ctorRef.build(source, children.get(0), children.get(1), children.size() == 3 ? children.get(2) : null); - }; - return def(function, builder, names); - } - - protected interface TernaryBuilder { - T build(Source source, Expression one, Expression two, Expression three); - } - - /** - * Build a {@linkplain FunctionDefinition} for a quaternary function. - */ - @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do - protected static FunctionDefinition def(Class function, QuaternaryBuilder ctorRef, String... 
names) { - FunctionBuilder builder = (source, children, cfg) -> { - if (OptionalArgument.class.isAssignableFrom(function)) { - if (children.size() > 4 || children.size() < 3) { - throw new QlIllegalArgumentException("expects three or four arguments"); - } - } else if (TwoOptionalArguments.class.isAssignableFrom(function)) { - if (children.size() > 4 || children.size() < 2) { - throw new QlIllegalArgumentException("expects minimum two, maximum four arguments"); - } - } else if (children.size() != 4) { - throw new QlIllegalArgumentException("expects exactly four arguments"); - } - return ctorRef.build( - source, - children.get(0), - children.get(1), - children.size() > 2 ? children.get(2) : null, - children.size() > 3 ? children.get(3) : null - ); - }; - return def(function, builder, names); - } - - protected interface QuaternaryBuilder { - T build(Source source, Expression one, Expression two, Expression three, Expression four); - } - - /** - * Build a {@linkplain FunctionDefinition} for a quinary function. - */ - @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do - protected static FunctionDefinition def( - Class function, - QuinaryBuilder ctorRef, - int numOptionalParams, - String... names - ) { - FunctionBuilder builder = (source, children, cfg) -> { - final int NUM_TOTAL_PARAMS = 5; - boolean hasOptionalParams = OptionalArgument.class.isAssignableFrom(function); - if (hasOptionalParams && (children.size() > NUM_TOTAL_PARAMS || children.size() < NUM_TOTAL_PARAMS - numOptionalParams)) { - throw new QlIllegalArgumentException( - "expects between " - + NUM_NAMES[NUM_TOTAL_PARAMS - numOptionalParams] - + " and " - + NUM_NAMES[NUM_TOTAL_PARAMS] - + " arguments" - ); - } else if (hasOptionalParams == false && children.size() != NUM_TOTAL_PARAMS) { - throw new QlIllegalArgumentException("expects exactly " + NUM_NAMES[NUM_TOTAL_PARAMS] + " arguments"); - } - return ctorRef.build( - source, - children.size() > 0 ? children.get(0) : null, - children.size() > 1 ? children.get(1) : null, - children.size() > 2 ? children.get(2) : null, - children.size() > 3 ? children.get(3) : null, - children.size() > 4 ? children.get(4) : null - ); - }; - return def(function, builder, names); - } - - protected interface QuinaryBuilder { - T build(Source source, Expression one, Expression two, Expression three, Expression four, Expression five); - } - - /** - * Build a {@linkplain FunctionDefinition} for functions with a mandatory argument followed by a varidic list. - */ - @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do - protected static FunctionDefinition def(Class function, UnaryVariadicBuilder ctorRef, String... names) { - FunctionBuilder builder = (source, children, cfg) -> { - boolean hasMinimumOne = OptionalArgument.class.isAssignableFrom(function); - if (hasMinimumOne && children.size() < 1) { - throw new QlIllegalArgumentException("expects at least one argument"); - } else if (hasMinimumOne == false && children.size() < 2) { - throw new QlIllegalArgumentException("expects at least two arguments"); - } - return ctorRef.build(source, children.get(0), children.subList(1, children.size())); - }; - return def(function, builder, names); - } - - protected interface UnaryVariadicBuilder { - T build(Source source, Expression exp, List variadic); - } - - /** - * Build a {@linkplain FunctionDefinition} for a no-argument function that is configuration aware. 
- */ - @SuppressWarnings("overloads") - protected static FunctionDefinition def(Class function, ConfigurationAwareBuilder ctorRef, String... names) { - FunctionBuilder builder = (source, children, cfg) -> { - if (false == children.isEmpty()) { - throw new QlIllegalArgumentException("expects no arguments"); - } - return ctorRef.build(source, cfg); - }; - return def(function, builder, names); - } - - protected interface ConfigurationAwareBuilder { - T build(Source source, Configuration configuration); - } - - /** - * Build a {@linkplain FunctionDefinition} for a one-argument function that is configuration aware. - */ - @SuppressWarnings("overloads") - protected static FunctionDefinition def( - Class function, - UnaryConfigurationAwareBuilder ctorRef, - String... names - ) { - FunctionBuilder builder = (source, children, cfg) -> { - if (children.size() > 1) { - throw new QlIllegalArgumentException("expects exactly one argument"); - } - Expression ex = children.size() == 1 ? children.get(0) : null; - return ctorRef.build(source, ex, cfg); - }; - return def(function, builder, names); - } - - protected interface UnaryConfigurationAwareBuilder { - T build(Source source, Expression exp, Configuration configuration); - } - - /** - * Build a {@linkplain FunctionDefinition} for a binary function that is configuration aware. - */ - @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do - protected static FunctionDefinition def( - Class function, - BinaryConfigurationAwareBuilder ctorRef, - String... names - ) { - FunctionBuilder builder = (source, children, cfg) -> { - boolean isBinaryOptionalParamFunction = OptionalArgument.class.isAssignableFrom(function); - if (isBinaryOptionalParamFunction && (children.size() > 2 || children.size() < 1)) { - throw new QlIllegalArgumentException("expects one or two arguments"); - } else if (isBinaryOptionalParamFunction == false && children.size() != 2) { - throw new QlIllegalArgumentException("expects exactly two arguments"); - } - return ctorRef.build(source, children.get(0), children.size() == 2 ? children.get(1) : null, cfg); - }; - return def(function, builder, names); - } - - protected interface BinaryConfigurationAwareBuilder { - T build(Source source, Expression left, Expression right, Configuration configuration); - } - - /** - * Build a {@linkplain FunctionDefinition} for a ternary function that is configuration aware. - */ - @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do - protected FunctionDefinition def(Class function, TernaryConfigurationAwareBuilder ctorRef, String... names) { - FunctionBuilder builder = (source, children, cfg) -> { - boolean hasMinimumTwo = OptionalArgument.class.isAssignableFrom(function); - if (hasMinimumTwo && (children.size() > 3 || children.size() < 2)) { - throw new QlIllegalArgumentException("expects two or three arguments"); - } else if (hasMinimumTwo == false && children.size() != 3) { - throw new QlIllegalArgumentException("expects exactly three arguments"); - } - return ctorRef.build(source, children.get(0), children.get(1), children.size() == 3 ? children.get(2) : null, cfg); - }; - return def(function, builder, names); - } - - protected interface TernaryConfigurationAwareBuilder { - T build(Source source, Expression one, Expression two, Expression three, Configuration configuration); - } - - // - // Utility method for extra argument extraction. 
- // - protected static Boolean asBool(Object[] extras) { - if (CollectionUtils.isEmpty(extras)) { - return null; - } - if (extras.length != 1 || (extras[0] instanceof Boolean) == false) { - throw new QlIllegalArgumentException("Invalid number and types of arguments given to function definition"); - } - return (Boolean) extras[0]; - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/FunctionTypeRegistry.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/FunctionTypeRegistry.java deleted file mode 100644 index 8ba40d5b167ff..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/FunctionTypeRegistry.java +++ /dev/null @@ -1,13 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.core.expression.function; - -public interface FunctionTypeRegistry { - - String type(Class clazz); -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/Functions.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/Functions.java deleted file mode 100644 index 46f9d8399503d..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/Functions.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.expression.function; - -import org.elasticsearch.xpack.esql.core.expression.Expression; - -/** - * @deprecated for removal - */ -@Deprecated -public abstract class Functions { - - /** - * @deprecated for removal - */ - @Deprecated - public static boolean isAggregate(Expression e) { - throw new IllegalStateException("Should never reach this code"); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/BaseSurrogateFunction.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/BaseSurrogateFunction.java deleted file mode 100644 index efbcc4f869620..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/BaseSurrogateFunction.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.esql.core.expression.function.scalar; - -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.tree.Source; - -import java.util.List; - -public abstract class BaseSurrogateFunction extends ScalarFunction implements SurrogateFunction { - - private ScalarFunction lazySubstitute; - - public BaseSurrogateFunction(Source source) { - super(source); - } - - public BaseSurrogateFunction(Source source, List fields) { - super(source, fields); - } - - @Override - public ScalarFunction substitute() { - if (lazySubstitute == null) { - lazySubstitute = makeSubstitute(); - } - return lazySubstitute; - } - - protected abstract ScalarFunction makeSubstitute(); - - @Override - public boolean foldable() { - return substitute().foldable(); - } - - @Override - public Object fold() { - return substitute().fold(); - } - -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/BinaryScalarFunction.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/BinaryScalarFunction.java index 4b462719a375b..d49fa07cd82c9 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/BinaryScalarFunction.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/BinaryScalarFunction.java @@ -11,7 +11,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.util.PlanStreamInput; -import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput; import java.io.IOException; import java.util.Arrays; @@ -30,16 +29,16 @@ protected BinaryScalarFunction(Source source, Expression left, Expression right) protected BinaryScalarFunction(StreamInput in) throws IOException { this( Source.readFrom((StreamInput & PlanStreamInput) in), - ((PlanStreamInput) in).readExpression(), - ((PlanStreamInput) in).readExpression() + in.readNamedWriteable(Expression.class), + in.readNamedWriteable(Expression.class) ); } @Override public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); - ((PlanStreamOutput) out).writeExpression(left()); - ((PlanStreamOutput) out).writeExpression(right()); + out.writeNamedWriteable(left); + out.writeNamedWriteable(right); } @Override diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/ConfigurationFunction.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/ConfigurationFunction.java deleted file mode 100644 index fe2e527b57417..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/ConfigurationFunction.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.esql.core.expression.function.scalar; - -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.session.Configuration; -import org.elasticsearch.xpack.esql.core.tree.Source; - -import java.util.List; - -public abstract class ConfigurationFunction extends ScalarFunction { - - private final Configuration configuration; - - protected ConfigurationFunction(Source source, List fields, Configuration configuration) { - super(source, fields); - this.configuration = configuration; - } - - public Configuration configuration() { - return configuration; - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/UnaryScalarFunction.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/UnaryScalarFunction.java index e5c2cedfd087b..8704a42ed33e2 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/UnaryScalarFunction.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/UnaryScalarFunction.java @@ -9,10 +9,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.Processor; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.util.PlanStreamInput; -import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput; import java.io.IOException; import java.util.List; @@ -29,13 +27,13 @@ protected UnaryScalarFunction(Source source, Expression field) { } protected UnaryScalarFunction(StreamInput in) throws IOException { - this(Source.readFrom((StreamInput & PlanStreamInput) in), ((PlanStreamInput) in).readExpression()); + this(Source.readFrom((StreamInput & PlanStreamInput) in), in.readNamedWriteable(Expression.class)); } @Override - public final void writeTo(StreamOutput out) throws IOException { + public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); - ((PlanStreamOutput) out).writeExpression(field); + out.writeNamedWriteable(field); } @Override @@ -49,15 +47,11 @@ public Expression field() { return field; } - protected abstract Processor makeProcessor(); - @Override public boolean foldable() { return field.foldable(); } @Override - public Object fold() { - return makeProcessor().process(field().fold()); - } + public abstract Object fold(); } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/string/BinaryComparisonCaseInsensitiveFunction.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/string/BinaryComparisonCaseInsensitiveFunction.java deleted file mode 100644 index 4739fe910b769..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/string/BinaryComparisonCaseInsensitiveFunction.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.esql.core.expression.function.scalar.string; - -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.core.type.DataType; - -import java.util.Objects; - -import static java.util.Arrays.asList; -import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; -import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; -import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isStringAndExact; - -public abstract class BinaryComparisonCaseInsensitiveFunction extends CaseInsensitiveScalarFunction { - - private final Expression left, right; - - protected BinaryComparisonCaseInsensitiveFunction(Source source, Expression left, Expression right, boolean caseInsensitive) { - super(source, asList(left, right), caseInsensitive); - this.left = left; - this.right = right; - } - - @Override - protected TypeResolution resolveType() { - if (childrenResolved() == false) { - return new TypeResolution("Unresolved children"); - } - - TypeResolution sourceResolution = isStringAndExact(left, sourceText(), FIRST); - if (sourceResolution.unresolved()) { - return sourceResolution; - } - - return isStringAndExact(right, sourceText(), SECOND); - } - - public Expression left() { - return left; - } - - public Expression right() { - return right; - } - - @Override - public DataType dataType() { - return DataType.BOOLEAN; - } - - @Override - public boolean foldable() { - return left.foldable() && right.foldable(); - } - - @Override - public int hashCode() { - return Objects.hash(left, right, isCaseInsensitive()); - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - - if (obj == null || getClass() != obj.getClass()) { - return false; - } - - BinaryComparisonCaseInsensitiveFunction other = (BinaryComparisonCaseInsensitiveFunction) obj; - return Objects.equals(left, other.left) - && Objects.equals(right, other.right) - && Objects.equals(isCaseInsensitive(), other.isCaseInsensitive()); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/string/CaseInsensitiveScalarFunction.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/string/CaseInsensitiveScalarFunction.java deleted file mode 100644 index bd3b1aed73390..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/string/CaseInsensitiveScalarFunction.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.esql.core.expression.function.scalar.string; - -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.function.scalar.ScalarFunction; -import org.elasticsearch.xpack.esql.core.tree.Source; - -import java.util.List; -import java.util.Objects; - -public abstract class CaseInsensitiveScalarFunction extends ScalarFunction { - - private final boolean caseInsensitive; - - protected CaseInsensitiveScalarFunction(Source source, List fields, boolean caseInsensitive) { - super(source, fields); - this.caseInsensitive = caseInsensitive; - } - - public boolean isCaseInsensitive() { - return caseInsensitive; - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), isCaseInsensitive()); - } - - @Override - public boolean equals(Object other) { - return super.equals(other) && Objects.equals(((CaseInsensitiveScalarFunction) other).caseInsensitive, caseInsensitive); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/string/StartsWithFunctionProcessor.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/string/StartsWithFunctionProcessor.java deleted file mode 100644 index 8172971fc39f0..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/string/StartsWithFunctionProcessor.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
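The deleted CaseInsensitiveScalarFunction threads a caseInsensitive flag through equals/hashCode, and the StartsWithFunctionProcessor removed in the next hunk applies that flag by lowercasing both operands with Locale.ROOT and returning null when either operand is null. A standalone helper with the same semantics, narrowed to String for brevity (StartsWithDemo is an illustrative name, not the Elasticsearch code):

    import java.util.Locale;

    class StartsWithDemo {
        // Mirrors the deleted processor's doProcess: null in -> null out, and
        // case-insensitive matching lowercases both sides with Locale.ROOT.
        static Boolean startsWith(String source, String pattern, boolean caseInsensitive) {
            if (source == null || pattern == null) {
                return null;
            }
            if (caseInsensitive == false) {
                return source.startsWith(pattern);
            }
            return source.toLowerCase(Locale.ROOT).startsWith(pattern.toLowerCase(Locale.ROOT));
        }

        public static void main(String[] args) {
            System.out.println(startsWith("Elasticsearch", "elastic", true));  // true
            System.out.println(startsWith("Elasticsearch", "elastic", false)); // false
            System.out.println(startsWith(null, "x", true));                   // null
        }
    }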
- */ -package org.elasticsearch.xpack.esql.core.expression.function.scalar.string; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.esql.core.QlIllegalArgumentException; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.Processor; - -import java.io.IOException; -import java.util.Locale; -import java.util.Objects; - -public class StartsWithFunctionProcessor implements Processor { - - public static final String NAME = "sstw"; - - private final Processor source; - private final Processor pattern; - private final boolean caseInsensitive; - - public StartsWithFunctionProcessor(Processor source, Processor pattern, boolean caseInsensitive) { - this.source = source; - this.pattern = pattern; - this.caseInsensitive = caseInsensitive; - } - - public StartsWithFunctionProcessor(StreamInput in) throws IOException { - source = in.readNamedWriteable(Processor.class); - pattern = in.readNamedWriteable(Processor.class); - caseInsensitive = in.readBoolean(); - } - - @Override - public final void writeTo(StreamOutput out) throws IOException { - out.writeNamedWriteable(source); - out.writeNamedWriteable(pattern); - out.writeBoolean(caseInsensitive); - } - - @Override - public Object process(Object input) { - return doProcess(source.process(input), pattern.process(input), isCaseInsensitive()); - } - - public static Object doProcess(Object source, Object pattern, boolean caseInsensitive) { - if (source == null) { - return null; - } - if (source instanceof String == false && source instanceof Character == false) { - throw new QlIllegalArgumentException("A string/char is required; received [{}]", source); - } - if (pattern == null) { - return null; - } - if (pattern instanceof String == false && pattern instanceof Character == false) { - throw new QlIllegalArgumentException("A string/char is required; received [{}]", pattern); - } - - if (caseInsensitive == false) { - return source.toString().startsWith(pattern.toString()); - } else { - return source.toString().toLowerCase(Locale.ROOT).startsWith(pattern.toString().toLowerCase(Locale.ROOT)); - } - } - - protected Processor source() { - return source; - } - - protected Processor pattern() { - return pattern; - } - - protected boolean isCaseInsensitive() { - return caseInsensitive; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - - if (obj == null || getClass() != obj.getClass()) { - return false; - } - - StartsWithFunctionProcessor other = (StartsWithFunctionProcessor) obj; - return Objects.equals(source(), other.source()) - && Objects.equals(pattern(), other.pattern()) - && Objects.equals(isCaseInsensitive(), other.isCaseInsensitive()); - } - - @Override - public int hashCode() { - return Objects.hash(source(), pattern(), isCaseInsensitive()); - } - - @Override - public String getWriteableName() { - return NAME; - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/whitelist/InternalQlScriptUtils.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/whitelist/InternalQlScriptUtils.java deleted file mode 100644 index e361d2465a1c5..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/whitelist/InternalQlScriptUtils.java +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.core.expression.function.scalar.whitelist; - -import org.elasticsearch.index.fielddata.ScriptDocValues; -import org.elasticsearch.xpack.esql.core.expression.function.scalar.string.StartsWithFunctionProcessor; -import org.elasticsearch.xpack.esql.core.expression.predicate.logical.BinaryLogicProcessor.BinaryLogicOperation; -import org.elasticsearch.xpack.esql.core.expression.predicate.logical.NotProcessor; -import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.CheckNullProcessor.CheckNullOperation; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.arithmetic.DefaultBinaryArithmeticOperation; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.arithmetic.UnaryArithmeticProcessor.UnaryArithmeticOperation; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonProcessor.BinaryComparisonOperation; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.InProcessor; -import org.elasticsearch.xpack.esql.core.expression.predicate.regex.RegexProcessor.RegexOperation; -import org.elasticsearch.xpack.esql.core.util.StringUtils; - -import java.util.List; -import java.util.Map; - -import static org.elasticsearch.xpack.esql.core.type.DataType.fromTypeName; -import static org.elasticsearch.xpack.esql.core.type.DataTypeConverter.convert; -import static org.elasticsearch.xpack.esql.core.type.DataTypeConverter.toUnsignedLong; - -public class InternalQlScriptUtils { - - // - // Utilities - // - - // safe missing mapping/value extractor - public static Object docValue(Map> doc, String fieldName) { - if (doc.containsKey(fieldName)) { - ScriptDocValues docValues = doc.get(fieldName); - if (docValues.isEmpty() == false) { - return docValues.get(0); - } - } - return null; - } - - public static boolean nullSafeFilter(Boolean filter) { - return filter == null ? false : filter.booleanValue(); - } - - public static double nullSafeSortNumeric(Number sort) { - return sort == null ? 0.0d : sort.doubleValue(); - } - - public static String nullSafeSortString(Object sort) { - return sort == null ? StringUtils.EMPTY : sort.toString(); - } - - public static Number nullSafeCastNumeric(Number number, String typeName) { - return number == null || Double.isNaN(number.doubleValue()) ? null : (Number) convert(number, fromTypeName(typeName)); - } - - public static Number nullSafeCastToUnsignedLong(Number number) { - return number == null || Double.isNaN(number.doubleValue()) ? 
null : toUnsignedLong(number); - } - - // - // Operators - // - - // - // Logical - // - public static Boolean eq(Object left, Object right) { - return BinaryComparisonOperation.EQ.apply(left, right); - } - - public static Boolean nulleq(Object left, Object right) { - return BinaryComparisonOperation.NULLEQ.apply(left, right); - } - - public static Boolean neq(Object left, Object right) { - return BinaryComparisonOperation.NEQ.apply(left, right); - } - - public static Boolean lt(Object left, Object right) { - return BinaryComparisonOperation.LT.apply(left, right); - } - - public static Boolean lte(Object left, Object right) { - return BinaryComparisonOperation.LTE.apply(left, right); - } - - public static Boolean gt(Object left, Object right) { - return BinaryComparisonOperation.GT.apply(left, right); - } - - public static Boolean gte(Object left, Object right) { - return BinaryComparisonOperation.GTE.apply(left, right); - } - - public static Boolean in(Object value, List values) { - return InProcessor.apply(value, values); - } - - public static Boolean and(Boolean left, Boolean right) { - return BinaryLogicOperation.AND.apply(left, right); - } - - public static Boolean or(Boolean left, Boolean right) { - return BinaryLogicOperation.OR.apply(left, right); - } - - public static Boolean not(Boolean expression) { - return NotProcessor.apply(expression); - } - - public static Boolean isNull(Object expression) { - return CheckNullOperation.IS_NULL.test(expression); - } - - public static Boolean isNotNull(Object expression) { - return CheckNullOperation.IS_NOT_NULL.test(expression); - } - - // - // Regex - // - public static Boolean regex(String value, String pattern) { - return regex(value, pattern, Boolean.FALSE); - } - - public static Boolean regex(String value, String pattern, Boolean caseInsensitive) { - // TODO: this needs to be improved to avoid creating the pattern on every call - return RegexOperation.match(value, pattern, caseInsensitive); - } - - // - // Math - // - public static Number add(Number left, Number right) { - return (Number) DefaultBinaryArithmeticOperation.ADD.apply(left, right); - } - - public static Number div(Number left, Number right) { - return (Number) DefaultBinaryArithmeticOperation.DIV.apply(left, right); - } - - public static Number mod(Number left, Number right) { - return (Number) DefaultBinaryArithmeticOperation.MOD.apply(left, right); - } - - public static Number mul(Number left, Number right) { - return (Number) DefaultBinaryArithmeticOperation.MUL.apply(left, right); - } - - public static Number neg(Number value) { - return UnaryArithmeticOperation.NEGATE.apply(value); - } - - public static Number sub(Number left, Number right) { - return (Number) DefaultBinaryArithmeticOperation.SUB.apply(left, right); - } - - // - // String - // - public static Boolean startsWith(String s, String pattern, Boolean caseInsensitive) { - return (Boolean) StartsWithFunctionProcessor.doProcess(s, pattern, caseInsensitive); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/BinaryProcessor.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/BinaryProcessor.java deleted file mode 100644 index 13c4498e54986..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/BinaryProcessor.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.expression.gen.processor; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; - -public abstract class BinaryProcessor implements Processor { - - private final Processor left, right; - - public BinaryProcessor(Processor left, Processor right) { - this.left = left; - this.right = right; - } - - protected BinaryProcessor(StreamInput in) throws IOException { - left = in.readNamedWriteable(Processor.class); - right = in.readNamedWriteable(Processor.class); - } - - @Override - public final void writeTo(StreamOutput out) throws IOException { - out.writeNamedWriteable(left); - out.writeNamedWriteable(right); - doWrite(out); - } - - protected abstract void doWrite(StreamOutput out) throws IOException; - - @Override - public Object process(Object input) { - Object l = left.process(input); - if (l == null) { - return null; - } - checkParameter(l); - - Object r = right.process(input); - if (r == null) { - return null; - } - checkParameter(r); - - return doProcess(l, r); - } - - /** - * Checks the parameter (typically for its type) if the value is not null. - */ - protected void checkParameter(Object param) { - // no-op - } - - protected Processor left() { - return left; - } - - protected Processor right() { - return right; - } - - protected abstract Object doProcess(Object left, Object right); -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/BucketExtractorProcessor.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/BucketExtractorProcessor.java deleted file mode 100644 index afd4efc0e88e7..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/BucketExtractorProcessor.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.expression.gen.processor; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; -import org.elasticsearch.xpack.esql.core.QlIllegalArgumentException; -import org.elasticsearch.xpack.esql.core.execution.search.extractor.BucketExtractor; - -import java.io.IOException; -import java.util.Objects; - -/** - * Processor wrapping an {@link BucketExtractor}, essentially being a source/leaf of a - * Processor tree. 
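The deleted InternalQlScriptUtils.regex() carries a TODO about recreating the pattern on every call. One possible way to address that, sketched here as an assumption rather than what ESQL actually does, is to memoize compiled patterns keyed by pattern text and case sensitivity (CachedRegex is an illustrative name; the sketch assumes full-string regex matching):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.regex.Pattern;

    class CachedRegex {
        // Compiled patterns, keyed by flags plus pattern text, so repeated calls
        // with the same pattern skip Pattern.compile().
        private static final Map<String, Pattern> CACHE = new ConcurrentHashMap<>();

        static Boolean match(String value, String pattern, boolean caseInsensitive) {
            if (value == null || pattern == null) {
                return null;
            }
            int flags = caseInsensitive ? Pattern.CASE_INSENSITIVE | Pattern.UNICODE_CASE : 0;
            Pattern compiled = CACHE.computeIfAbsent(flags + ":" + pattern, k -> Pattern.compile(pattern, flags));
            return compiled.matcher(value).matches();
        }

        public static void main(String[] args) {
            System.out.println(match("foobar", "foo.*", false)); // true
            System.out.println(match("FOOBAR", "foo.*", true));  // true
        }
    }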
- */ -public class BucketExtractorProcessor implements Processor { - - public static final String NAME = "a"; - - private final BucketExtractor extractor; - - public BucketExtractorProcessor(BucketExtractor extractor) { - this.extractor = extractor; - } - - public BucketExtractorProcessor(StreamInput in) throws IOException { - extractor = in.readNamedWriteable(BucketExtractor.class); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeNamedWriteable(extractor); - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - public Object process(Object input) { - if ((input instanceof Bucket) == false) { - throw new QlIllegalArgumentException("Expected an agg bucket but received {}", input); - } - return extractor.extract((Bucket) input); - } - - @Override - public int hashCode() { - return Objects.hash(extractor); - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - - if (obj == null || getClass() != obj.getClass()) { - return false; - } - - BucketExtractorProcessor other = (BucketExtractorProcessor) obj; - return Objects.equals(extractor, other.extractor); - } - - @Override - public String toString() { - return extractor.toString(); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/ChainingProcessor.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/ChainingProcessor.java deleted file mode 100644 index 60e60bc264369..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/ChainingProcessor.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.expression.gen.processor; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; -import java.util.Objects; - -/** - * A {@linkplain Processor} that composes the results of two - * {@linkplain Processor}s. 
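The BinaryProcessor removed a couple of hunks above fixes the evaluation order for binary operations: evaluate the left child, short-circuit to null if it is null, evaluate the right child, short-circuit again, and only then apply the operation (with an optional checkParameter hook for type checks). A standalone restatement using functional interfaces in place of the Processor tree (BinaryEval and the toy children are illustrative):

    import java.util.function.BiFunction;
    import java.util.function.Function;

    // Stand-in for a binary processor: evaluate left, short-circuit on null,
    // evaluate right, short-circuit on null, then apply the operation.
    class BinaryEval {
        static Object process(Object input,
                              Function<Object, Object> left,
                              Function<Object, Object> right,
                              BiFunction<Object, Object, Object> op) {
            Object l = left.apply(input);
            if (l == null) {
                return null;
            }
            Object r = right.apply(input);
            if (r == null) {
                return null;
            }
            return op.apply(l, r);
        }

        public static void main(String[] args) {
            // "input" is ignored by these toy children; the right child yields null.
            System.out.println(process("row", in -> 1, in -> null, (a, b) -> "never"));                 // null
            System.out.println(process("row", in -> 2, in -> 3, (a, b) -> (Integer) a + (Integer) b));  // 5
        }
    }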
- */ -public class ChainingProcessor extends UnaryProcessor { - public static final String NAME = "."; - - private final Processor processor; - - public ChainingProcessor(Processor first, Processor second) { - super(first); - this.processor = second; - } - - public ChainingProcessor(StreamInput in) throws IOException { - super(in); - processor = in.readNamedWriteable(Processor.class); - } - - @Override - protected void doWrite(StreamOutput out) throws IOException { - out.writeNamedWriteable(processor); - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - protected Object doProcess(Object input) { - return processor.process(input); - } - - Processor first() { - return child(); - } - - Processor second() { - return processor; - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), processor); - } - - @Override - public boolean equals(Object obj) { - return super.equals(obj) && Objects.equals(processor, ((ChainingProcessor) obj).processor); - } - - @Override - public String toString() { - return processor + "(" + super.toString() + ")"; - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/ConstantNamedWriteable.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/ConstantNamedWriteable.java deleted file mode 100644 index 97733ed4d705f..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/ConstantNamedWriteable.java +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.core.expression.gen.processor; - -import org.elasticsearch.common.io.stream.NamedWriteable; - -/** - * Marker interface used by QL for pluggable constant serialization. - */ -public interface ConstantNamedWriteable extends NamedWriteable { - -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/ConstantProcessor.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/ConstantProcessor.java deleted file mode 100644 index ad426b641ed06..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/ConstantProcessor.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
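ChainingProcessor, deleted above, is plain function composition: the second processor runs on the output of the first, which is why its toString reads as second(first). The same behaviour falls out of java.util.function composition; a brief standalone sketch:

    import java.util.function.Function;

    class ChainingDemo {
        public static void main(String[] args) {
            Function<Object, Object> first = o -> o.toString().trim();
            Function<Object, Object> second = o -> ((String) o).length();

            // Equivalent of ChainingProcessor(first, second): second.process(first.process(input)).
            Function<Object, Object> chained = first.andThen(second);

            System.out.println(chained.apply("  hello  ")); // 5
        }
    }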
- */ -package org.elasticsearch.xpack.esql.core.expression.gen.processor; - -import org.elasticsearch.common.io.stream.NamedWriteable; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.versionfield.Version; - -import java.io.IOException; -import java.time.Instant; -import java.time.ZoneId; -import java.time.ZonedDateTime; -import java.util.Objects; - -public class ConstantProcessor implements Processor { - - public static String NAME = "c"; - - private Object constant; - private final Type type; - - enum Type { - NAMED_WRITABLE, - ZONEDDATETIME, - GENERIC, - VERSION // Version is in x-pack, so StreamInput/Output cannot manage it as a generic type - } - - public ConstantProcessor(Object value) { - this.constant = value; - if (value instanceof NamedWriteable) { - type = Type.NAMED_WRITABLE; - } else if (value instanceof ZonedDateTime) { - type = Type.ZONEDDATETIME; - } else if (value instanceof Version) { - type = Type.VERSION; - } else { - type = Type.GENERIC; - } - } - - public ConstantProcessor(StreamInput in) throws IOException { - type = in.readEnum(Type.class); - switch (type) { - case NAMED_WRITABLE -> constant = in.readNamedWriteable(ConstantNamedWriteable.class); - case ZONEDDATETIME -> { - ZonedDateTime zdt; - ZoneId zoneId = in.readZoneId(); - zdt = ZonedDateTime.ofInstant(Instant.ofEpochMilli(in.readLong()), zoneId); - constant = zdt.withNano(in.readInt()); - } - case VERSION -> constant = new Version(in.readString()); - case GENERIC -> constant = in.readGenericValue(); - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeEnum(type); - switch (type) { - case NAMED_WRITABLE -> out.writeNamedWriteable((NamedWriteable) constant); - case ZONEDDATETIME -> { - ZonedDateTime zdt = (ZonedDateTime) constant; - out.writeZoneId(zdt.getZone()); - out.writeLong(zdt.toInstant().toEpochMilli()); - out.writeInt(zdt.getNano()); - } - case VERSION -> out.writeString(constant.toString()); - case GENERIC -> out.writeGenericValue(constant); - } - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - public Object process(Object input) { - return constant; - } - - @Override - public int hashCode() { - return Objects.hashCode(constant); - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - - if (obj == null || getClass() != obj.getClass()) { - return false; - } - - ConstantProcessor other = (ConstantProcessor) obj; - return Objects.equals(constant, other.constant); - } - - @Override - public String toString() { - return "^" + constant; - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/FunctionalBinaryProcessor.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/FunctionalBinaryProcessor.java deleted file mode 100644 index 3713102b893f1..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/FunctionalBinaryProcessor.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
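The deleted ConstantProcessor dispatches on a small Type enum when serializing its constant; the ZonedDateTime case writes the zone id, the millisecond instant and the nano-of-second separately, and the reader restores the sub-millisecond part with withNano. A standalone round trip of just that encoding, using plain DataOutput/DataInput streams as a stand-in for Elasticsearch's StreamOutput/StreamInput:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.time.Instant;
    import java.time.ZoneId;
    import java.time.ZonedDateTime;

    class ZonedDateTimeRoundTrip {
        public static void main(String[] args) throws IOException {
            ZonedDateTime original = ZonedDateTime.now(ZoneId.of("Europe/Amsterdam"));

            // Write: zone id, millisecond instant, then the nano-of-second separately
            // (epoch millis alone would lose sub-millisecond precision).
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            try (DataOutputStream out = new DataOutputStream(bytes)) {
                out.writeUTF(original.getZone().getId());
                out.writeLong(original.toInstant().toEpochMilli());
                out.writeInt(original.getNano());
            }

            // Read: rebuild from millis, then restore the full nano component.
            try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
                ZoneId zone = ZoneId.of(in.readUTF());
                ZonedDateTime restored = ZonedDateTime.ofInstant(Instant.ofEpochMilli(in.readLong()), zone)
                    .withNano(in.readInt());
                System.out.println(original.equals(restored)); // true
            }
        }
    }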
- */ - -package org.elasticsearch.xpack.esql.core.expression.gen.processor; - -import org.elasticsearch.common.io.stream.StreamInput; - -import java.io.IOException; -import java.util.Objects; -import java.util.function.BiFunction; - -/** - * Base class for definition binary processors based on functions (for applying). - */ -public abstract class FunctionalBinaryProcessor> extends BinaryProcessor { - - private final F function; - - protected FunctionalBinaryProcessor(Processor left, Processor right, F function) { - super(left, right); - this.function = function; - } - - protected FunctionalBinaryProcessor(StreamInput in, Reader reader) throws IOException { - super(in); - this.function = reader.read(in); - } - - public F function() { - return function; - } - - @SuppressWarnings("unchecked") - @Override - protected Object doProcess(Object left, Object right) { - return function.apply((T) left, (U) right); - } - - @Override - public int hashCode() { - return Objects.hash(left(), right(), function()); - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - - if (obj == null || getClass() != obj.getClass()) { - return false; - } - - FunctionalBinaryProcessor other = (FunctionalBinaryProcessor) obj; - return Objects.equals(function(), other.function()) - && Objects.equals(left(), other.left()) - && Objects.equals(right(), other.right()); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/FunctionalEnumBinaryProcessor.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/FunctionalEnumBinaryProcessor.java deleted file mode 100644 index 352cea13535c1..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/FunctionalEnumBinaryProcessor.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.core.expression.gen.processor; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; -import java.util.function.BiFunction; - -/** - * Base class for definition binary processors based on functions (for applying) defined as enums (for serialization purposes). 
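FunctionalBinaryProcessor above carries its operation as an enum that also implements BiFunction, and the FunctionalEnumBinaryProcessor whose body follows serializes that behaviour simply by writing the enum constant. A standalone sketch of the pattern, with a name-based round trip standing in for writeEnum/readEnum (ArithmeticOp is an illustrative example, not one of the ESQL operations):

    import java.util.function.BiFunction;

    // An operation carried as an enum constant so it can be (de)serialized by name,
    // mirroring the "enum implements BiFunction" shape used by the deleted processors.
    enum ArithmeticOp implements BiFunction<Integer, Integer, Integer> {
        ADD((l, r) -> l + r),
        MUL((l, r) -> l * r);

        private final BiFunction<Integer, Integer, Integer> process;

        ArithmeticOp(BiFunction<Integer, Integer, Integer> process) {
            this.process = process;
        }

        @Override
        public Integer apply(Integer l, Integer r) {
            return process.apply(l, r);
        }
    }

    class EnumOpDemo {
        public static void main(String[] args) {
            // "Serialize" the behaviour as its constant name, then restore it:
            // the plain-Java analogue of writeEnum/readEnum.
            String wire = ArithmeticOp.MUL.name();
            System.out.println(ArithmeticOp.valueOf(wire).apply(6, 7)); // 42
        }
    }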
- */ -public abstract class FunctionalEnumBinaryProcessor & BiFunction> extends FunctionalBinaryProcessor< - T, - U, - R, - F> { - - protected FunctionalEnumBinaryProcessor(Processor left, Processor right, F function) { - super(left, right, function); - } - - protected FunctionalEnumBinaryProcessor(StreamInput in, Reader reader) throws IOException { - super(in, reader); - } - - @Override - protected void doWrite(StreamOutput out) throws IOException { - out.writeEnum(function()); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/HitExtractorProcessor.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/HitExtractorProcessor.java deleted file mode 100644 index 1662a8192acf9..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/HitExtractorProcessor.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.expression.gen.processor; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.xpack.esql.core.QlIllegalArgumentException; -import org.elasticsearch.xpack.esql.core.execution.search.extractor.HitExtractor; - -import java.io.IOException; -import java.util.Objects; - -/** - * Processor wrapping a {@link HitExtractor}, essentially being a source/leaf of a - * Processor tree. - */ -public class HitExtractorProcessor implements Processor { - - public static final String NAME = "h"; - - private final HitExtractor extractor; - - public HitExtractorProcessor(HitExtractor extractor) { - this.extractor = extractor; - } - - public HitExtractorProcessor(StreamInput in) throws IOException { - extractor = in.readNamedWriteable(HitExtractor.class); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeNamedWriteable(extractor); - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - public Object process(Object input) { - if ((input instanceof SearchHit) == false) { - throw new QlIllegalArgumentException("Expected a SearchHit but received {}", input); - } - return extractor.extract((SearchHit) input); - } - - @Override - public int hashCode() { - return Objects.hash(extractor); - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - - if (obj == null || getClass() != obj.getClass()) { - return false; - } - - HitExtractorProcessor other = (HitExtractorProcessor) obj; - return Objects.equals(extractor, other.extractor); - } - - @Override - public String toString() { - return extractor.toString(); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/Processor.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/Processor.java deleted file mode 100644 index bafdf3b05f40c..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/Processor.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.expression.gen.processor; - -import org.elasticsearch.common.io.stream.NamedWriteable; - -/** - * A {@code Processor} evaluates locally an expression. For instance, ABS(foo). - * Aggregate functions are handled by ES but scalars are not. - * - * This is an opaque class, the computed/compiled result gets saved on the client during scrolling. - */ -public interface Processor extends NamedWriteable { - - Object process(Object input); -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/UnaryProcessor.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/UnaryProcessor.java deleted file mode 100644 index 4ddf851ce3c27..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/UnaryProcessor.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.expression.gen.processor; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; -import java.util.Objects; - -public abstract class UnaryProcessor implements Processor { - - private final Processor child; - - public UnaryProcessor(Processor child) { - this.child = child; - } - - protected UnaryProcessor(StreamInput in) throws IOException { - child = in.readNamedWriteable(Processor.class); - } - - @Override - public final void writeTo(StreamOutput out) throws IOException { - out.writeNamedWriteable(child); - doWrite(out); - } - - protected abstract void doWrite(StreamOutput out) throws IOException; - - @Override - public final Object process(Object input) { - return doProcess(child.process(input)); - } - - public Processor child() { - return child; - } - - protected abstract Object doProcess(Object input); - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - - if (obj == null || getClass() != obj.getClass()) { - return false; - } - - UnaryProcessor other = (UnaryProcessor) obj; - return Objects.equals(child, other.child); - } - - @Override - public int hashCode() { - return Objects.hashCode(child); - } - - @Override - public String toString() { - return Objects.toString(child); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/Range.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/Range.java index ee48fd84b8add..e734f97573c1c 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/Range.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/Range.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.esql.core.expression.predicate; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; import 
org.elasticsearch.xpack.esql.core.expression.function.scalar.ScalarFunction; import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparison; @@ -14,6 +15,7 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.DateUtils; +import java.io.IOException; import java.time.DateTimeException; import java.time.ZoneId; import java.util.List; @@ -39,6 +41,16 @@ public Range(Source src, Expression value, Expression lower, boolean inclLower, this.zoneId = zoneId; } + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + @Override protected NodeInfo info() { return NodeInfo.create(this, Range::new, value, lower, includeLower, upper, includeUpper, zoneId); diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/fulltext/FullTextPredicate.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/fulltext/FullTextPredicate.java index e8ca84bc72988..29a567e83211d 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/fulltext/FullTextPredicate.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/fulltext/FullTextPredicate.java @@ -15,7 +15,6 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.PlanStreamInput; -import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput; import java.io.IOException; import java.util.List; @@ -57,7 +56,7 @@ protected FullTextPredicate(StreamInput in) throws IOException { Source.readFrom((StreamInput & PlanStreamInput) in), in.readString(), in.readOptionalString(), - in.readCollectionAsList(input -> ((PlanStreamInput) in).readExpression()) + in.readNamedWriteableCollectionAsList(Expression.class) ); } @@ -92,7 +91,7 @@ public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); out.writeString(query); out.writeOptionalString(options); - out.writeCollection(children(), (o, v) -> ((PlanStreamOutput) o).writeExpression(v)); + out.writeNamedWriteableCollection(children()); } @Override diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/And.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/And.java index 81418aa78ce57..d2b801a012d0c 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/And.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/And.java @@ -6,19 +6,32 @@ */ package org.elasticsearch.xpack.esql.core.expression.predicate.logical; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.predicate.Negatable; import org.elasticsearch.xpack.esql.core.expression.predicate.Predicates; -import org.elasticsearch.xpack.esql.core.expression.predicate.logical.BinaryLogicProcessor.BinaryLogicOperation; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import 
org.elasticsearch.xpack.esql.core.tree.Source; +import java.io.IOException; + public class And extends BinaryLogic implements Negatable { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "And", And::new); public And(Source source, Expression left, Expression right) { super(source, left, right, BinaryLogicOperation.AND); } + private And(StreamInput in) throws IOException { + super(in, BinaryLogicOperation.AND); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override protected NodeInfo info() { return NodeInfo.create(this, And::new, left(), right()); diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/BinaryLogic.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/BinaryLogic.java index 39de0e0643c13..210e8265dcfe9 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/BinaryLogic.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/BinaryLogic.java @@ -6,13 +6,17 @@ */ package org.elasticsearch.xpack.esql.core.expression.predicate.logical; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Nullability; import org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal; import org.elasticsearch.xpack.esql.core.expression.predicate.BinaryOperator; -import org.elasticsearch.xpack.esql.core.expression.predicate.logical.BinaryLogicProcessor.BinaryLogicOperation; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.util.PlanStreamInput; + +import java.io.IOException; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isBoolean; @@ -22,6 +26,22 @@ protected BinaryLogic(Source source, Expression left, Expression right, BinaryLo super(source, left, right, operation); } + protected BinaryLogic(StreamInput in, BinaryLogicOperation op) throws IOException { + this( + Source.readFrom((StreamInput & PlanStreamInput) in), + in.readNamedWriteable(Expression.class), + in.readNamedWriteable(Expression.class), + op + ); + } + + @Override + public final void writeTo(StreamOutput out) throws IOException { + Source.EMPTY.writeTo(out); + out.writeNamedWriteable(left()); + out.writeNamedWriteable(right()); + } + @Override public DataType dataType() { return DataType.BOOLEAN; diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/BinaryLogicOperation.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/BinaryLogicOperation.java new file mode 100644 index 0000000000000..8b8224334654a --- /dev/null +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/BinaryLogicOperation.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
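And now exposes a NamedWriteableRegistry.Entry pairing the Expression category, the name "And" and the reader And::new, which is what lets readNamedWriteable pick the right constructor on the other side of the wire. A minimal standalone analogue of that name-to-reader dispatch (ToyRegistry, Node and the String payload are illustrative assumptions, not the Elasticsearch registry API; records need Java 16+):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Function;

    // A toy "named writeable" registry: each concrete type registers a name and a
    // reader; deserialization looks the reader up by name and applies it to the payload.
    class ToyRegistry {
        interface Node { String describe(); }

        record Entry(String name, Function<String, Node> reader) {}

        private final Map<String, Function<String, Node>> readers = new HashMap<>();

        void register(Entry entry) {
            readers.put(entry.name(), entry.reader());
        }

        Node read(String name, String payload) {
            return readers.get(name).apply(payload);
        }

        public static void main(String[] args) {
            record Literal(String value) implements Node {
                public String describe() { return "literal:" + value; }
            }

            ToyRegistry registry = new ToyRegistry();
            registry.register(new Entry("Literal", Literal::new));

            // The wire format is effectively (name, payload); the name selects the reader.
            System.out.println(registry.read("Literal", "42").describe()); // literal:42
        }
    }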
+ */ + +package org.elasticsearch.xpack.esql.core.expression.predicate.logical; + +import org.elasticsearch.xpack.esql.core.expression.predicate.PredicateBiFunction; + +import java.util.function.BiFunction; + +public enum BinaryLogicOperation implements PredicateBiFunction { + + AND((l, r) -> { + if (Boolean.FALSE.equals(l) || Boolean.FALSE.equals(r)) { + return Boolean.FALSE; + } + if (l == null || r == null) { + return null; + } + return Boolean.logicalAnd(l.booleanValue(), r.booleanValue()); + }, "AND"), + OR((l, r) -> { + if (Boolean.TRUE.equals(l) || Boolean.TRUE.equals(r)) { + return Boolean.TRUE; + } + if (l == null || r == null) { + return null; + } + return Boolean.logicalOr(l.booleanValue(), r.booleanValue()); + }, "OR"); + + private final BiFunction process; + private final String symbol; + + BinaryLogicOperation(BiFunction process, String symbol) { + this.process = process; + this.symbol = symbol; + } + + @Override + public String symbol() { + return symbol; + } + + @Override + public Boolean apply(Boolean left, Boolean right) { + return process.apply(left, right); + } + + @Override + public final Boolean doApply(Boolean left, Boolean right) { + return null; + } + + @Override + public String toString() { + return symbol; + } +} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/BinaryLogicProcessor.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/BinaryLogicProcessor.java deleted file mode 100644 index 14d6b819e87fe..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/BinaryLogicProcessor.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
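The new BinaryLogicOperation keeps the three-valued semantics of the old processor: FALSE dominates AND and TRUE dominates OR even when the other operand is null, and only the remaining null cases stay null. A quick standalone check of that truth table, restated as plain statics (TriLogic is an illustrative name):

    final class TriLogic {
        // Same rules as BinaryLogicOperation.AND/OR above.
        static Boolean and(Boolean l, Boolean r) {
            if (Boolean.FALSE.equals(l) || Boolean.FALSE.equals(r)) return Boolean.FALSE;
            if (l == null || r == null) return null;
            return l && r;
        }

        static Boolean or(Boolean l, Boolean r) {
            if (Boolean.TRUE.equals(l) || Boolean.TRUE.equals(r)) return Boolean.TRUE;
            if (l == null || r == null) return null;
            return l || r;
        }

        public static void main(String[] args) {
            System.out.println(and(Boolean.FALSE, null)); // false: FALSE wins regardless of null
            System.out.println(and(Boolean.TRUE, null));  // null: the unknown operand is decisive
            System.out.println(or(Boolean.TRUE, null));   // true: TRUE wins regardless of null
            System.out.println(or(Boolean.FALSE, null));  // null
        }
    }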
- */ -package org.elasticsearch.xpack.esql.core.expression.predicate.logical; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xpack.esql.core.QlIllegalArgumentException; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.FunctionalEnumBinaryProcessor; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.Processor; -import org.elasticsearch.xpack.esql.core.expression.predicate.PredicateBiFunction; -import org.elasticsearch.xpack.esql.core.expression.predicate.logical.BinaryLogicProcessor.BinaryLogicOperation; - -import java.io.IOException; -import java.util.function.BiFunction; - -public class BinaryLogicProcessor extends FunctionalEnumBinaryProcessor { - - public enum BinaryLogicOperation implements PredicateBiFunction { - - AND((l, r) -> { - if (Boolean.FALSE.equals(l) || Boolean.FALSE.equals(r)) { - return Boolean.FALSE; - } - if (l == null || r == null) { - return null; - } - return Boolean.logicalAnd(l.booleanValue(), r.booleanValue()); - }, "AND"), - OR((l, r) -> { - if (Boolean.TRUE.equals(l) || Boolean.TRUE.equals(r)) { - return Boolean.TRUE; - } - if (l == null || r == null) { - return null; - } - return Boolean.logicalOr(l.booleanValue(), r.booleanValue()); - }, "OR"); - - private final BiFunction process; - private final String symbol; - - BinaryLogicOperation(BiFunction process, String symbol) { - this.process = process; - this.symbol = symbol; - } - - @Override - public String symbol() { - return symbol; - } - - @Override - public Boolean apply(Boolean left, Boolean right) { - return process.apply(left, right); - } - - @Override - public final Boolean doApply(Boolean left, Boolean right) { - return null; - } - - @Override - public String toString() { - return symbol; - } - } - - public static final String NAME = "lb"; - - public BinaryLogicProcessor(Processor left, Processor right, BinaryLogicOperation operation) { - super(left, right, operation); - } - - public BinaryLogicProcessor(StreamInput in) throws IOException { - super(in, i -> i.readEnum(BinaryLogicOperation.class)); - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - protected void checkParameter(Object param) { - if (param != null && (param instanceof Boolean) == false) { - throw new QlIllegalArgumentException("A boolean is required; received {}", param); - } - } - - @Override - public Object process(Object input) { - Object l = left().process(input); - checkParameter(l); - Object r = right().process(input); - checkParameter(r); - - return doProcess(l, r); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/Not.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/Not.java index 5f183a1cc26ea..c4983b49a6bc8 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/Not.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/Not.java @@ -8,9 +8,9 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.xpack.esql.core.QlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.function.scalar.UnaryScalarFunction; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.Processor; import 
org.elasticsearch.xpack.esql.core.expression.predicate.Negatable; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -57,12 +57,19 @@ protected TypeResolution resolveType() { @Override public Object fold() { - return NotProcessor.INSTANCE.process(field().fold()); + return apply(field().fold()); } - @Override - protected Processor makeProcessor() { - return NotProcessor.INSTANCE; + private static Boolean apply(Object input) { + if (input == null) { + return null; + } + + if ((input instanceof Boolean) == false) { + throw new QlIllegalArgumentException("A boolean is required; received {}", input); + } + + return ((Boolean) input).booleanValue() ? Boolean.FALSE : Boolean.TRUE; } @Override diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/NotProcessor.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/NotProcessor.java deleted file mode 100644 index 5f633c902dff0..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/NotProcessor.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.expression.predicate.logical; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.esql.core.QlIllegalArgumentException; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.Processor; - -import java.io.IOException; - -public class NotProcessor implements Processor { - - public static final NotProcessor INSTANCE = new NotProcessor(); - - public static final String NAME = "ln"; - - private NotProcessor() {} - - public NotProcessor(StreamInput in) throws IOException {} - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - public void writeTo(StreamOutput out) throws IOException {} - - @Override - public Object process(Object input) { - return apply(input); - } - - public static Boolean apply(Object input) { - if (input == null) { - return null; - } - - if ((input instanceof Boolean) == false) { - throw new QlIllegalArgumentException("A boolean is required; received {}", input); - } - - return ((Boolean) input).booleanValue() ? 
Boolean.FALSE : Boolean.TRUE; - } - - @Override - public int hashCode() { - return NotProcessor.class.hashCode(); - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - - return obj == null || getClass() != obj.getClass(); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/Or.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/Or.java index 16781426d2323..bf7a16aec8df9 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/Or.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/Or.java @@ -6,19 +6,32 @@ */ package org.elasticsearch.xpack.esql.core.expression.predicate.logical; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.predicate.Negatable; import org.elasticsearch.xpack.esql.core.expression.predicate.Predicates; -import org.elasticsearch.xpack.esql.core.expression.predicate.logical.BinaryLogicProcessor.BinaryLogicOperation; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; +import java.io.IOException; + public class Or extends BinaryLogic implements Negatable { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Or", Or::new); public Or(Source source, Expression left, Expression right) { super(source, left, right, BinaryLogicOperation.OR); } + private Or(StreamInput in) throws IOException { + super(in, BinaryLogicOperation.OR); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override protected NodeInfo info() { return NodeInfo.create(this, Or::new, left(), right()); diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/nulls/CheckNullProcessor.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/nulls/CheckNullProcessor.java deleted file mode 100644 index 10503fcd00178..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/nulls/CheckNullProcessor.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
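Not now folds inline through the private apply shown above: null stays null, non-boolean input is rejected, and a boolean is flipped; the separate NotProcessor is gone. A standalone restatement of the same contract, with IllegalArgumentException standing in for QlIllegalArgumentException:

    final class NotDemo {
        static Boolean not(Object input) {
            if (input == null) {
                return null; // unknown stays unknown
            }
            if ((input instanceof Boolean) == false) {
                throw new IllegalArgumentException("A boolean is required; received " + input);
            }
            return ((Boolean) input) ? Boolean.FALSE : Boolean.TRUE;
        }

        public static void main(String[] args) {
            System.out.println(not(Boolean.TRUE)); // false
            System.out.println(not(null));         // null
        }
    }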
- */ -package org.elasticsearch.xpack.esql.core.expression.predicate.nulls; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.Processor; - -import java.io.IOException; -import java.util.Objects; -import java.util.function.Predicate; - -public class CheckNullProcessor implements Processor { - - public enum CheckNullOperation implements Predicate { - - IS_NULL(Objects::isNull, "IS NULL"), - IS_NOT_NULL(Objects::nonNull, "IS NOT NULL"); - - private final Predicate process; - private final String symbol; - - CheckNullOperation(Predicate process, String symbol) { - this.process = process; - this.symbol = symbol; - } - - public String symbol() { - return symbol; - } - - @Override - public String toString() { - return symbol; - } - - @Override - public boolean test(Object o) { - return process.test(o); - } - } - - public static final String NAME = "nckn"; - - private final CheckNullOperation operation; - - CheckNullProcessor(CheckNullOperation operation) { - this.operation = operation; - } - - public CheckNullProcessor(StreamInput in) throws IOException { - this(in.readEnum(CheckNullOperation.class)); - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeEnum(operation); - } - - @Override - public Object process(Object input) { - return operation.test(input); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - CheckNullProcessor that = (CheckNullProcessor) o; - return operation == that.operation; - } - - @Override - public int hashCode() { - return Objects.hash(operation); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/nulls/IsNotNull.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/nulls/IsNotNull.java index e365480a6fd79..9879a1f5ffc29 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/nulls/IsNotNull.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/nulls/IsNotNull.java @@ -11,9 +11,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Nullability; import org.elasticsearch.xpack.esql.core.expression.function.scalar.UnaryScalarFunction; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.Processor; import org.elasticsearch.xpack.esql.core.expression.predicate.Negatable; -import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.CheckNullProcessor.CheckNullOperation; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; @@ -55,11 +53,6 @@ public Object fold() { return field().fold() != null && DataType.isNull(field().dataType()) == false; } - @Override - protected Processor makeProcessor() { - return new CheckNullProcessor(CheckNullOperation.IS_NOT_NULL); - } - @Override public Nullability nullable() { return Nullability.FALSE; diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/nulls/IsNull.java 
b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/nulls/IsNull.java index 8b6eb5d4404b0..d88945045b03e 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/nulls/IsNull.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/nulls/IsNull.java @@ -11,9 +11,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Nullability; import org.elasticsearch.xpack.esql.core.expression.function.scalar.UnaryScalarFunction; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.Processor; import org.elasticsearch.xpack.esql.core.expression.predicate.Negatable; -import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.CheckNullProcessor.CheckNullOperation; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; @@ -51,11 +49,6 @@ public Object fold() { return field().fold() == null || DataType.isNull(field().dataType()); } - @Override - protected Processor makeProcessor() { - return new CheckNullProcessor(CheckNullOperation.IS_NULL); - } - @Override public Nullability nullable() { return Nullability.FALSE; diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/arithmetic/BinaryArithmeticProcessor.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/arithmetic/BinaryArithmeticProcessor.java deleted file mode 100644 index 73e3ed560d6fa..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/arithmetic/BinaryArithmeticProcessor.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
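With CheckNullProcessor removed, IsNull and IsNotNull fold the check directly, and they also treat a child whose declared type is the NULL type as null. A standalone sketch of the two checks as a Predicate enum plus the fold shape (NullCheck and foldIsNull are illustrative; the DataType test is reduced to a boolean flag):

    import java.util.Objects;
    import java.util.function.Predicate;

    // The two null checks as predicates, mirroring the deleted CheckNullOperation.
    enum NullCheck implements Predicate<Object> {
        IS_NULL(Objects::isNull, "IS NULL"),
        IS_NOT_NULL(Objects::nonNull, "IS NOT NULL");

        private final Predicate<Object> check;
        private final String symbol;

        NullCheck(Predicate<Object> check, String symbol) {
            this.check = check;
            this.symbol = symbol;
        }

        @Override
        public boolean test(Object o) {
            return check.test(o);
        }

        @Override
        public String toString() {
            return symbol;
        }
    }

    class NullCheckDemo {
        // Shape of the new IsNull.fold(): the folded value is null OR the child's
        // declared type is the NULL type (reduced here to a boolean flag).
        static boolean foldIsNull(Object foldedChild, boolean childTypeIsNull) {
            return foldedChild == null || childTypeIsNull;
        }

        public static void main(String[] args) {
            System.out.println(NullCheck.IS_NOT_NULL.test("x")); // true
            System.out.println(foldIsNull(null, false));         // true
        }
    }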
- */ -package org.elasticsearch.xpack.esql.core.expression.predicate.operator.arithmetic; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.FunctionalBinaryProcessor; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.Processor; - -import java.io.IOException; - -public final class BinaryArithmeticProcessor extends FunctionalBinaryProcessor { - - public static final String NAME = "abn"; - - public BinaryArithmeticProcessor(Processor left, Processor right, BinaryArithmeticOperation operation) { - super(left, right, operation); - } - - public BinaryArithmeticProcessor(StreamInput in) throws IOException { - super(in, i -> i.readNamedWriteable(BinaryArithmeticOperation.class)); - } - - @Override - protected void doWrite(StreamOutput out) throws IOException { - out.writeNamedWriteable(function()); - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - protected Object doProcess(Object left, Object right) { - BinaryArithmeticOperation f = function(); - - if (left == null || right == null) { - return null; - } - - return f.apply(left, right); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/arithmetic/Neg.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/arithmetic/Neg.java index c7cb2bb3e3832..9a8a14f320cd6 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/arithmetic/Neg.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/arithmetic/Neg.java @@ -6,14 +6,15 @@ */ package org.elasticsearch.xpack.esql.core.expression.predicate.operator.arithmetic; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.function.scalar.UnaryScalarFunction; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.Processor; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.arithmetic.UnaryArithmeticProcessor.UnaryArithmeticOperation; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; +import java.io.IOException; + import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isNumeric; @@ -26,6 +27,16 @@ public Neg(Source source, Expression field) { super(source, field); } + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + @Override protected NodeInfo info() { return NodeInfo.create(this, Neg::new, field()); @@ -50,9 +61,4 @@ public Object fold() { public DataType dataType() { return field().dataType(); } - - @Override - protected Processor makeProcessor() { - return new UnaryArithmeticProcessor(UnaryArithmeticOperation.NEGATE); - } } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/arithmetic/UnaryArithmeticProcessor.java 
b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/arithmetic/UnaryArithmeticProcessor.java deleted file mode 100644 index 835d1a7366486..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/arithmetic/UnaryArithmeticProcessor.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.expression.predicate.operator.arithmetic; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.esql.core.QlIllegalArgumentException; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.Processor; - -import java.io.IOException; -import java.util.function.Function; - -public class UnaryArithmeticProcessor implements Processor { - - public enum UnaryArithmeticOperation { - - NEGATE(Arithmetics::negate); - - private final Function process; - - UnaryArithmeticOperation(Function process) { - this.process = process; - } - - public final Number apply(Number number) { - return process.apply(number); - } - - public String symbol() { - return "-"; - } - } - - public static final String NAME = "au"; - - private final UnaryArithmeticOperation operation; - - public UnaryArithmeticProcessor(UnaryArithmeticOperation operation) { - this.operation = operation; - } - - public UnaryArithmeticProcessor(StreamInput in) throws IOException { - operation = in.readEnum(UnaryArithmeticOperation.class); - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeEnum(operation); - } - - @Override - public Object process(Object input) { - if (input == null) { - return null; - } - - if (input instanceof Number number) { - return operation.apply(number); - } - throw new QlIllegalArgumentException("A number is required; received {}", input); - } - - @Override - public String toString() { - return operation.symbol() + super.toString(); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/BinaryComparison.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/BinaryComparison.java index 193b77f2344c0..3f0f817c3c3d4 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/BinaryComparison.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/BinaryComparison.java @@ -10,7 +10,6 @@ import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; import org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal; import org.elasticsearch.xpack.esql.core.expression.predicate.BinaryOperator; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonProcessor.BinaryComparisonOperation; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; diff --git 
a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/BinaryComparisonOperation.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/BinaryComparisonOperation.java new file mode 100644 index 0000000000000..efe8a7a8cf615 --- /dev/null +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/BinaryComparisonOperation.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison; + +import org.elasticsearch.xpack.esql.core.expression.predicate.PredicateBiFunction; + +import java.util.function.BiFunction; + +public enum BinaryComparisonOperation implements PredicateBiFunction { + + EQ(Comparisons::eq, "=="), + NULLEQ(Comparisons::nulleq, "<=>"), + NEQ(Comparisons::neq, "!="), + GT(Comparisons::gt, ">"), + GTE(Comparisons::gte, ">="), + LT(Comparisons::lt, "<"), + LTE(Comparisons::lte, "<="); + + private final BiFunction process; + private final String symbol; + + BinaryComparisonOperation(BiFunction process, String symbol) { + this.process = process; + this.symbol = symbol; + } + + @Override + public String symbol() { + return symbol; + } + + @Override + public Boolean apply(Object left, Object right) { + if (this != NULLEQ && (left == null || right == null)) { + return null; + } + return doApply(left, right); + } + + @Override + public final Boolean doApply(Object left, Object right) { + return process.apply(left, right); + } + + @Override + public String toString() { + return symbol; + } +} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/BinaryComparisonProcessor.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/BinaryComparisonProcessor.java deleted file mode 100644 index 6434f2d9b6ac2..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/BinaryComparisonProcessor.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
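(Aside for readers of the new enum above: BinaryComparisonOperation keeps the three-valued semantics it had as an inner enum of the processor deleted just below; every operator except NULLEQ yields null when either operand is null. Illustrative Java fragment only, with expected values inferred from the apply() contract rather than taken from the PR's tests:)

Boolean t  = BinaryComparisonOperation.EQ.apply(1, 1);            // expected TRUE
Boolean u  = BinaryComparisonOperation.GT.apply(1, null);         // null: a comparison against null stays unknown
Boolean ns = BinaryComparisonOperation.NULLEQ.apply(null, null);  // NULLEQ ("<=>") does not short-circuit on null
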
- */ -package org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.FunctionalEnumBinaryProcessor; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.Processor; -import org.elasticsearch.xpack.esql.core.expression.predicate.PredicateBiFunction; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonProcessor.BinaryComparisonOperation; - -import java.io.IOException; -import java.util.function.BiFunction; - -public class BinaryComparisonProcessor extends FunctionalEnumBinaryProcessor { - - public enum BinaryComparisonOperation implements PredicateBiFunction { - - EQ(Comparisons::eq, "=="), - NULLEQ(Comparisons::nulleq, "<=>"), - NEQ(Comparisons::neq, "!="), - GT(Comparisons::gt, ">"), - GTE(Comparisons::gte, ">="), - LT(Comparisons::lt, "<"), - LTE(Comparisons::lte, "<="); - - private final BiFunction process; - private final String symbol; - - BinaryComparisonOperation(BiFunction process, String symbol) { - this.process = process; - this.symbol = symbol; - } - - @Override - public String symbol() { - return symbol; - } - - @Override - public Boolean apply(Object left, Object right) { - if (this != NULLEQ && (left == null || right == null)) { - return null; - } - return doApply(left, right); - } - - @Override - public final Boolean doApply(Object left, Object right) { - return process.apply(left, right); - } - - @Override - public String toString() { - return symbol; - } - } - - public static final String NAME = "cb"; - - public BinaryComparisonProcessor(Processor left, Processor right, BinaryComparisonOperation operation) { - super(left, right, operation); - } - - public BinaryComparisonProcessor(StreamInput in) throws IOException { - super(in, i -> i.readEnum(BinaryComparisonOperation.class)); - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - public Object process(Object input) { - if (function() == BinaryComparisonOperation.NULLEQ) { - return doProcess(left().process(input), right().process(input)); - } - return super.process(input); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/Equals.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/Equals.java index ba4816e3b68fe..533ce4b76b595 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/Equals.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/Equals.java @@ -6,12 +6,13 @@ */ package org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.predicate.Negatable; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonProcessor.BinaryComparisonOperation; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; +import java.io.IOException; import java.time.ZoneId; public class Equals extends BinaryComparison implements Negatable { @@ -24,6 +25,16 @@ public Equals(Source source, Expression left, Expression right, ZoneId 
zoneId) { super(source, left, right, BinaryComparisonOperation.EQ, zoneId); } + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + @Override protected NodeInfo info() { return NodeInfo.create(this, Equals::new, left(), right(), zoneId()); diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/GreaterThan.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/GreaterThan.java index 4e3880defdd79..f4ffa1a12ae5b 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/GreaterThan.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/GreaterThan.java @@ -6,12 +6,13 @@ */ package org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.predicate.Negatable; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonProcessor.BinaryComparisonOperation; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; +import java.io.IOException; import java.time.ZoneId; public class GreaterThan extends BinaryComparison implements Negatable { @@ -20,6 +21,16 @@ public GreaterThan(Source source, Expression left, Expression right, ZoneId zone super(source, left, right, BinaryComparisonOperation.GT, zoneId); } + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + @Override protected NodeInfo info() { return NodeInfo.create(this, GreaterThan::new, left(), right(), zoneId()); diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/GreaterThanOrEqual.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/GreaterThanOrEqual.java index 2132a028c4d79..28aa4124f0987 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/GreaterThanOrEqual.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/GreaterThanOrEqual.java @@ -6,12 +6,13 @@ */ package org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.predicate.Negatable; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonProcessor.BinaryComparisonOperation; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; +import java.io.IOException; import java.time.ZoneId; public class GreaterThanOrEqual extends BinaryComparison implements Negatable { @@ -20,6 +21,16 @@ public GreaterThanOrEqual(Source source, Expression left, 
Expression right, Zone super(source, left, right, BinaryComparisonOperation.GTE, zoneId); } + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + @Override protected NodeInfo info() { return NodeInfo.create(this, GreaterThanOrEqual::new, left(), right(), zoneId()); diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/In.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/In.java index 21fbfa56b0d98..bd645064289a5 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/In.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/In.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.Foldables; @@ -18,6 +19,7 @@ import org.elasticsearch.xpack.esql.core.type.DataTypeConverter; import org.elasticsearch.xpack.esql.core.util.CollectionUtils; +import java.io.IOException; import java.time.ZoneId; import java.util.ArrayList; import java.util.Collections; @@ -46,6 +48,16 @@ public In(Source source, Expression value, List list, ZoneId zoneId) this.zoneId = zoneId; } + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + @Override protected NodeInfo info() { return NodeInfo.create(this, In::new, value(), list(), zoneId()); @@ -89,7 +101,20 @@ public Boolean fold() { if (Expressions.isNull(value) || list.size() == 1 && Expressions.isNull(list.get(0))) { return null; } - return InProcessor.apply(value.fold(), foldAndConvertListOfValues(list, value.dataType())); + return apply(value.fold(), foldAndConvertListOfValues(list, value.dataType())); + } + + private static Boolean apply(Object input, List values) { + Boolean result = Boolean.FALSE; + for (Object v : values) { + Boolean compResult = Comparisons.eq(input, v); + if (compResult == null) { + result = null; + } else if (compResult == Boolean.TRUE) { + return Boolean.TRUE; + } + } + return result; } @Override diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/InProcessor.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/InProcessor.java deleted file mode 100644 index 61d33ab631bfb..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/InProcessor.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
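(Aside: the private In#apply introduced above carries over InProcessor.apply's three-valued IN logic. A self-contained restatement with worked inputs; Comparisons.eq is the helper already referenced in the new code, the inFold name and everything else is plain, hypothetical Java:)

static Boolean inFold(Object value, java.util.List<Object> list) {
    Boolean result = Boolean.FALSE;
    for (Object v : list) {
        Boolean cmp = Comparisons.eq(value, v);   // null when the comparison is unknown
        if (cmp == null) {
            result = null;
        } else if (cmp == Boolean.TRUE) {
            return Boolean.TRUE;                  // first match wins
        }
    }
    return result;
}

// inFold(2, List.of(1, 2, 3))          -> TRUE
// inFold(4, List.of(1, 2, 3))          -> FALSE
// inFold(4, Arrays.asList(1, null, 3)) -> null (no match, but an unknown comparison was seen)
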
- */ -package org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.Processor; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; - -public class InProcessor implements Processor { - - public static final String NAME = "in"; - - private final List processsors; - - InProcessor(List processors) { - this.processsors = processors; - } - - public InProcessor(StreamInput in) throws IOException { - processsors = in.readNamedWriteableCollectionAsList(Processor.class); - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - public final void writeTo(StreamOutput out) throws IOException { - out.writeNamedWriteableCollection(processsors); - } - - @Override - public Object process(Object input) { - Object leftValue = processsors.get(processsors.size() - 1).process(input); - return apply(leftValue, process(processsors.subList(0, processsors.size() - 1), leftValue)); - } - - private static List process(List processors, Object input) { - List values = new ArrayList<>(processors.size()); - for (Processor p : processors) { - values.add(p.process(input)); - } - return values; - } - - public static Boolean apply(Object input, List values) { - Boolean result = Boolean.FALSE; - for (Object v : values) { - Boolean compResult = Comparisons.eq(input, v); - if (compResult == null) { - result = null; - } else if (compResult == Boolean.TRUE) { - return Boolean.TRUE; - } - } - return result; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - InProcessor that = (InProcessor) o; - return Objects.equals(processsors, that.processsors); - } - - @Override - public int hashCode() { - return Objects.hash(processsors); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/LessThan.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/LessThan.java index c7985548918f9..150db16521480 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/LessThan.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/LessThan.java @@ -6,12 +6,13 @@ */ package org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.predicate.Negatable; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonProcessor.BinaryComparisonOperation; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; +import java.io.IOException; import java.time.ZoneId; public class LessThan extends BinaryComparison implements Negatable { @@ -20,6 +21,16 @@ public LessThan(Source source, Expression left, Expression right, ZoneId zoneId) super(source, left, right, BinaryComparisonOperation.LT, zoneId); } + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new 
UnsupportedOperationException(); + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + @Override protected NodeInfo info() { return NodeInfo.create(this, LessThan::new, left(), right(), zoneId()); diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/LessThanOrEqual.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/LessThanOrEqual.java index ff87d02cd654a..a0e5abd4317b3 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/LessThanOrEqual.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/LessThanOrEqual.java @@ -6,12 +6,13 @@ */ package org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.predicate.Negatable; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonProcessor.BinaryComparisonOperation; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; +import java.io.IOException; import java.time.ZoneId; public class LessThanOrEqual extends BinaryComparison implements Negatable { @@ -20,6 +21,16 @@ public LessThanOrEqual(Source source, Expression left, Expression right, ZoneId super(source, left, right, BinaryComparisonOperation.LTE, zoneId); } + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + @Override protected NodeInfo info() { return NodeInfo.create(this, LessThanOrEqual::new, left(), right(), zoneId()); diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/NotEquals.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/NotEquals.java index 936e684ab37c6..6d52195ec9452 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/NotEquals.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/NotEquals.java @@ -6,12 +6,13 @@ */ package org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.predicate.Negatable; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonProcessor.BinaryComparisonOperation; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; +import java.io.IOException; import java.time.ZoneId; public class NotEquals extends BinaryComparison implements Negatable { @@ -20,6 +21,16 @@ public NotEquals(Source source, Expression left, Expression right, ZoneId zoneId super(source, left, right, BinaryComparisonOperation.NEQ, zoneId); } + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new 
UnsupportedOperationException(); + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + @Override protected NodeInfo info() { return NodeInfo.create(this, NotEquals::new, left(), right(), zoneId()); diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/NullEquals.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/NullEquals.java index 0b135d380f621..bb2196a5ae3b9 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/NullEquals.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/operator/comparison/NullEquals.java @@ -6,12 +6,13 @@ */ package org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Nullability; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonProcessor.BinaryComparisonOperation; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; +import java.io.IOException; import java.time.ZoneId; /** @@ -23,6 +24,16 @@ public NullEquals(Source source, Expression left, Expression right, ZoneId zoneI super(source, left, right, BinaryComparisonOperation.NULLEQ, zoneId); } + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + @Override protected NodeInfo info() { return NodeInfo.create(this, NullEquals::new, left(), right(), zoneId()); diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/Like.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/Like.java index 84ed88da0fe42..6d8ce8cbdf47f 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/Like.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/Like.java @@ -6,16 +6,29 @@ */ package org.elasticsearch.xpack.esql.core.expression.predicate.regex; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; +import java.io.IOException; + public class Like extends RegexMatch { public Like(Source source, Expression left, LikePattern pattern) { this(source, left, pattern, false); } + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + public Like(Source source, Expression left, LikePattern pattern, boolean caseInsensitive) { super(source, left, pattern, caseInsensitive); } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/RLike.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/RLike.java index 
8020491c50212..5f095a654fc89 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/RLike.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/RLike.java @@ -6,10 +6,13 @@ */ package org.elasticsearch.xpack.esql.core.expression.predicate.regex; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; +import java.io.IOException; + public class RLike extends RegexMatch { public RLike(Source source, Expression value, RLikePattern pattern) { @@ -20,6 +23,16 @@ public RLike(Source source, Expression field, RLikePattern rLikePattern, boolean super(source, field, rLikePattern, caseInsensitive); } + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + @Override protected NodeInfo info() { return NodeInfo.create(this, RLike::new, field(), pattern(), caseInsensitive()); diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/RegexMatch.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/RegexMatch.java index 4e7e70685dc3a..32e8b04573d2d 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/RegexMatch.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/RegexMatch.java @@ -11,7 +11,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Nullability; import org.elasticsearch.xpack.esql.core.expression.function.scalar.UnaryScalarFunction; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.Processor; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; @@ -69,12 +68,7 @@ public Boolean fold() { if (val instanceof BytesRef br) { val = br.utf8ToString(); } - return RegexProcessor.RegexOperation.match(val, pattern().asJavaRegex()); - } - - @Override - protected Processor makeProcessor() { - return new RegexProcessor(pattern().asJavaRegex()); + return RegexOperation.match(val, pattern().asJavaRegex()); } @Override diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/RegexOperation.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/RegexOperation.java new file mode 100644 index 0000000000000..1501ae65ed485 --- /dev/null +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/RegexOperation.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
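(Aside: one pattern repeats across Neg, Equals, GreaterThan, GreaterThanOrEqual, In, LessThan, LessThanOrEqual, NotEquals, NullEquals, Like and RLike in this change: writeTo and getWriteableName now simply throw UnsupportedOperationException. The likely reading is that these esql-core nodes are no longer meant to be wire-serialized themselves, with serialization presumably handled by the ESQL plugin's own expression classes. Illustrative fragment only; the variable names are placeholders and the enclosing method is assumed to declare throws IOException:)

try {
    equalsNode.writeTo(out);   // equalsNode: any of the core nodes above; out: some StreamOutput
} catch (UnsupportedOperationException expected) {
    // the core classes intentionally refuse to serialize themselves
}
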
+ */ + +package org.elasticsearch.xpack.esql.core.expression.predicate.regex; + +import java.util.regex.Pattern; + +public class RegexOperation { + + public static Boolean match(Object value, Pattern pattern) { + if (pattern == null) { + return Boolean.TRUE; + } + + if (value == null) { + return null; + } + + return pattern.matcher(value.toString()).matches(); + } + + public static Boolean match(Object value, String pattern) { + return match(value, pattern, Boolean.FALSE); + } + + public static Boolean match(Object value, String pattern, Boolean caseInsensitive) { + if (pattern == null) { + return Boolean.TRUE; + } + + if (value == null) { + return null; + } + + int flags = 0; + if (Boolean.TRUE.equals(caseInsensitive)) { + flags |= Pattern.CASE_INSENSITIVE; + } + return Pattern.compile(pattern, flags).matcher(value.toString()).matches(); + } +} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/RegexProcessor.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/RegexProcessor.java deleted file mode 100644 index 41b0ab406bf89..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/RegexProcessor.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.expression.predicate.regex; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.Processor; - -import java.io.IOException; -import java.util.Objects; -import java.util.regex.Pattern; - -public class RegexProcessor implements Processor { - - public static class RegexOperation { - - public static Boolean match(Object value, Pattern pattern) { - if (pattern == null) { - return Boolean.TRUE; - } - - if (value == null) { - return null; - } - - return pattern.matcher(value.toString()).matches(); - } - - public static Boolean match(Object value, String pattern) { - return match(value, pattern, Boolean.FALSE); - } - - public static Boolean match(Object value, String pattern, Boolean caseInsensitive) { - if (pattern == null) { - return Boolean.TRUE; - } - - if (value == null) { - return null; - } - - int flags = 0; - if (Boolean.TRUE.equals(caseInsensitive)) { - flags |= Pattern.CASE_INSENSITIVE; - } - return Pattern.compile(pattern, flags).matcher(value.toString()).matches(); - } - } - - public static final String NAME = "rgx"; - - private Pattern pattern; - - public RegexProcessor(String pattern) { - this.pattern = pattern != null ? Pattern.compile(pattern) : null; - } - - @Override - public String getWriteableName() { - return NAME; - } - - public RegexProcessor(StreamInput in) throws IOException { - this(in.readOptionalString()); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalString(pattern != null ? 
pattern.toString() : null); - } - - @Override - public Object process(Object input) { - return RegexOperation.match(input, pattern); - } - - @Override - public int hashCode() { - return Objects.hash(pattern); - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - - if (obj == null || getClass() != obj.getClass()) { - return false; - } - - RegexProcessor other = (RegexProcessor) obj; - return Objects.equals(pattern, other.pattern); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/WildcardLike.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/WildcardLike.java index 8834c1a0211b4..bf54744667217 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/WildcardLike.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/regex/WildcardLike.java @@ -6,10 +6,13 @@ */ package org.elasticsearch.xpack.esql.core.expression.predicate.regex; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; +import java.io.IOException; + public class WildcardLike extends RegexMatch { public WildcardLike(Source source, Expression left, WildcardPattern pattern) { @@ -20,6 +23,16 @@ public WildcardLike(Source source, Expression left, WildcardPattern pattern, boo super(source, left, pattern, caseInsensitive); } + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + @Override protected NodeInfo info() { return NodeInfo.create(this, WildcardLike::new, field(), pattern(), caseInsensitive()); diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/processor/Processors.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/processor/Processors.java deleted file mode 100644 index f72fdb7e43fb6..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/processor/Processors.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
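(Aside: RegexOperation, added above, is now a standalone utility rather than a nested class of the deleted RegexProcessor. Illustrative fragment; the expected results follow directly from the code:)

Boolean m1 = RegexOperation.match("foobar", "foo.*");               // TRUE
Boolean m2 = RegexOperation.match(null, "foo.*");                   // null: a null value stays unknown
Boolean m3 = RegexOperation.match("FOOBAR", "foo.*", Boolean.TRUE); // TRUE, compiled with CASE_INSENSITIVE
Boolean m4 = RegexOperation.match("foobar", (String) null);         // TRUE: a null pattern matches everything
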
- */ -package org.elasticsearch.xpack.esql.core.expression.processor; - -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.BucketExtractorProcessor; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.ChainingProcessor; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.ConstantProcessor; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.HitExtractorProcessor; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.Processor; -import org.elasticsearch.xpack.esql.core.expression.predicate.logical.BinaryLogicProcessor; -import org.elasticsearch.xpack.esql.core.expression.predicate.logical.NotProcessor; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.arithmetic.BinaryArithmeticOperation; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.arithmetic.BinaryArithmeticProcessor; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.arithmetic.DefaultBinaryArithmeticOperation; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.arithmetic.UnaryArithmeticProcessor; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonProcessor; -import org.elasticsearch.xpack.esql.core.expression.predicate.regex.RegexProcessor; -import org.elasticsearch.xpack.esql.core.type.Converter; -import org.elasticsearch.xpack.esql.core.type.DataTypeConverter.DefaultConverter; - -import java.util.ArrayList; -import java.util.List; - -public final class Processors { - - private Processors() {} - - /** - * All of the named writeables needed to deserialize the instances of - * {@linkplain Processors}. 
- */ - public static List getNamedWriteables() { - List entries = new ArrayList<>(); - - // base - entries.add(new Entry(Converter.class, DefaultConverter.NAME, DefaultConverter::read)); - - entries.add(new Entry(Processor.class, ConstantProcessor.NAME, ConstantProcessor::new)); - entries.add(new Entry(Processor.class, HitExtractorProcessor.NAME, HitExtractorProcessor::new)); - entries.add(new Entry(Processor.class, BucketExtractorProcessor.NAME, BucketExtractorProcessor::new)); - entries.add(new Entry(Processor.class, ChainingProcessor.NAME, ChainingProcessor::new)); - - // logical - entries.add(new Entry(Processor.class, BinaryLogicProcessor.NAME, BinaryLogicProcessor::new)); - entries.add(new Entry(Processor.class, NotProcessor.NAME, NotProcessor::new)); - - // arithmetic - // binary arithmetics are pluggable - entries.add( - new Entry(BinaryArithmeticOperation.class, DefaultBinaryArithmeticOperation.NAME, DefaultBinaryArithmeticOperation::read) - ); - entries.add(new Entry(Processor.class, BinaryArithmeticProcessor.NAME, BinaryArithmeticProcessor::new)); - entries.add(new Entry(Processor.class, UnaryArithmeticProcessor.NAME, UnaryArithmeticProcessor::new)); - // comparators - entries.add(new Entry(Processor.class, BinaryComparisonProcessor.NAME, BinaryComparisonProcessor::new)); - // regex - entries.add(new Entry(Processor.class, RegexProcessor.NAME, RegexProcessor::new)); - - return entries; - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/parser/CaseChangingCharStream.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/parser/CaseChangingCharStream.java index f38daa472ddff..6248004d73dac 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/parser/CaseChangingCharStream.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/parser/CaseChangingCharStream.java @@ -18,27 +18,24 @@ /** * This class supports case-insensitive lexing by wrapping an existing - * {@link CharStream} and forcing the lexer to see either upper or - * lowercase characters. Grammar literals should then be either upper or - * lower case such as 'BEGIN' or 'begin'. The text of the character - * stream is unaffected. Example: input 'BeGiN' would match lexer rule - * 'BEGIN' if constructor parameter upper=true but getText() would return - * 'BeGiN'. + * {@link CharStream} and forcing the lexer to see lowercase characters + * Grammar literals should then be lower case such as {@code begin}. + * The text of the character stream is unaffected. + *

<pre> + *     Example: input {@code BeGiN} would match lexer rule {@code begin} + *     but {@link CharStream#getText} will return {@code BeGiN}. + * </pre> +
      */ public class CaseChangingCharStream implements CharStream { private final CharStream stream; - private final boolean upper; /** * Constructs a new CaseChangingCharStream wrapping the given {@link CharStream} forcing * all characters to upper case or lower case. * @param stream The stream to wrap. - * @param upper If true force each symbol to upper case, otherwise force to lower. */ - public CaseChangingCharStream(CharStream stream, boolean upper) { + public CaseChangingCharStream(CharStream stream) { this.stream = stream; - this.upper = upper; } @Override @@ -57,7 +54,7 @@ public int LA(int i) { if (c <= 0) { return c; } - return upper ? Character.toUpperCase(c) : Character.toLowerCase(c); + return Character.toLowerCase(c); } @Override diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/Aggregate.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/Aggregate.java deleted file mode 100644 index 3fcfd61e21b45..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/Aggregate.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.plan.logical; - -import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; -import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.Expressions; -import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.tree.NodeInfo; -import org.elasticsearch.xpack.esql.core.tree.Source; - -import java.util.List; -import java.util.Objects; - -public class Aggregate extends UnaryPlan { - - private final List groupings; - private final List aggregates; - - public Aggregate(Source source, LogicalPlan child, List groupings, List aggregates) { - super(source, child); - this.groupings = groupings; - this.aggregates = aggregates; - } - - @Override - protected NodeInfo info() { - return NodeInfo.create(this, Aggregate::new, child(), groupings, aggregates); - } - - @Override - public Aggregate replaceChild(LogicalPlan newChild) { - return new Aggregate(source(), newChild, groupings, aggregates); - } - - public List groupings() { - return groupings; - } - - public List aggregates() { - return aggregates; - } - - @Override - public boolean expressionsResolved() { - return Resolvables.resolved(groupings) && Resolvables.resolved(aggregates); - } - - @Override - public List output() { - return Expressions.asAttributes(aggregates); - } - - @Override - public int hashCode() { - return Objects.hash(groupings, aggregates, child()); - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - - if (obj == null || getClass() != obj.getClass()) { - return false; - } - - Aggregate other = (Aggregate) obj; - return Objects.equals(groupings, other.groupings) - && Objects.equals(aggregates, other.aggregates) - && Objects.equals(child(), other.child()); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/EsRelation.java 
b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/EsRelation.java deleted file mode 100644 index 2998988837253..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/EsRelation.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.plan.logical; - -import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; -import org.elasticsearch.xpack.esql.core.index.EsIndex; -import org.elasticsearch.xpack.esql.core.tree.NodeInfo; -import org.elasticsearch.xpack.esql.core.tree.NodeUtils; -import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.core.type.EsField; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Objects; - -public class EsRelation extends LeafPlan { - - private final EsIndex index; - private final List attrs; - private final boolean frozen; - - public EsRelation(Source source, EsIndex index, boolean frozen) { - this(source, index, flatten(source, index.mapping()), frozen); - } - - public EsRelation(Source source, EsIndex index, List attributes) { - this(source, index, attributes, false); - } - - public EsRelation(Source source, EsIndex index, List attributes, boolean frozen) { - super(source); - this.index = index; - this.attrs = attributes; - this.frozen = frozen; - } - - @Override - protected NodeInfo info() { - return NodeInfo.create(this, EsRelation::new, index, attrs, frozen); - } - - private static List flatten(Source source, Map mapping) { - return flatten(source, mapping, null); - } - - private static List flatten(Source source, Map mapping, FieldAttribute parent) { - List list = new ArrayList<>(); - - for (Entry entry : mapping.entrySet()) { - String name = entry.getKey(); - EsField t = entry.getValue(); - - if (t != null) { - FieldAttribute f = new FieldAttribute(source, parent, parent != null ? parent.name() + "." 
+ name : name, t); - list.add(f); - // object or nested - if (t.getProperties().isEmpty() == false) { - list.addAll(flatten(source, t.getProperties(), f)); - } - } - } - return list; - } - - public EsIndex index() { - return index; - } - - public boolean frozen() { - return frozen; - } - - @Override - public List output() { - return attrs; - } - - @Override - public boolean expressionsResolved() { - return true; - } - - @Override - public int hashCode() { - return Objects.hash(index, frozen); - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - - if (obj == null || getClass() != obj.getClass()) { - return false; - } - - EsRelation other = (EsRelation) obj; - return Objects.equals(index, other.index) && frozen == other.frozen; - } - - @Override - public String nodeString() { - return nodeName() + "[" + index + "]" + NodeUtils.limitedToString(attrs); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/Project.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/Project.java deleted file mode 100644 index b9070f546d8de..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/Project.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.plan.logical; - -import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; -import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.expression.Expressions; -import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.expression.function.Functions; -import org.elasticsearch.xpack.esql.core.tree.NodeInfo; -import org.elasticsearch.xpack.esql.core.tree.Source; - -import java.util.List; -import java.util.Objects; - -/** - * A {@code Project} is a {@code Plan} with one child. In {@code SELECT x FROM y}, the "SELECT" statement is a Project. 
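(Aside, referring back to the CaseChangingCharStream change a few files up: with the upper flag removed, the wrapper always lowercases what the lexer sees while the underlying text is untouched. A usage sketch assuming ANTLR's standard CharStreams factory; the surrounding setup is hypothetical:)

CharStream raw = CharStreams.fromString("BeGiN");
CharStream lowered = new CaseChangingCharStream(raw);
// lowered.LA(1) yields 'b', so the lexer can match the lower-case literal 'begin',
// while getText() on the wrapped stream still reports the original "BeGiN".
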
- */ -public class Project extends UnaryPlan { - - private final List projections; - - public Project(Source source, LogicalPlan child, List projections) { - super(source, child); - this.projections = projections; - } - - @Override - protected NodeInfo info() { - return NodeInfo.create(this, Project::new, child(), projections); - } - - @Override - public Project replaceChild(LogicalPlan newChild) { - return new Project(source(), newChild, projections); - } - - public List projections() { - return projections; - } - - public Project withProjections(List projections) { - return new Project(source(), child(), projections); - } - - @Override - public boolean resolved() { - return super.resolved() && Expressions.anyMatch(projections, Functions::isAggregate) == false; - } - - @Override - public boolean expressionsResolved() { - return Resolvables.resolved(projections); - } - - @Override - public List output() { - return Expressions.asAttributes(projections); - } - - @Override - public int hashCode() { - return Objects.hash(projections, child()); - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null || getClass() != obj.getClass()) { - return false; - } - - Project other = (Project) obj; - - return Objects.equals(projections, other.projections) && Objects.equals(child(), other.child()); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/UnresolvedRelation.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/UnresolvedRelation.java deleted file mode 100644 index d969ad02a4eac..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/UnresolvedRelation.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.plan.logical; - -import org.elasticsearch.xpack.esql.core.capabilities.Unresolvable; -import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.plan.TableIdentifier; -import org.elasticsearch.xpack.esql.core.tree.NodeInfo; -import org.elasticsearch.xpack.esql.core.tree.Source; - -import java.util.Collections; -import java.util.List; -import java.util.Objects; - -import static java.util.Collections.singletonList; - -public class UnresolvedRelation extends LeafPlan implements Unresolvable { - - private final TableIdentifier table; - private final boolean frozen; - private final String alias; - private final String unresolvedMsg; - - public UnresolvedRelation(Source source, TableIdentifier table, String alias, boolean frozen) { - this(source, table, alias, frozen, null); - } - - public UnresolvedRelation(Source source, TableIdentifier table, String alias, boolean frozen, String unresolvedMessage) { - super(source); - this.table = table; - this.alias = alias; - this.frozen = frozen; - this.unresolvedMsg = unresolvedMessage == null ? 
"Unknown index [" + table.index() + "]" : unresolvedMessage; - } - - @Override - protected NodeInfo info() { - return NodeInfo.create(this, UnresolvedRelation::new, table, alias, frozen, unresolvedMsg); - } - - public TableIdentifier table() { - return table; - } - - public String alias() { - return alias; - } - - public boolean frozen() { - return frozen; - } - - @Override - public boolean resolved() { - return false; - } - - @Override - public boolean expressionsResolved() { - return false; - } - - @Override - public List output() { - return Collections.emptyList(); - } - - @Override - public String unresolvedMessage() { - return unresolvedMsg; - } - - @Override - public int hashCode() { - return Objects.hash(source(), table, alias, unresolvedMsg); - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - - if (obj == null || getClass() != obj.getClass()) { - return false; - } - - UnresolvedRelation other = (UnresolvedRelation) obj; - return Objects.equals(table, other.table) - && Objects.equals(alias, other.alias) - && Objects.equals(frozen, other.frozen) - && Objects.equals(unresolvedMsg, other.unresolvedMsg); - } - - @Override - public List nodeProperties() { - return singletonList(table); - } - - @Override - public String toString() { - return UNRESOLVED_PREFIX + table.index(); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plugin/AbstractTransportQlAsyncGetStatusAction.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plugin/AbstractTransportQlAsyncGetStatusAction.java deleted file mode 100644 index cb21272758d1b..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plugin/AbstractTransportQlAsyncGetStatusAction.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ -package org.elasticsearch.xpack.esql.core.plugin; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionListenerResponseHandler; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.client.internal.Client; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.XPackPlugin; -import org.elasticsearch.xpack.core.async.AsyncExecutionId; -import org.elasticsearch.xpack.core.async.AsyncTaskIndexService; -import org.elasticsearch.xpack.core.async.GetAsyncStatusRequest; -import org.elasticsearch.xpack.core.async.StoredAsyncResponse; -import org.elasticsearch.xpack.core.async.StoredAsyncTask; -import org.elasticsearch.xpack.esql.core.async.QlStatusResponse; - -import java.util.Objects; - -import static org.elasticsearch.xpack.core.ClientHelper.ASYNC_SEARCH_ORIGIN; - -public abstract class AbstractTransportQlAsyncGetStatusAction< - Response extends ActionResponse & QlStatusResponse.AsyncStatus, - AsyncTask extends StoredAsyncTask> extends HandledTransportAction { - private final String actionName; - private final TransportService transportService; - private final ClusterService clusterService; - private final Class asyncTaskClass; - private final AsyncTaskIndexService> store; - - @SuppressWarnings("this-escape") - public AbstractTransportQlAsyncGetStatusAction( - String actionName, - TransportService transportService, - ActionFilters actionFilters, - ClusterService clusterService, - NamedWriteableRegistry registry, - Client client, - ThreadPool threadPool, - BigArrays bigArrays, - Class asyncTaskClass - ) { - super(actionName, transportService, actionFilters, GetAsyncStatusRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); - this.actionName = actionName; - this.transportService = transportService; - this.clusterService = clusterService; - this.asyncTaskClass = asyncTaskClass; - Writeable.Reader> reader = in -> new StoredAsyncResponse<>(responseReader(), in); - this.store = new AsyncTaskIndexService<>( - XPackPlugin.ASYNC_RESULTS_INDEX, - clusterService, - threadPool.getThreadContext(), - client, - ASYNC_SEARCH_ORIGIN, - reader, - registry, - bigArrays - ); - } - - @Override - protected void doExecute(Task task, GetAsyncStatusRequest request, ActionListener listener) { - AsyncExecutionId searchId = AsyncExecutionId.decode(request.getId()); - DiscoveryNode node = clusterService.state().nodes().get(searchId.getTaskId().getNodeId()); - DiscoveryNode localNode = clusterService.state().getNodes().getLocalNode(); - if (node == null || Objects.equals(node, localNode)) { - store.retrieveStatus( - request, - taskManager, - asyncTaskClass, - AbstractTransportQlAsyncGetStatusAction::getStatusResponse, - QlStatusResponse::getStatusFromStoredSearch, - listener - ); - } else { - transportService.sendRequest( - node, - actionName, - request, - new ActionListenerResponseHandler<>(listener, QlStatusResponse::new, EsExecutors.DIRECT_EXECUTOR_SERVICE) - ); - } - } - - private 
static QlStatusResponse getStatusResponse(StoredAsyncTask asyncTask) { - return new QlStatusResponse( - asyncTask.getExecutionId().getEncoded(), - true, - true, - asyncTask.getStartTime(), - asyncTask.getExpirationTimeMillis(), - null - ); - } - - protected abstract Writeable.Reader responseReader(); -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plugin/TransportActionUtils.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plugin/TransportActionUtils.java deleted file mode 100644 index 4d6fc9d1d18d5..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plugin/TransportActionUtils.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.plugin; - -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.VersionMismatchException; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.xpack.esql.core.util.Holder; - -import java.util.function.Consumer; - -public final class TransportActionUtils { - - /** - * Execute a *QL request and re-try it in case the first request failed with a {@code VersionMismatchException} - * - * @param clusterService The cluster service instance - * @param onFailure On-failure handler in case the request doesn't fail with a {@code VersionMismatchException} - * @param queryRunner *QL query execution code, typically a Plan Executor running the query - * @param retryRequest Re-trial logic - * @param log Log4j logger - */ - public static void executeRequestWithRetryAttempt( - ClusterService clusterService, - Consumer onFailure, - Consumer> queryRunner, - Consumer retryRequest, - Logger log - ) { - - Holder retrySecondTime = new Holder(false); - queryRunner.accept(e -> { - // the search request likely ran on nodes with different versions of ES - // we will retry on a node with an older version that should generate a backwards compatible _search request - if (e instanceof SearchPhaseExecutionException - && ((SearchPhaseExecutionException) e).getCause() instanceof VersionMismatchException) { - if (log.isDebugEnabled()) { - log.debug("Caught exception type [{}] with cause [{}].", e.getClass().getName(), e.getCause()); - } - DiscoveryNode localNode = clusterService.state().nodes().getLocalNode(); - DiscoveryNode candidateNode = null; - for (DiscoveryNode node : clusterService.state().nodes()) { - // find the first node that's older than the current node - if (node != localNode && node.getVersion().before(localNode.getVersion())) { - candidateNode = node; - break; - } - } - if (candidateNode != null) { - if (log.isDebugEnabled()) { - log.debug( - "Candidate node to resend the request to: address [{}], id [{}], name [{}], version [{}]", - candidateNode.getAddress(), - candidateNode.getId(), - candidateNode.getName(), - candidateNode.getVersion() - ); - } - // re-send the request to the older node - retryRequest.accept(candidateNode); - } else { - retrySecondTime.set(true); - } - } else { - onFailure.accept(e); - } - }); - if (retrySecondTime.get()) { - if (log.isDebugEnabled()) { - log.debug("No candidate node found, 
likely all were upgraded in the meantime. Re-trying the original request."); - } - queryRunner.accept(onFailure); - } - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/tree/Node.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/tree/Node.java index f7561d0c2b34b..b1fc7d59c784d 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/tree/Node.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/tree/Node.java @@ -254,7 +254,7 @@ public T transformPropertiesUp(Class typeToken, Function T transformNodeProps(Class typeToken, Function rule) { return info().transform(rule, typeToken); @@ -262,6 +262,15 @@ protected final T transformNodeProps(Class typeToken, Function + * Normally, you want to use one of the static {@code create} methods to implement this. + *

      + * For {@link org.elasticsearch.xpack.esql.core.plan.QueryPlan}s, it is very important that + * the properties contain all of the expressions and references relevant to this node, and + * that all of the properties are used in the provided constructor; otherwise query plan + * transformations like + * {@link org.elasticsearch.xpack.esql.core.plan.QueryPlan#transformExpressionsOnly(Function)} + * will not have an effect. */ protected abstract NodeInfo info(); diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java index 7f48751535ba9..503c076b4f7a2 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java @@ -14,6 +14,7 @@ import java.io.IOException; import java.math.BigInteger; import java.time.ZonedDateTime; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Comparator; @@ -21,38 +22,14 @@ import java.util.Map; import java.util.Set; import java.util.function.Function; -import java.util.stream.Stream; import static java.util.stream.Collectors.toMap; import static java.util.stream.Collectors.toUnmodifiableMap; public enum DataType { - UNSUPPORTED("UNSUPPORTED", null, 0, false, false, false), - NULL("null", 0, false, false, false), - BOOLEAN("boolean", 1, false, false, false), - BYTE("byte", Byte.BYTES, true, false, true), - SHORT("short", Short.BYTES, true, false, true), - INTEGER("integer", Integer.BYTES, true, false, true), - LONG("long", Long.BYTES, true, false, true), - UNSIGNED_LONG("unsigned_long", Long.BYTES, true, false, true), - DOUBLE("double", Double.BYTES, false, true, true), - FLOAT("float", Float.BYTES, false, true, true), - HALF_FLOAT("half_float", Float.BYTES, false, true, true), - SCALED_FLOAT("scaled_float", Long.BYTES, false, true, true), - KEYWORD("keyword", Integer.MAX_VALUE, false, false, true), - TEXT("text", Integer.MAX_VALUE, false, false, false), - DATETIME("DATETIME", "date", Long.BYTES, false, false, true), - IP("ip", 45, false, false, true), - VERSION("version", Integer.MAX_VALUE, false, false, true), - OBJECT("object", 0, false, false, false), - NESTED("nested", 0, false, false, false), - SOURCE(SourceFieldMapper.NAME, SourceFieldMapper.NAME, Integer.MAX_VALUE, false, false, false), - DATE_PERIOD("DATE_PERIOD", null, 3 * Integer.BYTES, false, false, false), - TIME_DURATION("TIME_DURATION", null, Integer.BYTES + Long.BYTES, false, false, false), - GEO_POINT("geo_point", Double.BYTES * 2, false, false, true), - CARTESIAN_POINT("cartesian_point", Double.BYTES * 2, false, false, true), - CARTESIAN_SHAPE("cartesian_shape", Integer.MAX_VALUE, false, false, true), - GEO_SHAPE("geo_shape", Integer.MAX_VALUE, false, false, true), + UNSUPPORTED(builder().typeName("UNSUPPORTED")), + NULL(builder().esType("null")), + BOOLEAN(builder().esType("boolean").size(1)), /** * These are numeric fields labeled as metric counters in time-series indices. Although stored @@ -61,11 +38,38 @@ public enum DataType { * These fields are strictly for use in retrieval from indices, rate aggregation, and casting to their * parent numeric type. 
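
To make the contract documented above concrete: a node shaped like the existing Filter plan node (constructor Filter(Source, LogicalPlan child, Expression condition)) would implement info() roughly as below. This is an illustrative sketch, not code from this change; the point from the new javadoc is that every expression the node holds must be passed to NodeInfo.create and consumed by the referenced constructor.

    // Sketch: info() for a node holding one child plan and one expression.
    @Override
    protected NodeInfo<Filter> info() {
        // child() and condition together are all the properties of this node, and the
        // Filter constructor consumes all of them. Omitting condition here would make
        // QueryPlan#transformExpressionsOnly(Function) silently skip this node.
        return NodeInfo.create(this, Filter::new, child(), condition);
    }
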
*/ - COUNTER_LONG("counter_long", Long.BYTES, false, false, true), - COUNTER_INTEGER("counter_integer", Integer.BYTES, false, false, true), - COUNTER_DOUBLE("counter_double", Double.BYTES, false, false, true), - DOC_DATA_TYPE("_doc", Integer.BYTES * 3, false, false, false), - TSID_DATA_TYPE("_tsid", Integer.MAX_VALUE, false, false, true); + COUNTER_LONG(builder().esType("counter_long").size(Long.BYTES).docValues().counter()), + COUNTER_INTEGER(builder().esType("counter_integer").size(Integer.BYTES).docValues().counter()), + COUNTER_DOUBLE(builder().esType("counter_double").size(Double.BYTES).docValues().counter()), + + LONG(builder().esType("long").size(Long.BYTES).wholeNumber().docValues().counter(COUNTER_LONG)), + INTEGER(builder().esType("integer").size(Integer.BYTES).wholeNumber().docValues().counter(COUNTER_INTEGER)), + SHORT(builder().esType("short").size(Short.BYTES).wholeNumber().docValues().widenSmallNumeric(INTEGER)), + BYTE(builder().esType("byte").size(Byte.BYTES).wholeNumber().docValues().widenSmallNumeric(INTEGER)), + UNSIGNED_LONG(builder().esType("unsigned_long").size(Long.BYTES).wholeNumber().docValues()), + DOUBLE(builder().esType("double").size(Double.BYTES).rationalNumber().docValues().counter(COUNTER_DOUBLE)), + FLOAT(builder().esType("float").size(Float.BYTES).rationalNumber().docValues().widenSmallNumeric(DOUBLE)), + HALF_FLOAT(builder().esType("half_float").size(Float.BYTES).rationalNumber().docValues().widenSmallNumeric(DOUBLE)), + SCALED_FLOAT(builder().esType("scaled_float").size(Long.BYTES).rationalNumber().docValues().widenSmallNumeric(DOUBLE)), + + KEYWORD(builder().esType("keyword").unknownSize().docValues()), + TEXT(builder().esType("text").unknownSize()), + DATETIME(builder().esType("date").typeName("DATETIME").size(Long.BYTES).docValues()), + IP(builder().esType("ip").size(45).docValues()), + VERSION(builder().esType("version").unknownSize().docValues()), + OBJECT(builder().esType("object")), + NESTED(builder().esType("nested")), + SOURCE(builder().esType(SourceFieldMapper.NAME).unknownSize()), + DATE_PERIOD(builder().typeName("DATE_PERIOD").size(3 * Integer.BYTES)), + TIME_DURATION(builder().typeName("TIME_DURATION").size(Integer.BYTES + Long.BYTES)), + GEO_POINT(builder().esType("geo_point").size(Double.BYTES * 2).docValues()), + CARTESIAN_POINT(builder().esType("cartesian_point").size(Double.BYTES * 2).docValues()), + CARTESIAN_SHAPE(builder().esType("cartesian_shape").unknownSize().docValues()), + GEO_SHAPE(builder().esType("geo_shape").unknownSize().docValues()), + + DOC_DATA_TYPE(builder().esType("_doc").size(Integer.BYTES * 3)), + TSID_DATA_TYPE(builder().esType("_tsid").unknownSize().docValues()), + PARTIAL_AGG(builder().esType("partial_agg").unknownSize()); private final String typeName; @@ -76,74 +80,66 @@ public enum DataType { private final int size; /** - * True if the type represents an integer number + * True if the type represents a "whole number", as in, does not have a decimal part. */ - private final boolean isInteger; + private final boolean isWholeNumber; /** - * True if the type represents a rational number + * True if the type represents a "rational number", as in, does have a decimal part. 
*/ - private final boolean isRational; + private final boolean isRationalNumber; /** * True if the type supports doc values by default */ private final boolean docValues; - DataType(String esName, int size, boolean isInteger, boolean isRational, boolean hasDocValues) { - this(null, esName, size, isInteger, isRational, hasDocValues); - } + /** + * {@code true} if this is a TSDB counter, {@code false} otherwise. + */ + private final boolean isCounter; + + /** + * If this is a "small" numeric type this contains the type ESQL will + * widen it into, otherwise this is {@code null}. + */ + private final DataType widenSmallNumeric; - DataType(String typeName, String esType, int size, boolean isInteger, boolean isRational, boolean hasDocValues) { - String typeString = typeName != null ? typeName : esType; + /** + * If this is a representable numeric this will be the counter "version" + * of this numeric, otherwise this is {@code null}. + */ + private final DataType counter; + + DataType(Builder builder) { + String typeString = builder.typeName != null ? builder.typeName : builder.esType; this.typeName = typeString.toLowerCase(Locale.ROOT); this.name = typeString.toUpperCase(Locale.ROOT); - this.esType = esType; - this.size = size; - this.isInteger = isInteger; - this.isRational = isRational; - this.docValues = hasDocValues; - } - - private static final Collection TYPES = Stream.of( - UNSUPPORTED, - NULL, - BOOLEAN, - BYTE, - SHORT, - INTEGER, - LONG, - UNSIGNED_LONG, - DOUBLE, - FLOAT, - HALF_FLOAT, - SCALED_FLOAT, - KEYWORD, - TEXT, - DATETIME, - IP, - VERSION, - OBJECT, - NESTED, - SOURCE, - DATE_PERIOD, - TIME_DURATION, - GEO_POINT, - CARTESIAN_POINT, - CARTESIAN_SHAPE, - GEO_SHAPE, - COUNTER_LONG, - COUNTER_INTEGER, - COUNTER_DOUBLE - ).sorted(Comparator.comparing(DataType::typeName)).toList(); + this.esType = builder.esType; + this.size = builder.size; + this.isWholeNumber = builder.isWholeNumber; + this.isRationalNumber = builder.isRationalNumber; + this.docValues = builder.docValues; + this.isCounter = builder.isCounter; + this.widenSmallNumeric = builder.widenSmallNumeric; + this.counter = builder.counter; + } + + private static final Collection TYPES = Arrays.stream(values()) + .filter(d -> d != DOC_DATA_TYPE && d != TSID_DATA_TYPE) + .sorted(Comparator.comparing(DataType::typeName)) + .toList(); private static final Map NAME_TO_TYPE = TYPES.stream().collect(toUnmodifiableMap(DataType::typeName, t -> t)); - private static Map ES_TO_TYPE; + private static final Map ES_TO_TYPE; static { Map map = TYPES.stream().filter(e -> e.esType() != null).collect(toMap(DataType::esType, t -> t)); - map.put("date_nanos", DATETIME); + // TODO: Why don't we use the names ES uses as the esType field for these? + // ES calls this 'point', but ESQL calls it 'cartesian_point' + map.put("point", DataType.CARTESIAN_POINT); + map.put("shape", DataType.CARTESIAN_SHAPE); ES_TO_TYPE = Collections.unmodifiableMap(map); } @@ -258,16 +254,32 @@ public String esType() { return esType; } - public boolean isInteger() { - return isInteger; + /** + * The name we give to types on the response. + */ + public String outputType() { + return esType == null ? "unsupported" : esType; } - public boolean isRational() { - return isRational; + /** + * True if the type represents a "whole number", as in, does not have a decimal part. + */ + public boolean isWholeNumber() { + return isWholeNumber; } + /** + * True if the type represents a "rational number", as in, does have a decimal part. 
+ */ + public boolean isRationalNumber() { + return isRationalNumber; + } + + /** + * Does this data type represent any number? + */ public boolean isNumeric() { - return isInteger || isRational; + return isWholeNumber || isRationalNumber; } public int size() { @@ -278,6 +290,29 @@ public boolean hasDocValues() { return docValues; } + /** + * {@code true} if this is a TSDB counter, {@code false} otherwise. + */ + public boolean isCounter() { + return isCounter; + } + + /** + * If this is a "small" numeric type this contains the type ESQL will + * widen it into, otherwise this returns {@code this}. + */ + public DataType widenSmallNumeric() { + return widenSmallNumeric == null ? this : widenSmallNumeric; + } + + /** + * If this is a representable numeric this will be the counter "version" + * of this numeric, otherwise this is {@code null}. + */ + public DataType counter() { + return counter; + } + public void writeTo(StreamOutput out) throws IOException { out.writeString(typeName); } @@ -303,4 +338,105 @@ public static DataType fromNameOrAlias(String typeName) { DataType type = NAME_OR_ALIAS_TO_TYPE.get(typeName.toLowerCase(Locale.ROOT)); return type != null ? type : UNSUPPORTED; } + + static Builder builder() { + return new Builder(); + } + + /** + * Named parameters with default values. It's just easier to do this with + * a builder in java.... + */ + private static class Builder { + private String esType; + + private String typeName; + + private int size; + + /** + * True if the type represents a "whole number", as in, does not have a decimal part. + */ + private boolean isWholeNumber; + + /** + * True if the type represents a "rational number", as in, does have a decimal part. + */ + private boolean isRationalNumber; + + /** + * True if the type supports doc values by default + */ + private boolean docValues; + + /** + * {@code true} if this is a TSDB counter, {@code false} otherwise. + */ + private boolean isCounter; + + /** + * If this is a "small" numeric type this contains the type ESQL will + * widen it into, otherwise this is {@code null}. + */ + private DataType widenSmallNumeric; + + /** + * If this is a representable numeric this will be the counter "version" + * of this numeric, otherwise this is {@code null}. 
+ */ + private DataType counter; + + Builder() {} + + Builder esType(String esType) { + this.esType = esType; + return this; + } + + Builder typeName(String typeName) { + this.typeName = typeName; + return this; + } + + Builder size(int size) { + this.size = size; + return this; + } + + Builder unknownSize() { + this.size = Integer.MAX_VALUE; + return this; + } + + Builder wholeNumber() { + this.isWholeNumber = true; + return this; + } + + Builder rationalNumber() { + this.isRationalNumber = true; + return this; + } + + Builder docValues() { + this.docValues = true; + return this; + } + + Builder counter() { + this.isCounter = true; + return this; + } + + Builder widenSmallNumeric(DataType widenSmallNumeric) { + this.widenSmallNumeric = widenSmallNumeric; + return this; + } + + Builder counter(DataType counter) { + assert counter.isCounter; + this.counter = counter; + return this; + } + } } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataTypeConverter.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataTypeConverter.java index bb53472d06e71..bd87a92f3289d 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataTypeConverter.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataTypeConverter.java @@ -78,9 +78,9 @@ public static DataType commonType(DataType left, DataType right) { } if (left.isNumeric() && right.isNumeric()) { // if one is int - if (left.isInteger()) { + if (left.isWholeNumber()) { // promote the highest int - if (right.isInteger()) { + if (right.isWholeNumber()) { if (left == UNSIGNED_LONG || right == UNSIGNED_LONG) { return UNSIGNED_LONG; } @@ -90,7 +90,7 @@ public static DataType commonType(DataType left, DataType right) { return right; } // try the other side - if (right.isInteger()) { + if (right.isWholeNumber()) { return left; } // promote the highest rational @@ -200,10 +200,10 @@ private static Converter conversionToVersion(DataType from) { } private static Converter conversionToUnsignedLong(DataType from) { - if (from.isRational()) { + if (from.isRationalNumber()) { return DefaultConverter.RATIONAL_TO_UNSIGNED_LONG; } - if (from.isInteger()) { + if (from.isWholeNumber()) { return DefaultConverter.INTEGER_TO_UNSIGNED_LONG; } if (from == BOOLEAN) { @@ -219,10 +219,10 @@ private static Converter conversionToUnsignedLong(DataType from) { } private static Converter conversionToLong(DataType from) { - if (from.isRational()) { + if (from.isRationalNumber()) { return DefaultConverter.RATIONAL_TO_LONG; } - if (from.isInteger()) { + if (from.isWholeNumber()) { return DefaultConverter.INTEGER_TO_LONG; } if (from == BOOLEAN) { @@ -238,10 +238,10 @@ private static Converter conversionToLong(DataType from) { } private static Converter conversionToInt(DataType from) { - if (from.isRational()) { + if (from.isRationalNumber()) { return DefaultConverter.RATIONAL_TO_INT; } - if (from.isInteger()) { + if (from.isWholeNumber()) { return DefaultConverter.INTEGER_TO_INT; } if (from == BOOLEAN) { @@ -257,10 +257,10 @@ private static Converter conversionToInt(DataType from) { } private static Converter conversionToShort(DataType from) { - if (from.isRational()) { + if (from.isRationalNumber()) { return DefaultConverter.RATIONAL_TO_SHORT; } - if (from.isInteger()) { + if (from.isWholeNumber()) { return DefaultConverter.INTEGER_TO_SHORT; } if (from == BOOLEAN) { @@ -276,10 +276,10 @@ private static Converter conversionToShort(DataType 
from) { } private static Converter conversionToByte(DataType from) { - if (from.isRational()) { + if (from.isRationalNumber()) { return DefaultConverter.RATIONAL_TO_BYTE; } - if (from.isInteger()) { + if (from.isWholeNumber()) { return DefaultConverter.INTEGER_TO_BYTE; } if (from == BOOLEAN) { @@ -295,10 +295,10 @@ private static Converter conversionToByte(DataType from) { } private static DefaultConverter conversionToFloat(DataType from) { - if (from.isRational()) { + if (from.isRationalNumber()) { return DefaultConverter.RATIONAL_TO_FLOAT; } - if (from.isInteger()) { + if (from.isWholeNumber()) { return DefaultConverter.INTEGER_TO_FLOAT; } if (from == BOOLEAN) { @@ -314,10 +314,10 @@ private static DefaultConverter conversionToFloat(DataType from) { } private static DefaultConverter conversionToDouble(DataType from) { - if (from.isRational()) { + if (from.isRationalNumber()) { return DefaultConverter.RATIONAL_TO_DOUBLE; } - if (from.isInteger()) { + if (from.isWholeNumber()) { return DefaultConverter.INTEGER_TO_DOUBLE; } if (from == BOOLEAN) { @@ -333,10 +333,10 @@ private static DefaultConverter conversionToDouble(DataType from) { } private static DefaultConverter conversionToDateTime(DataType from) { - if (from.isRational()) { + if (from.isRationalNumber()) { return DefaultConverter.RATIONAL_TO_DATETIME; } - if (from.isInteger()) { + if (from.isWholeNumber()) { return DefaultConverter.INTEGER_TO_DATETIME; } if (from == BOOLEAN) { @@ -628,6 +628,6 @@ public static DataType asInteger(DataType dataType) { return dataType; } - return dataType.isInteger() ? dataType : LONG; + return dataType.isWholeNumber() ? dataType : LONG; } } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java index bdc60ebab55ef..4ef20a724ab3c 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java @@ -55,7 +55,7 @@ public EsField(String name, DataType esDataType, Map properties public EsField(StreamInput in) throws IOException { this.name = in.readString(); this.esDataType = DataType.readFrom(in); - this.properties = in.readImmutableMap(StreamInput::readString, i -> i.readNamedWriteable(EsField.class)); + this.properties = in.readImmutableMap(i -> i.readNamedWriteable(EsField.class)); this.aggregatable = in.readBoolean(); this.isAlias = in.readBoolean(); } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/Schema.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/Schema.java deleted file mode 100644 index fa7c1d7e1e3e6..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/Schema.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ -package org.elasticsearch.xpack.esql.core.type; - -import org.elasticsearch.xpack.esql.core.util.Check; - -import java.util.Iterator; -import java.util.List; -import java.util.NoSuchElementException; -import java.util.Spliterator; -import java.util.Spliterators; -import java.util.stream.Stream; -import java.util.stream.StreamSupport; - -import static java.util.Collections.emptyList; - -public class Schema implements Iterable { - - public interface Entry { - String name(); - - DataType type(); - } - - static class DefaultEntry implements Entry { - private final String name; - private final DataType type; - - DefaultEntry(String name, DataType type) { - this.name = name; - this.type = type; - } - - @Override - public String name() { - return name; - } - - @Override - public DataType type() { - return type; - } - } - - public static final Schema EMPTY = new Schema(emptyList(), emptyList()); - - private final List names; - private final List types; - - public Schema(List names, List types) { - Check.isTrue(names.size() == types.size(), "Different # of names {} vs types {}", names, types); - this.types = types; - this.names = names; - } - - public List names() { - return names; - } - - public List types() { - return types; - } - - public int size() { - return names.size(); - } - - public Entry get(int i) { - return new DefaultEntry(names.get(i), types.get(i)); - } - - public DataType type(String name) { - int indexOf = names.indexOf(name); - if (indexOf < 0) { - return null; - } - return types.get(indexOf); - } - - @Override - public Iterator iterator() { - return new Iterator<>() { - private final int size = size(); - private int pos = -1; - - @Override - public boolean hasNext() { - return pos < size - 1; - } - - @Override - public Entry next() { - if (pos++ >= size) { - throw new NoSuchElementException(); - } - return get(pos); - } - }; - } - - public Stream stream() { - return StreamSupport.stream(spliterator(), false); - } - - @Override - public Spliterator spliterator() { - return Spliterators.spliterator(iterator(), size(), 0); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("["); - for (int i = 0; i < names.size(); i++) { - if (i > 0) { - sb.append(","); - } - sb.append(names.get(i)); - sb.append(":"); - sb.append(types.get(i).typeName()); - } - sb.append("]"); - return sb.toString(); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/ActionListeners.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/ActionListeners.java deleted file mode 100644 index 025f9c2b6fd7a..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/ActionListeners.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.esql.core.util; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.core.CheckedConsumer; -import org.elasticsearch.core.CheckedFunction; - -import java.util.function.Consumer; - -public class ActionListeners { - - private ActionListeners() {} - - /** - * Combination of {@link ActionListener#wrap(CheckedConsumer, Consumer)} and {@link ActionListener#map} - */ - public static ActionListener map(ActionListener delegate, CheckedFunction fn) { - return delegate.delegateFailureAndWrap((l, r) -> l.onResponse(fn.apply(r))); - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/Graphviz.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/Graphviz.java deleted file mode 100644 index 5502f04549ce3..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/Graphviz.java +++ /dev/null @@ -1,313 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.util; - -import org.elasticsearch.xpack.esql.core.tree.Node; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Map.Entry; -import java.util.concurrent.atomic.AtomicInteger; - -// use the awesome http://mdaines.github.io/viz.js/ to visualize and play around with the various options -public abstract class Graphviz { - - private static final int NODE_LABEL_INDENT = 12; - private static final int CLUSTER_INDENT = 2; - private static final int INDENT = 1; - - public static String dot(String name, Node root) { - StringBuilder sb = new StringBuilder(); - // name - sb.append(String.format(Locale.ROOT, """ - digraph G { rankdir=BT; - label="%s"; - node[shape=plaintext, color=azure1]; - edge[color=black,arrowsize=0.5]; - """, name)); - handleNode(sb, root, new AtomicInteger(0), INDENT, true); - sb.append("}"); - return sb.toString(); - } - - public static String dot(Map> clusters, boolean drawSubTrees) { - AtomicInteger nodeCounter = new AtomicInteger(0); - - StringBuilder sb = new StringBuilder(); - // name - sb.append(""" - digraph G { rankdir=BT; - node[shape=plaintext, color=azure1]; - edge[color=black]; - graph[compound=true]; - - """); - - int clusterNodeStart = 1; - int clusterId = 0; - - StringBuilder clusterEdges = new StringBuilder(); - - for (Entry> entry : clusters.entrySet()) { - indent(sb, INDENT); - // draw cluster - sb.append("subgraph cluster"); - sb.append(++clusterId); - sb.append(" {\n"); - indent(sb, CLUSTER_INDENT); - sb.append("color=blue;\n"); - indent(sb, CLUSTER_INDENT); - sb.append("label="); - sb.append(quoteGraphviz(entry.getKey())); - sb.append(";\n\n"); - - /* to help align the clusters, add an invisible node (that could - * otherwise be used for labeling but it consumes too much space) - * used for alignment */ - indent(sb, CLUSTER_INDENT); - sb.append("c" + clusterId); - sb.append("[style=invis]\n"); - // add edge to the first node in the cluster - indent(sb, CLUSTER_INDENT); - sb.append("node" + (nodeCounter.get() + 1)); - sb.append(" -> "); - sb.append("c" + clusterId); - sb.append(" [style=invis];\n"); - - handleNode(sb, entry.getValue(), nodeCounter, CLUSTER_INDENT, drawSubTrees); - - int clusterNodeStop 
= nodeCounter.get(); - - indent(sb, INDENT); - sb.append("}\n"); - - // connect cluster only if there are at least two - if (clusterId > 1) { - indent(clusterEdges, INDENT); - clusterEdges.append("node" + clusterNodeStart); - clusterEdges.append(" -> "); - clusterEdges.append("node" + clusterNodeStop); - clusterEdges.append("[ltail=cluster"); - clusterEdges.append(clusterId - 1); - clusterEdges.append(" lhead=cluster"); - clusterEdges.append(clusterId); - clusterEdges.append("];\n"); - } - clusterNodeStart = clusterNodeStop; - } - - sb.append("\n"); - - // connecting the clusters arranges them in a weird position - // so don't - // sb.append(clusterEdges.toString()); - - // align the cluster by requiring the invisible nodes in each cluster to be of the same rank - indent(sb, INDENT); - sb.append("{ rank=same"); - for (int i = 1; i <= clusterId; i++) { - sb.append(" c" + i); - } - sb.append(" };\n}"); - - return sb.toString(); - } - - private static void handleNode(StringBuilder output, Node n, AtomicInteger nodeId, int currentIndent, boolean drawSubTrees) { - // each node has its own id - int thisId = nodeId.incrementAndGet(); - - // first determine node info - StringBuilder nodeInfo = new StringBuilder(); - nodeInfo.append("\n"); - indent(nodeInfo, currentIndent + NODE_LABEL_INDENT); - nodeInfo.append(""" - - """); - indent(nodeInfo, currentIndent + NODE_LABEL_INDENT); - nodeInfo.append(String.format(Locale.ROOT, """ - - """, n.nodeName())); - indent(nodeInfo, currentIndent + NODE_LABEL_INDENT); - - List props = n.nodeProperties(); - List parsed = new ArrayList<>(props.size()); - List> subTrees = new ArrayList<>(); - - for (Object v : props) { - // skip null values, children and location - if (v != null && n.children().contains(v) == false) { - if (v instanceof Collection c) { - StringBuilder colS = new StringBuilder(); - for (Object o : c) { - if (drawSubTrees && isAnotherTree(o)) { - subTrees.add((Node) o); - } else { - colS.append(o); - colS.append("\n"); - } - } - if (colS.length() > 0) { - parsed.add(colS.toString()); - } - } else { - if (drawSubTrees && isAnotherTree(v)) { - subTrees.add((Node) v); - } else { - parsed.add(v.toString()); - } - } - } - } - - for (String line : parsed) { - nodeInfo.append("\n"); - indent(nodeInfo, currentIndent + NODE_LABEL_INDENT); - } - - nodeInfo.append("
      %s
      "); - nodeInfo.append(escapeHtml(line)); - nodeInfo.append("
      \n"); - - // check any subtrees - if (subTrees.isEmpty() == false) { - // write nested trees - output.append(String.format(Locale.ROOT, """ - subgraph cluster_%s{ - style=filled; color=white; fillcolor=azure2; label=""; - """, thisId)); - } - - // write node info - indent(output, currentIndent); - output.append("node"); - output.append(thisId); - output.append("[label="); - output.append(quoteGraphviz(nodeInfo.toString())); - output.append("];\n"); - - if (subTrees.isEmpty() == false) { - indent(output, currentIndent + INDENT); - output.append("node[shape=ellipse, color=black]\n"); - - for (Node node : subTrees) { - indent(output, currentIndent + INDENT); - drawNodeTree(output, node, "st_" + thisId + "_", 0); - } - - output.append("\n}\n"); - } - - indent(output, currentIndent + 1); - // output.append("{ rankdir=LR; rank=same; \n"); - int prevId = -1; - // handle children - for (Node c : n.children()) { - // the child will always have the next id - int childId = nodeId.get() + 1; - handleNode(output, c, nodeId, currentIndent + INDENT, drawSubTrees); - indent(output, currentIndent + 1); - output.append("node"); - output.append(childId); - output.append(" -> "); - output.append("node"); - output.append(thisId); - output.append(";\n"); - - // add invisible connection between children for ordering - if (prevId != -1) { - indent(output, currentIndent + 1); - output.append("node"); - output.append(prevId); - output.append(" -> "); - output.append("node"); - output.append(childId); - output.append(";\n"); - } - prevId = childId; - } - indent(output, currentIndent); - // output.append("}\n"); - } - - private static void drawNodeTree(StringBuilder sb, Node node, String prefix, int counter) { - String nodeName = prefix + counter; - prefix = nodeName; - - // draw node - drawNode(sb, node, nodeName); - // then draw all children nodes and connections between them to be on the same level - sb.append("{ rankdir=LR; rank=same;\n"); - int prevId = -1; - int saveId = counter; - for (Node child : node.children()) { - int currId = ++counter; - drawNode(sb, child, prefix + currId); - if (prevId > -1) { - sb.append(prefix + prevId + " -> " + prefix + currId + " [style=invis];\n"); - } - prevId = currId; - } - sb.append("}\n"); - - // now draw connections to the parent - for (int i = saveId; i < counter; i++) { - sb.append(prefix + (i + 1) + " -> " + nodeName + ";\n"); - } - - // draw the child - counter = saveId; - for (Node child : node.children()) { - drawNodeTree(sb, child, prefix, ++counter); - } - } - - private static void drawNode(StringBuilder sb, Node node, String nodeName) { - if (node.children().isEmpty()) { - sb.append(nodeName + " [label=\"" + node.toString() + "\"];\n"); - } else { - sb.append(nodeName + " [label=\"" + node.nodeName() + "\"];\n"); - } - } - - private static boolean isAnotherTree(Object value) { - if (value instanceof Node n) { - // create a subgraph - if (n.children().size() > 0) { - return true; - } - } - return false; - } - - private static String escapeHtml(Object value) { - return String.valueOf(value) - .replace("&", "&") - .replace("\"", """) - .replace("'", "'") - .replace("<", "<") - .replace(">", ">") - .replace("\n", "
      "); - } - - private static String quoteGraphviz(String value) { - if (value.contains("<")) { - return "<" + value + ">"; - } - - return "\"" + value + "\""; - } - - private static void indent(StringBuilder sb, int indent) { - for (int i = 0; i < indent; i++) { - sb.append(" "); - } - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/LoggingUtils.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/LoggingUtils.java deleted file mode 100644 index 09b80b25ca5f8..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/LoggingUtils.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.core.util; - -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.logging.Level; -import org.elasticsearch.logging.Logger; -import org.elasticsearch.rest.RestStatus; - -public final class LoggingUtils { - - private LoggingUtils() {} - - public static void logOnFailure(Logger logger, Throwable throwable) { - RestStatus status = ExceptionsHelper.status(throwable); - logger.log(status.getStatus() >= 500 ? Level.WARN : Level.DEBUG, () -> "Request failed with status [" + status + "]: ", throwable); - } - -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java index 485084bac60b3..df8fac06dd478 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java @@ -7,8 +7,6 @@ package org.elasticsearch.xpack.esql.core.util; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -35,10 +33,4 @@ public interface PlanStreamInput { * the same result. */ NameId mapNameId(long id) throws IOException; - - /** - * Read an {@link Expression} from the stream. This will soon be replaced with - * {@link StreamInput#readNamedWriteable}. - */ - Expression readExpression() throws IOException; } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java deleted file mode 100644 index 6a3d8fb77316c..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.core.util; - -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.esql.core.expression.Expression; - -import java.io.IOException; - -/** - * Interface for streams that can serialize plan components. 
This exists so - * ESQL proper can expose streaming capability to ESQL-core. If the world is kind - * and just we'll remove this when we flatten everything from ESQL-core into - * ESQL proper. - */ -public interface PlanStreamOutput { - /** - * Write an {@link Expression} to the stream. This will soon be replaced with - * {@link StreamOutput#writeNamedWriteable}. - */ - void writeExpression(Expression expression) throws IOException; -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/StringUtils.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/StringUtils.java index 47246a4e190dd..4ba3658697c0d 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/StringUtils.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/StringUtils.java @@ -354,6 +354,9 @@ public static Number parseIntegral(String string) throws InvalidArgumentExceptio } return bi; } + if (bi.compareTo(BigInteger.valueOf(Long.MIN_VALUE)) < 0) { + throw new InvalidArgumentException("Magnitude of negative number [{}] is too large", string); + } // try to downsize to int if possible (since that's the most common type) if (bi.intValue() == bi.longValue()) { // ternary operator would always promote to Long return bi.intValueExact(); diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/action/QlStatusResponseTests.java b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/action/QlStatusResponseTests.java deleted file mode 100644 index e38755b703913..0000000000000 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/action/QlStatusResponseTests.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.action; - -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.esql.core.async.QlStatusResponse; - -import java.io.IOException; -import java.util.Date; - -import static org.elasticsearch.xpack.core.async.GetAsyncResultRequestTests.randomSearchId; - -public class QlStatusResponseTests extends AbstractWireSerializingTestCase { - - @Override - protected QlStatusResponse createTestInstance() { - String id = randomSearchId(); - boolean isRunning = randomBoolean(); - boolean isPartial = isRunning ? randomBoolean() : false; - long randomDate = (new Date(randomLongBetween(0, 3000000000000L))).getTime(); - Long startTimeMillis = randomBoolean() ? null : randomDate; - long expirationTimeMillis = startTimeMillis == null ? randomDate : startTimeMillis + 3600000L; - RestStatus completionStatus = isRunning ? null : randomBoolean() ? 
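
The guard added to StringUtils.parseIntegral above rejects negative literals whose magnitude does not fit in a long, raising a clear InvalidArgumentException. A minimal sketch of the resulting behaviour (the literals are illustrative, not taken from this change):

    // Long.MIN_VALUE is -9223372036854775808; anything below it is now rejected up front.
    Number min = StringUtils.parseIntegral("-9223372036854775808");   // still parses to a Long
    try {
        StringUtils.parseIntegral("-9223372036854775809");
    } catch (InvalidArgumentException e) {
        // "Magnitude of negative number [...] is too large"
    }
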
RestStatus.OK : RestStatus.SERVICE_UNAVAILABLE; - return new QlStatusResponse(id, isRunning, isPartial, startTimeMillis, expirationTimeMillis, completionStatus); - } - - @Override - protected Writeable.Reader instanceReader() { - return QlStatusResponse::new; - } - - @Override - protected QlStatusResponse mutateInstance(QlStatusResponse instance) { - // return a response with the opposite running status - boolean isRunning = instance.isRunning() == false; - boolean isPartial = isRunning ? randomBoolean() : false; - RestStatus completionStatus = isRunning ? null : randomBoolean() ? RestStatus.OK : RestStatus.SERVICE_UNAVAILABLE; - return new QlStatusResponse( - instance.getId(), - isRunning, - isPartial, - instance.getStartTime(), - instance.getExpirationTime(), - completionStatus - ); - } - - public void testToXContent() throws IOException { - QlStatusResponse response = createTestInstance(); - try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { - Object[] args = new Object[] { - response.getId(), - response.isRunning(), - response.isPartial(), - response.getStartTime() != null ? "\"start_time_in_millis\" : " + response.getStartTime() + "," : "", - response.getExpirationTime(), - response.getCompletionStatus() != null ? ", \"completion_status\" : " + response.getCompletionStatus().getStatus() : "" }; - String expectedJson = Strings.format(""" - { - "id" : "%s", - "is_running" : %s, - "is_partial" : %s, - %s - "expiration_time_in_millis" : %s - %s - } - """, args); - response.toXContent(builder, ToXContent.EMPTY_PARAMS); - assertEquals(XContentHelper.stripWhitespace(expectedJson), Strings.toString(builder)); - } - } -} diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/ConstantExtractorTests.java b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/ConstantExtractorTests.java deleted file mode 100644 index a7b55ba38be12..0000000000000 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/execution/search/extractor/ConstantExtractorTests.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ -package org.elasticsearch.xpack.esql.core.execution.search.extractor; - -import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.test.AbstractWireSerializingTestCase; - -import java.util.function.Supplier; - -public class ConstantExtractorTests extends AbstractWireSerializingTestCase { - public static ConstantExtractor randomConstantExtractor() { - return new ConstantExtractor(randomValidConstant()); - } - - private static Object randomValidConstant() { - @SuppressWarnings("unchecked") - Supplier valueSupplier = randomFrom(() -> randomInt(), () -> randomDouble(), () -> randomAlphaOfLengthBetween(1, 140)); - return valueSupplier.get(); - } - - @Override - protected ConstantExtractor createTestInstance() { - return randomConstantExtractor(); - } - - @Override - protected Reader instanceReader() { - return ConstantExtractor::new; - } - - @Override - protected ConstantExtractor mutateInstance(ConstantExtractor instance) { - return new ConstantExtractor(instance.extract((SearchHit) null) + "mutated"); - } - - public void testGet() { - Object expected = randomValidConstant(); - int times = between(1, 1000); - for (int i = 0; i < times; i++) { - assertSame(expected, new ConstantExtractor(expected).extract((SearchHit) null)); - } - } - - public void testToString() { - assertEquals("^foo", new ConstantExtractor("foo").toString()); - assertEquals("^42", new ConstantExtractor("42").toString()); - } -} diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/NullabilityTests.java b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/NullabilityTests.java index fbeac1748ac81..9bcbaef0060ff 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/NullabilityTests.java +++ b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/NullabilityTests.java @@ -6,11 +6,14 @@ */ package org.elasticsearch.xpack.esql.core.expression; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; +import java.io.IOException; + import static java.util.Arrays.asList; import static org.elasticsearch.xpack.esql.core.expression.Nullability.FALSE; import static org.elasticsearch.xpack.esql.core.expression.Nullability.TRUE; @@ -28,6 +31,16 @@ public Nullable(Source source, Nullability nullability) { this.nullability = nullability; } + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + @Override public Nullability nullable() { return nullability; diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/function/FunctionRegistryTests.java b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/function/FunctionRegistryTests.java deleted file mode 100644 index c7ab9731cb8dc..0000000000000 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/function/FunctionRegistryTests.java +++ /dev/null @@ -1,233 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.expression.function; - -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.esql.core.ParsingException; -import org.elasticsearch.xpack.esql.core.QlIllegalArgumentException; -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.function.scalar.ConfigurationFunction; -import org.elasticsearch.xpack.esql.core.expression.function.scalar.ScalarFunction; -import org.elasticsearch.xpack.esql.core.session.Configuration; -import org.elasticsearch.xpack.esql.core.tree.NodeInfo; -import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.core.tree.SourceTests; -import org.elasticsearch.xpack.esql.core.type.DataType; - -import java.util.Arrays; -import java.util.List; - -import static java.util.Collections.emptyList; -import static org.elasticsearch.xpack.esql.core.TestUtils.randomConfiguration; -import static org.elasticsearch.xpack.esql.core.expression.function.FunctionRegistry.def; -import static org.elasticsearch.xpack.esql.core.expression.function.FunctionResolutionStrategy.DEFAULT; -import static org.hamcrest.Matchers.endsWith; -import static org.hamcrest.Matchers.is; -import static org.mockito.Mockito.mock; - -public class FunctionRegistryTests extends ESTestCase { - - public void testNoArgFunction() { - UnresolvedFunction ur = uf(DEFAULT); - FunctionRegistry r = new FunctionRegistry(defineDummyNoArgFunction()); - FunctionDefinition def = r.resolveFunction(ur.name()); - assertEquals(ur.source(), ur.buildResolved(randomConfiguration(), def).source()); - } - - public static FunctionDefinition defineDummyNoArgFunction() { - return def(DummyFunction.class, DummyFunction::new, "DUMMY_FUNCTION"); - } - - public void testUnaryFunction() { - UnresolvedFunction ur = uf(DEFAULT, mock(Expression.class)); - FunctionRegistry r = new FunctionRegistry(defineDummyUnaryFunction(ur)); - FunctionDefinition def = r.resolveFunction(ur.name()); - assertEquals(ur.source(), ur.buildResolved(randomConfiguration(), def).source()); - - // No children aren't supported - ParsingException e = expectThrows(ParsingException.class, () -> uf(DEFAULT).buildResolved(randomConfiguration(), def)); - assertThat(e.getMessage(), endsWith("expects exactly one argument")); - - // Multiple children aren't supported - e = expectThrows( - ParsingException.class, - () -> uf(DEFAULT, mock(Expression.class), mock(Expression.class)).buildResolved(randomConfiguration(), def) - ); - assertThat(e.getMessage(), endsWith("expects exactly one argument")); - } - - public static FunctionDefinition defineDummyUnaryFunction(UnresolvedFunction ur) { - return def(DummyFunction.class, (Source l, Expression e) -> { - assertSame(e, ur.children().get(0)); - return new DummyFunction(l); - }, "DUMMY_FUNCTION"); - } - - public void testBinaryFunction() { - UnresolvedFunction ur = uf(DEFAULT, mock(Expression.class), mock(Expression.class)); - FunctionRegistry r = new FunctionRegistry(def(DummyFunction.class, (Source l, Expression lhs, Expression rhs) -> { - assertSame(lhs, ur.children().get(0)); - assertSame(rhs, ur.children().get(1)); - return new DummyFunction(l); - }, "DUMMY_FUNCTION")); - FunctionDefinition def = r.resolveFunction(ur.name()); - assertEquals(ur.source(), ur.buildResolved(randomConfiguration(), def).source()); - - // No children aren't supported - 
ParsingException e = expectThrows(ParsingException.class, () -> uf(DEFAULT).buildResolved(randomConfiguration(), def)); - assertThat(e.getMessage(), endsWith("expects exactly two arguments")); - - // One child isn't supported - e = expectThrows(ParsingException.class, () -> uf(DEFAULT, mock(Expression.class)).buildResolved(randomConfiguration(), def)); - assertThat(e.getMessage(), endsWith("expects exactly two arguments")); - - // Many children aren't supported - e = expectThrows( - ParsingException.class, - () -> uf(DEFAULT, mock(Expression.class), mock(Expression.class), mock(Expression.class)).buildResolved( - randomConfiguration(), - def - ) - ); - assertThat(e.getMessage(), endsWith("expects exactly two arguments")); - } - - public void testAliasNameIsTheSameAsAFunctionName() { - FunctionRegistry r = new FunctionRegistry(def(DummyFunction.class, DummyFunction::new, "DUMMY_FUNCTION", "ALIAS")); - QlIllegalArgumentException iae = expectThrows( - QlIllegalArgumentException.class, - () -> r.register(def(DummyFunction2.class, DummyFunction2::new, "DUMMY_FUNCTION2", "DUMMY_FUNCTION")) - ); - assertEquals("alias [DUMMY_FUNCTION] is used by [DUMMY_FUNCTION] and [DUMMY_FUNCTION2]", iae.getMessage()); - } - - public void testDuplicateAliasInTwoDifferentFunctionsFromTheSameBatch() { - QlIllegalArgumentException iae = expectThrows( - QlIllegalArgumentException.class, - () -> new FunctionRegistry( - def(DummyFunction.class, DummyFunction::new, "DUMMY_FUNCTION", "ALIAS"), - def(DummyFunction2.class, DummyFunction2::new, "DUMMY_FUNCTION2", "ALIAS") - ) - ); - assertEquals("alias [ALIAS] is used by [DUMMY_FUNCTION(ALIAS)] and [DUMMY_FUNCTION2]", iae.getMessage()); - } - - public void testDuplicateAliasInTwoDifferentFunctionsFromTwoDifferentBatches() { - FunctionRegistry r = new FunctionRegistry(def(DummyFunction.class, DummyFunction::new, "DUMMY_FUNCTION", "ALIAS")); - QlIllegalArgumentException iae = expectThrows( - QlIllegalArgumentException.class, - () -> r.register(def(DummyFunction2.class, DummyFunction2::new, "DUMMY_FUNCTION2", "ALIAS")) - ); - assertEquals("alias [ALIAS] is used by [DUMMY_FUNCTION] and [DUMMY_FUNCTION2]", iae.getMessage()); - } - - public void testFunctionResolving() { - UnresolvedFunction ur = uf(DEFAULT, mock(Expression.class)); - FunctionRegistry r = new FunctionRegistry(def(DummyFunction.class, (Source l, Expression e) -> { - assertSame(e, ur.children().get(0)); - return new DummyFunction(l); - }, "DUMMY_FUNCTION", "DUMMY_FUNC")); - - // Resolve by primary name - FunctionDefinition def = r.resolveFunction(r.resolveAlias("DuMMy_FuncTIon")); - assertEquals(ur.source(), ur.buildResolved(randomConfiguration(), def).source()); - - def = r.resolveFunction(r.resolveAlias("Dummy_Function")); - assertEquals(ur.source(), ur.buildResolved(randomConfiguration(), def).source()); - - def = r.resolveFunction(r.resolveAlias("dummy_function")); - assertEquals(ur.source(), ur.buildResolved(randomConfiguration(), def).source()); - - def = r.resolveFunction(r.resolveAlias("DUMMY_FUNCTION")); - assertEquals(ur.source(), ur.buildResolved(randomConfiguration(), def).source()); - - // Resolve by alias - def = r.resolveFunction(r.resolveAlias("DumMy_FunC")); - assertEquals(ur.source(), ur.buildResolved(randomConfiguration(), def).source()); - - def = r.resolveFunction(r.resolveAlias("dummy_func")); - assertEquals(ur.source(), ur.buildResolved(randomConfiguration(), def).source()); - - def = r.resolveFunction(r.resolveAlias("DUMMY_FUNC")); - assertEquals(ur.source(), 
ur.buildResolved(randomConfiguration(), def).source()); - - // Not resolved - QlIllegalArgumentException e = expectThrows( - QlIllegalArgumentException.class, - () -> r.resolveFunction(r.resolveAlias("DummyFunction")) - ); - assertThat(e.getMessage(), is("Cannot find function DUMMYFUNCTION; this should have been caught during analysis")); - - e = expectThrows(QlIllegalArgumentException.class, () -> r.resolveFunction(r.resolveAlias("dummyFunction"))); - assertThat(e.getMessage(), is("Cannot find function DUMMYFUNCTION; this should have been caught during analysis")); - } - - public void testConfigurationOptionalFunction() { - UnresolvedFunction ur = uf(DEFAULT, mock(Expression.class)); - FunctionRegistry r = new FunctionRegistry( - def(DummyConfigurationOptionalArgumentFunction.class, (Source l, Expression e, Configuration c) -> { - assertSame(e, ur.children().get(0)); - return new DummyConfigurationOptionalArgumentFunction(l, List.of(ur), c); - }, "DUMMY") - ); - FunctionDefinition def = r.resolveFunction(r.resolveAlias("DUMMY")); - assertEquals(ur.source(), ur.buildResolved(randomConfiguration(), def).source()); - } - - public static UnresolvedFunction uf(FunctionResolutionStrategy resolutionStrategy, Expression... children) { - return new UnresolvedFunction(SourceTests.randomSource(), "DUMMY_FUNCTION", resolutionStrategy, Arrays.asList(children)); - } - - public static class DummyFunction extends ScalarFunction { - public DummyFunction(Source source) { - super(source, emptyList()); - } - - @Override - protected NodeInfo info() { - return NodeInfo.create(this); - } - - @Override - public Expression replaceChildren(List newChildren) { - throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); - } - - @Override - public DataType dataType() { - return null; - } - } - - public static class DummyFunction2 extends DummyFunction { - public DummyFunction2(Source source) { - super(source); - } - } - - public static class DummyConfigurationOptionalArgumentFunction extends ConfigurationFunction implements OptionalArgument { - - public DummyConfigurationOptionalArgumentFunction(Source source, List fields, Configuration configuration) { - super(source, fields, configuration); - } - - @Override - public DataType dataType() { - return null; - } - - @Override - public Expression replaceChildren(List newChildren) { - return new DummyConfigurationOptionalArgumentFunction(source(), newChildren, configuration()); - } - - @Override - protected NodeInfo info() { - return NodeInfo.create(this, DummyConfigurationOptionalArgumentFunction::new, children(), configuration()); - } - } -} diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/function/TestFunctionRegistry.java b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/function/TestFunctionRegistry.java deleted file mode 100644 index 3d17a6ea79624..0000000000000 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/function/TestFunctionRegistry.java +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.esql.core.expression.function; - -public class TestFunctionRegistry extends FunctionRegistry { - - public TestFunctionRegistry(FunctionDefinition... definitions) { - super(definitions); - } -} diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/ChainingProcessorTests.java b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/ChainingProcessorTests.java deleted file mode 100644 index f7bbbd9f61189..0000000000000 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/ChainingProcessorTests.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.expression.gen.processor; - -import org.apache.lucene.tests.util.LuceneTestCase.AwaitsFix; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.xpack.esql.core.expression.predicate.logical.BinaryLogicProcessorTests; -import org.elasticsearch.xpack.esql.core.expression.processor.Processors; - -import java.util.ArrayList; -import java.util.List; -import java.util.function.Supplier; - -@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/109012") -public class ChainingProcessorTests extends AbstractWireSerializingTestCase { - public static ChainingProcessor randomComposeProcessor() { - return new ChainingProcessor(randomProcessor(), randomProcessor()); - } - - @Override - protected NamedWriteableRegistry getNamedWriteableRegistry() { - return new NamedWriteableRegistry(Processors.getNamedWriteables()); - } - - @Override - protected ChainingProcessor createTestInstance() { - return randomComposeProcessor(); - } - - @Override - protected Reader instanceReader() { - return ChainingProcessor::new; - } - - @Override - protected ChainingProcessor mutateInstance(ChainingProcessor instance) { - @SuppressWarnings("unchecked") - Supplier supplier = randomFrom( - () -> new ChainingProcessor(instance.first(), randomValueOtherThan(instance.second(), () -> randomProcessor())), - () -> new ChainingProcessor(randomValueOtherThan(instance.first(), () -> randomProcessor()), instance.second()) - ); - return supplier.get(); - } - - public static Processor randomProcessor() { - List> options = new ArrayList<>(); - options.add(ChainingProcessorTests::randomComposeProcessor); - options.add(BinaryLogicProcessorTests::randomProcessor); - return randomFrom(options).get(); - } -} diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/ConstantProcessorTests.java b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/ConstantProcessorTests.java deleted file mode 100644 index 00ca460920d03..0000000000000 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/gen/processor/ConstantProcessorTests.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.expression.gen.processor; - -import org.elasticsearch.common.io.stream.ByteArrayStreamInput; -import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.xpack.versionfield.Version; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.time.Clock; -import java.time.Duration; -import java.time.ZonedDateTime; - -public class ConstantProcessorTests extends AbstractWireSerializingTestCase { - - public static ConstantProcessor randomConstantProcessor() { - if (randomBoolean()) { - Clock clock = Clock.tickMillis(randomZone()); - if (randomBoolean()) { - clock = Clock.tick(clock, Duration.ofNanos(1)); - } - return new ConstantProcessor(ZonedDateTime.now(clock)); - } else { - return new ConstantProcessor(randomAlphaOfLength(5)); - } - } - - @Override - protected ConstantProcessor createTestInstance() { - return randomConstantProcessor(); - } - - @Override - protected Reader instanceReader() { - return ConstantProcessor::new; - } - - @Override - protected ConstantProcessor mutateInstance(ConstantProcessor instance) { - return new ConstantProcessor(randomValueOtherThan(instance.process(null), () -> randomLong())); - } - - public void testApply() { - ConstantProcessor proc = new ConstantProcessor("test"); - assertEquals("test", proc.process(null)); - assertEquals("test", proc.process("cat")); - } - - public void testReadWriteVersion() throws IOException { - ConstantProcessor original = new ConstantProcessor(new Version("1.2.3")); - try (ByteArrayOutputStream baos = new ByteArrayOutputStream(); StreamOutput out = new OutputStreamStreamOutput(baos)) { - original.writeTo(out); - try (StreamInput is = new ByteArrayStreamInput(baos.toByteArray())) { - ConstantProcessor result = new ConstantProcessor(is); - assertEquals(Version.class, result.process(null).getClass()); - assertEquals("1.2.3", result.process(null).toString()); - } - } - } -} diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/BinaryLogicOperationTests.java b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/BinaryLogicOperationTests.java new file mode 100644 index 0000000000000..05279b74f6382 --- /dev/null +++ b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/BinaryLogicOperationTests.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
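The ConstantProcessorTests removed above exercised a wire round-trip: write the processor to a StreamOutput backed by a byte array, read it back through a StreamInput, and assert the copy behaves like the original. A small helper capturing that pattern with the same stream classes the test used; WireRoundTrip and roundTrip are hypothetical names added here for illustration:

import org.elasticsearch.common.io.stream.ByteArrayStreamInput;
import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.ByteArrayOutputStream;
import java.io.IOException;

final class WireRoundTrip {
    private WireRoundTrip() {}

    // Serialize a Writeable to bytes and deserialize it again with the given reader,
    // the same write/read cycle the deleted testReadWriteVersion performed for ConstantProcessor.
    static <T extends Writeable> T roundTrip(T original, Writeable.Reader<T> reader) throws IOException {
        try (ByteArrayOutputStream baos = new ByteArrayOutputStream(); StreamOutput out = new OutputStreamStreamOutput(baos)) {
            original.writeTo(out);
            try (StreamInput in = new ByteArrayStreamInput(baos.toByteArray())) {
                return reader.read(in);
            }
        }
    }
}

For example, ConstantProcessor copy = WireRoundTrip.roundTrip(original, ConstantProcessor::new) reproduces the body of the deleted testReadWriteVersion in a single call.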
+ */ +package org.elasticsearch.xpack.esql.core.expression.predicate.logical; + +import org.elasticsearch.test.ESTestCase; + +public class BinaryLogicOperationTests extends ESTestCase { + + public void testOR() { + assertEquals(true, BinaryLogicOperation.OR.apply(true, false)); + assertEquals(true, BinaryLogicOperation.OR.apply(false, true)); + assertEquals(false, BinaryLogicOperation.OR.apply(false, false)); + assertEquals(true, BinaryLogicOperation.OR.apply(true, true)); + } + + public void testORNullHandling() { + assertEquals(true, BinaryLogicOperation.OR.apply(true, null)); + assertEquals(true, BinaryLogicOperation.OR.apply(null, true)); + assertNull(BinaryLogicOperation.OR.apply(false, null)); + assertNull(BinaryLogicOperation.OR.apply(null, false)); + assertNull(BinaryLogicOperation.OR.apply(null, null)); + } + + public void testAnd() { + assertEquals(false, BinaryLogicOperation.AND.apply(true, false)); + assertEquals(false, BinaryLogicOperation.AND.apply(false, true)); + assertEquals(false, BinaryLogicOperation.AND.apply(false, false)); + assertEquals(true, BinaryLogicOperation.AND.apply(true, true)); + } + + public void testAndNullHandling() { + assertNull(BinaryLogicOperation.AND.apply(true, null)); + assertNull(BinaryLogicOperation.AND.apply(null, true)); + assertEquals(false, BinaryLogicOperation.AND.apply(false, null)); + assertEquals(false, BinaryLogicOperation.AND.apply(null, false)); + assertNull(BinaryLogicOperation.AND.apply(null, null)); + } +} diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/BinaryLogicProcessorTests.java b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/BinaryLogicProcessorTests.java deleted file mode 100644 index 83a9ca0a8ee3d..0000000000000 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/predicate/logical/BinaryLogicProcessorTests.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
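The new BinaryLogicOperationTests above assert Kleene three-valued logic, where null stands for an unknown value: OR is true as soon as either side is true, AND is false as soon as either side is false, and any other combination involving null stays null. A compact restatement of those truth tables in plain Java; TriLogic and its method names are ours, for illustration only:

final class TriLogic {
    private TriLogic() {}

    static Boolean or(Boolean l, Boolean r) {
        if (Boolean.TRUE.equals(l) || Boolean.TRUE.equals(r)) {
            return true;   // a definite TRUE wins even if the other side is unknown
        }
        if (l == null || r == null) {
            return null;   // not provably true, and one side is unknown
        }
        return false;
    }

    static Boolean and(Boolean l, Boolean r) {
        if (Boolean.FALSE.equals(l) || Boolean.FALSE.equals(r)) {
            return false;  // a definite FALSE wins even if the other side is unknown
        }
        if (l == null || r == null) {
            return null;   // not provably false, and one side is unknown
        }
        return true;
    }
}

Every assertion in the new test maps onto these two methods, e.g. and(true, null) is null while and(false, null) is false.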
- */ -package org.elasticsearch.xpack.esql.core.expression.predicate.logical; - -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.ConstantProcessor; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.Processor; -import org.elasticsearch.xpack.esql.core.expression.processor.Processors; - -public class BinaryLogicProcessorTests extends AbstractWireSerializingTestCase { - - private static final Processor FALSE = new ConstantProcessor(false); - private static final Processor TRUE = new ConstantProcessor(true); - private static final Processor NULL = new ConstantProcessor((Object) null); - - public static BinaryLogicProcessor randomProcessor() { - return new BinaryLogicProcessor( - new ConstantProcessor(randomFrom(Boolean.FALSE, Boolean.TRUE, null)), - new ConstantProcessor(randomFrom(Boolean.FALSE, Boolean.TRUE, null)), - randomFrom(BinaryLogicProcessor.BinaryLogicOperation.values()) - ); - } - - @Override - protected BinaryLogicProcessor createTestInstance() { - return randomProcessor(); - } - - @Override - protected BinaryLogicProcessor mutateInstance(BinaryLogicProcessor instance) { - return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 - } - - @Override - protected Reader instanceReader() { - return BinaryLogicProcessor::new; - } - - @Override - protected NamedWriteableRegistry getNamedWriteableRegistry() { - return new NamedWriteableRegistry(Processors.getNamedWriteables()); - } - - public void testOR() { - assertEquals(true, new BinaryLogicProcessor(TRUE, FALSE, BinaryLogicProcessor.BinaryLogicOperation.OR).process(null)); - assertEquals(true, new BinaryLogicProcessor(FALSE, TRUE, BinaryLogicProcessor.BinaryLogicOperation.OR).process(null)); - assertEquals(false, new BinaryLogicProcessor(FALSE, FALSE, BinaryLogicProcessor.BinaryLogicOperation.OR).process(null)); - assertEquals(true, new BinaryLogicProcessor(TRUE, TRUE, BinaryLogicProcessor.BinaryLogicOperation.OR).process(null)); - } - - public void testORNullHandling() { - assertEquals(true, new BinaryLogicProcessor(TRUE, NULL, BinaryLogicProcessor.BinaryLogicOperation.OR).process(null)); - assertEquals(true, new BinaryLogicProcessor(NULL, TRUE, BinaryLogicProcessor.BinaryLogicOperation.OR).process(null)); - assertNull(new BinaryLogicProcessor(FALSE, NULL, BinaryLogicProcessor.BinaryLogicOperation.OR).process(null)); - assertNull(new BinaryLogicProcessor(NULL, FALSE, BinaryLogicProcessor.BinaryLogicOperation.OR).process(null)); - assertNull(new BinaryLogicProcessor(NULL, NULL, BinaryLogicProcessor.BinaryLogicOperation.OR).process(null)); - } - - public void testAnd() { - assertEquals(false, new BinaryLogicProcessor(TRUE, FALSE, BinaryLogicProcessor.BinaryLogicOperation.AND).process(null)); - assertEquals(false, new BinaryLogicProcessor(FALSE, TRUE, BinaryLogicProcessor.BinaryLogicOperation.AND).process(null)); - assertEquals(false, new BinaryLogicProcessor(FALSE, FALSE, BinaryLogicProcessor.BinaryLogicOperation.AND).process(null)); - assertEquals(true, new BinaryLogicProcessor(TRUE, TRUE, BinaryLogicProcessor.BinaryLogicOperation.AND).process(null)); - } - - public void testAndNullHandling() { - assertNull(new BinaryLogicProcessor(TRUE, NULL, BinaryLogicProcessor.BinaryLogicOperation.AND).process(null)); - assertNull(new BinaryLogicProcessor(NULL, TRUE, 
BinaryLogicProcessor.BinaryLogicOperation.AND).process(null)); - assertEquals(false, new BinaryLogicProcessor(FALSE, NULL, BinaryLogicProcessor.BinaryLogicOperation.AND).process(null)); - assertEquals(false, new BinaryLogicProcessor(NULL, FALSE, BinaryLogicProcessor.BinaryLogicOperation.AND).process(null)); - assertNull(new BinaryLogicProcessor(NULL, NULL, BinaryLogicProcessor.BinaryLogicOperation.AND).process(null)); - } -} diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/predicate/nulls/CheckNullProcessorTests.java b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/predicate/nulls/CheckNullProcessorTests.java deleted file mode 100644 index 69104c7601f6a..0000000000000 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/predicate/nulls/CheckNullProcessorTests.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.esql.core.expression.predicate.nulls; - -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.ConstantProcessor; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.Processor; -import org.elasticsearch.xpack.esql.core.expression.processor.Processors; - -public class CheckNullProcessorTests extends AbstractWireSerializingTestCase { - - private static final Processor FALSE = new ConstantProcessor(false); - private static final Processor TRUE = new ConstantProcessor(true); - private static final Processor NULL = new ConstantProcessor((Object) null); - - public static CheckNullProcessor randomProcessor() { - return new CheckNullProcessor(randomFrom(CheckNullProcessor.CheckNullOperation.values())); - } - - @Override - protected CheckNullProcessor createTestInstance() { - return randomProcessor(); - } - - @Override - protected CheckNullProcessor mutateInstance(CheckNullProcessor instance) { - return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 - } - - @Override - protected Reader instanceReader() { - return CheckNullProcessor::new; - } - - @Override - protected NamedWriteableRegistry getNamedWriteableRegistry() { - return new NamedWriteableRegistry(Processors.getNamedWriteables()); - } - - public void testIsNull() { - assertEquals(true, new CheckNullProcessor(CheckNullProcessor.CheckNullOperation.IS_NULL).process(null)); - assertEquals(false, new CheckNullProcessor(CheckNullProcessor.CheckNullOperation.IS_NULL).process("foo")); - assertEquals(false, new CheckNullProcessor(CheckNullProcessor.CheckNullOperation.IS_NULL).process(1)); - } - - public void testIsNotNull() { - assertEquals(false, new CheckNullProcessor(CheckNullProcessor.CheckNullOperation.IS_NOT_NULL).process(null)); - assertEquals(true, new CheckNullProcessor(CheckNullProcessor.CheckNullOperation.IS_NOT_NULL).process("foo")); - assertEquals(true, new CheckNullProcessor(CheckNullProcessor.CheckNullOperation.IS_NOT_NULL).process(1)); - } -} diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/optimizer/OptimizerRulesTests.java 
b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/optimizer/OptimizerRulesTests.java index 12dbb23a86c59..789e9a22d39d1 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/optimizer/OptimizerRulesTests.java +++ b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/optimizer/OptimizerRulesTests.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.esql.core.optimizer; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.TestUtils; import org.elasticsearch.xpack.esql.core.expression.Expression; @@ -17,6 +18,7 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; +import java.io.IOException; import java.util.Collections; import java.util.List; @@ -38,6 +40,16 @@ public DummyBooleanExpression(Source source, int id) { this.id = id; } + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + @Override protected NodeInfo info() { return NodeInfo.create(this, DummyBooleanExpression::new, id); diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/tree/NodeSubclassTests.java b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/tree/NodeSubclassTests.java index 80f63b1293e61..d4065810dabc3 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/tree/NodeSubclassTests.java +++ b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/tree/NodeSubclassTests.java @@ -6,706 +6,21 @@ */ package org.elasticsearch.xpack.esql.core.tree; -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - -import org.elasticsearch.common.Strings; -import org.elasticsearch.core.PathUtils; -import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.enrich.EnrichPolicy; -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; -import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttributeTests; -import org.elasticsearch.xpack.esql.core.expression.function.Function; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.ConstantProcessor; -import org.elasticsearch.xpack.esql.core.expression.gen.processor.Processor; -import org.elasticsearch.xpack.esql.core.expression.predicate.fulltext.FullTextPredicate; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.In; -import org.elasticsearch.xpack.esql.core.expression.predicate.regex.Like; -import org.elasticsearch.xpack.esql.core.expression.predicate.regex.LikePattern; -import org.elasticsearch.xpack.esql.core.tree.NodeTests.ChildrenAreAProperty; -import org.elasticsearch.xpack.esql.core.tree.NodeTests.Dummy; -import org.elasticsearch.xpack.esql.core.tree.NodeTests.NoChildren; -import org.mockito.exceptions.base.MockitoException; - -import java.io.IOException; -import java.lang.reflect.Constructor; -import java.lang.reflect.Method; -import java.lang.reflect.Modifier; -import java.lang.reflect.ParameterizedType; -import java.lang.reflect.Type; -import java.lang.reflect.WildcardType; -import java.nio.file.FileVisitResult; -import java.nio.file.Files; -import java.nio.file.Path; -import 
java.nio.file.SimpleFileVisitor; -import java.nio.file.attribute.BasicFileAttributes; -import java.time.ZoneId; -import java.util.ArrayList; -import java.util.Collection; -import java.util.EnumSet; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.function.Predicate; -import java.util.jar.JarEntry; -import java.util.jar.JarInputStream; -import static java.util.Arrays.asList; -import static java.util.Collections.emptyList; -import static org.mockito.Mockito.mock; +import java.util.function.Function; /** - * Looks for all subclasses of {@link Node} and verifies that they - * implement {@link Node#info()} and - * {@link Node#replaceChildren(List)} sanely. It'd be better if - * each subclass had its own test case that verified those methods - * and any other interesting things that that they do but we're a - * long way from that and this gets the job done for now. - *

- * This test attempts to use reflection to create believable nodes
- * and manipulate them in believable ways with as little knowledge
- * of the actual subclasses as possible. This is problematic because
- * it is possible, for example, for nodes to stackoverflow because
- * they can contain themselves. So this class
- * does have some {@link Node}-subclass-specific
- * knowledge. As little as I could get away with though.
- * <p>
- * When there are actual tests for a subclass of {@linkplain Node}
- * then this class will do two things:
- * <ul>
- * <li>Skip running any tests for that subclass entirely.
- * <li>Delegate to that test to build nodes of that type when a
- * node of that type is called for.
- * </ul>
      + * Shim to expose protected methods to ESQL proper's NodeSubclassTests. */ -public class NodeSubclassTests> extends ESTestCase { - - private static final List> CLASSES_WITH_MIN_TWO_CHILDREN = asList(In.class); - - private final Class subclass; - - public NodeSubclassTests(Class subclass) { - this.subclass = subclass; - } - - public void testInfoParameters() throws Exception { - Constructor ctor = longestCtor(subclass); - Object[] nodeCtorArgs = ctorArgs(ctor); - T node = ctor.newInstance(nodeCtorArgs); - /* - * The count should be the same size as the longest constructor - * by convention. If it isn't then we're missing something. - */ - int expectedCount = ctor.getParameterCount(); - /* - * Except the first `Location` argument of the ctor is implicit - * in the parameters and not included. - */ - expectedCount -= 1; - assertEquals(expectedCount, node.info().properties().size()); - } - - /** - * Test {@link Node#transformPropertiesOnly(Class, java.util.function.Function)} - * implementation on {@link #subclass} which tests the implementation of - * {@link Node#info()}. And tests the actual {@link NodeInfo} subclass - * implementations in the process. - */ - public void testTransform() throws Exception { - Constructor ctor = longestCtor(subclass); - Object[] nodeCtorArgs = ctorArgs(ctor); - T node = ctor.newInstance(nodeCtorArgs); - - Type[] argTypes = ctor.getGenericParameterTypes(); - // start at 1 because we can't change Location. - for (int changedArgOffset = 1; changedArgOffset < ctor.getParameterCount(); changedArgOffset++) { - Object originalArgValue = nodeCtorArgs[changedArgOffset]; - - Type changedArgType = argTypes[changedArgOffset]; - Object changedArgValue = randomValueOtherThan(nodeCtorArgs[changedArgOffset], () -> makeArg(changedArgType)); - - B transformed = node.transformNodeProps(Object.class, prop -> Objects.equals(prop, originalArgValue) ? changedArgValue : prop); - - if (node.children().contains(originalArgValue) || node.children().equals(originalArgValue)) { - if (node.children().equals(emptyList()) && originalArgValue.equals(emptyList())) { - /* - * If the children are an empty list and the value - * we want to change is an empty list they'll be - * equal to one another so they'll come on this branch. - * This case is rare and hard to reason about so we're - * just going to assert nothing here and hope to catch - * it when we write non-reflection hack tests. - */ - continue; - } - // Transformation shouldn't apply to children. - assertSame(node, transformed); - } else { - assertTransformedOrReplacedChildren(node, transformed, ctor, nodeCtorArgs, changedArgOffset, changedArgValue); - } - } - } - - /** - * Test {@link Node#replaceChildren(List)} implementation on {@link #subclass}. - */ - public void testReplaceChildren() throws Exception { - Constructor ctor = longestCtor(subclass); - Object[] nodeCtorArgs = ctorArgs(ctor); - T node = ctor.newInstance(nodeCtorArgs); - - Type[] argTypes = ctor.getGenericParameterTypes(); - // start at 1 because we can't change Location. - for (int changedArgOffset = 1; changedArgOffset < ctor.getParameterCount(); changedArgOffset++) { - Object originalArgValue = nodeCtorArgs[changedArgOffset]; - Type changedArgType = argTypes[changedArgOffset]; - - if (originalArgValue instanceof Collection col) { - - if (col.isEmpty() || col instanceof EnumSet) { - /* - * We skip empty lists here because they'll spuriously - * pass the conditions below if statements even if they don't - * have anything to do with children. 
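The deleted testInfoParameters relies on a convention: the longest public constructor of a Node subclass carries all of its state, and NodeInfo.properties() lists everything except the implicit leading Source argument, so its size must equal the constructor's parameter count minus one. A plain-reflection sketch of that check, independent of the Elasticsearch classes; CtorConventions and expectedPropertyCount are hypothetical names:

import java.lang.reflect.Constructor;
import java.util.Arrays;
import java.util.Comparator;

final class CtorConventions {
    private CtorConventions() {}

    // Find the longest public constructor (the one that, by convention, holds all state)
    // and derive how many NodeInfo properties the class is expected to expose.
    static int expectedPropertyCount(Class<?> nodeClass) {
        Constructor<?> longest = Arrays.stream(nodeClass.getConstructors())
            .max(Comparator.comparingInt(Constructor::getParameterCount))
            .orElseThrow(() -> new IllegalArgumentException("no public constructors on " + nodeClass.getName()));
        return longest.getParameterCount() - 1; // minus the implicit Source parameter
    }
}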
This might cause us to - * ignore the case where a parameter gets copied into the - * children and just happens to be empty but I don't really - * know another way. - */ - - continue; - } - - if (col instanceof List originalList && node.children().equals(originalList)) { - // The arg we're looking at *is* the children - @SuppressWarnings("unchecked") // we pass a reasonable type so get reasonable results - List newChildren = (List) makeListOfSameSizeOtherThan(changedArgType, originalList); - B transformed = node.replaceChildren(newChildren); - assertTransformedOrReplacedChildren(node, transformed, ctor, nodeCtorArgs, changedArgOffset, newChildren); - } else if (false == col.isEmpty() && node.children().containsAll(col)) { - // The arg we're looking at is a collection contained within the children - List originalList = (List) originalArgValue; - - // First make the new children - @SuppressWarnings("unchecked") // we pass a reasonable type so get reasonable results - List newCollection = (List) makeListOfSameSizeOtherThan(changedArgType, originalList); - - // Now merge that list of children into the original list of children - List originalChildren = node.children(); - List newChildren = new ArrayList<>(originalChildren.size()); - int originalOffset = 0; - for (int i = 0; i < originalChildren.size(); i++) { - if (originalOffset < originalList.size() && originalChildren.get(i).equals(originalList.get(originalOffset))) { - newChildren.add(newCollection.get(originalOffset)); - originalOffset++; - } else { - newChildren.add(originalChildren.get(i)); - } - } - - // Finally! We can assert..... - B transformed = node.replaceChildren(newChildren); - assertTransformedOrReplacedChildren(node, transformed, ctor, nodeCtorArgs, changedArgOffset, newCollection); - } else { - // The arg we're looking at has nothing to do with the children - } - } else { - if (node.children().contains(originalArgValue)) { - // The arg we're looking at is one of the children - List newChildren = new ArrayList<>(node.children()); - @SuppressWarnings("unchecked") // makeArg produced reasonable values - B newChild = (B) randomValueOtherThan(nodeCtorArgs[changedArgOffset], () -> makeArg(changedArgType)); - newChildren.replaceAll(e -> Objects.equals(originalArgValue, e) ? newChild : e); - B transformed = node.replaceChildren(newChildren); - assertTransformedOrReplacedChildren(node, transformed, ctor, nodeCtorArgs, changedArgOffset, newChild); - } else { - // The arg we're looking at has nothing to do with the children - } - } - } - } - - private void assertTransformedOrReplacedChildren( - T node, - B transformed, - Constructor ctor, - Object[] nodeCtorArgs, - int changedArgOffset, - Object changedArgValue - ) throws Exception { - if (node instanceof Function) { - /* - * Functions have a weaker definition of transform then other - * things: - * - * Transforming using the way we did above should only change - * the one property of the node that we intended to transform. 
- */ - assertEquals(node.source(), transformed.source()); - List op = node.nodeProperties(); - List tp = transformed.nodeProperties(); - for (int p = 0; p < op.size(); p++) { - if (p == changedArgOffset - 1) { // -1 because location isn't in the list - assertEquals(changedArgValue, tp.get(p)); - } else { - assertEquals(op.get(p), tp.get(p)); - } - } - } else { - /* - * The stronger assertion for all non-Functions: transforming - * a node changes *only* the transformed value such that you - * can rebuild a copy of the node using its constructor changing - * only one argument and it'll be *equal* to the result of the - * transformation. - */ - Type[] argTypes = ctor.getGenericParameterTypes(); - Object[] args = new Object[argTypes.length]; - for (int i = 0; i < argTypes.length; i++) { - args[i] = nodeCtorArgs[i] == nodeCtorArgs[changedArgOffset] ? changedArgValue : nodeCtorArgs[i]; - } - T reflectionTransformed = ctor.newInstance(args); - assertEquals(reflectionTransformed, transformed); - } - } - - /** - * Find the longest constructor of the given class. - * By convention, for all subclasses of {@link Node}, - * this constructor should have "all" of the state of - * the node. All other constructors should all delegate - * to this constructor. - */ - static Constructor longestCtor(Class clazz) { - Constructor longest = null; - for (Constructor ctor : clazz.getConstructors()) { - if (longest == null || longest.getParameterCount() < ctor.getParameterCount()) { - @SuppressWarnings("unchecked") // Safe because the ctor has to be a ctor for T - Constructor castCtor = (Constructor) ctor; - longest = castCtor; - } - } - if (longest == null) { - throw new IllegalArgumentException("Couldn't find any constructors for [" + clazz.getName() + "]"); - } - return longest; - } - - /** - * Scans the {@code .class} files to identify all classes and - * checks if they are subclasses of {@link Node}. - */ - @ParametersFactory - @SuppressWarnings("rawtypes") - public static List nodeSubclasses() throws IOException { - return subclassesOf(Node.class, CLASSNAME_FILTER).stream() - .filter(c -> testClassFor(c) == null) - .map(c -> new Object[] { c }) - .toList(); - } - - /** - * Build a list of arguments to use when calling - * {@code ctor} that make sense when {@code ctor} - * builds subclasses of {@link Node}. - */ - private Object[] ctorArgs(Constructor> ctor) throws Exception { - Type[] argTypes = ctor.getGenericParameterTypes(); - Object[] args = new Object[argTypes.length]; - for (int i = 0; i < argTypes.length; i++) { - final int currentArgIndex = i; - args[i] = randomValueOtherThanMany(candidate -> { - for (int a = 0; a < currentArgIndex; a++) { - if (Objects.equals(args[a], candidate)) { - return true; - } - } - return false; - }, () -> { - try { - return makeArg(ctor.getDeclaringClass(), argTypes[currentArgIndex]); - } catch (Exception e) { - throw new RuntimeException(e); - } - }); - } - return args; - } - - /** - * Make an argument to feed the {@link #subclass}'s ctor. - */ - protected Object makeArg(Type argType) { - try { - return makeArg(subclass, argType); - } catch (Exception e) { - // Wrap to make `randomValueOtherThan` happy. - throw new RuntimeException(e); - } - } - - /** - * Make an argument to feed to the constructor for {@code toBuildClass}. 
- */ - @SuppressWarnings("unchecked") - private Object makeArg(Class> toBuildClass, Type argType) throws Exception { - - if (argType instanceof ParameterizedType pt) { - if (pt.getRawType() == Map.class) { - return makeMap(toBuildClass, pt); - } - if (pt.getRawType() == List.class) { - return makeList(toBuildClass, pt); - } - if (pt.getRawType() == Set.class) { - return makeSet(toBuildClass, pt); - } - if (pt.getRawType() == EnumSet.class) { - @SuppressWarnings("rawtypes") - Enum enm = (Enum) makeArg(toBuildClass, pt.getActualTypeArguments()[0]); - return EnumSet.of(enm); - } - Object obj = pluggableMakeParameterizedArg(toBuildClass, pt); - if (obj != null) { - return obj; - } - throw new IllegalArgumentException("Unsupported parameterized type [" + pt + "], for " + toBuildClass.getSimpleName()); - } - if (argType instanceof WildcardType wt) { - if (wt.getLowerBounds().length > 0 || wt.getUpperBounds().length > 1) { - throw new IllegalArgumentException("Unsupported wildcard type [" + wt + "]"); - } - return makeArg(toBuildClass, wt.getUpperBounds()[0]); - } - Class argClass = (Class) argType; - - /* - * Sometimes all of the required type information isn't in the ctor - * so we have to hard code it here. - */ - if (toBuildClass == FieldAttribute.class) { - // `parent` is nullable. - if (argClass == FieldAttribute.class && randomBoolean()) { - return null; - } - } else if (toBuildClass == ChildrenAreAProperty.class) { - /* - * While any subclass of DummyFunction will do here we want to prevent - * stack overflow so we use the one without children. - */ - if (argClass == Dummy.class) { - return makeNode(NoChildren.class); - } - } else if (FullTextPredicate.class.isAssignableFrom(toBuildClass)) { - /* - * FullTextPredicate analyzes its string arguments on - * construction so they have to be valid. - */ - if (argClass == String.class) { - int size = between(0, 5); - StringBuilder b = new StringBuilder(); - for (int i = 0; i < size; i++) { - if (i != 0) { - b.append(';'); - } - b.append(randomAlphaOfLength(5)).append('=').append(randomAlphaOfLength(5)); - } - return b.toString(); - } - } else if (toBuildClass == Like.class) { - - if (argClass == LikePattern.class) { - return new LikePattern(randomAlphaOfLength(16), randomFrom('\\', '|', '/', '`')); - } - - } else { - Object postProcess = pluggableMakeArg(toBuildClass, argClass); - if (postProcess != null) { - return postProcess; - } - } - if (Expression.class == argClass) { - /* - * Rather than use any old subclass of expression lets - * use a simple one. Without this we're very prone to - * stackoverflow errors while building the tree. - */ - return UnresolvedAttributeTests.randomUnresolvedAttribute(); - } - if (EnrichPolicy.class == argClass) { - List enrichFields = randomSubsetOf(List.of("e1", "e2", "e3")); - return new EnrichPolicy(randomFrom("match", "range"), null, List.of(), randomFrom("m1", "m2"), enrichFields); - } - - if (Processor.class == argClass) { - /* - * Similar to expressions, mock pipes to avoid - * stackoverflow errors while building the tree. 
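The makeArg machinery being deleted here follows one strategy throughout: synthesize values for anything that cannot be mocked (primitives, String, enums, sealed types such as ZoneId) and fall back to a Mockito mock for everything else. A stripped-down sketch of that fallback chain; SimpleArgFactory is a hypothetical name, and the real makeArg additionally handles collections, maps, Node subclasses and several ESQL-specific types:

import java.util.concurrent.ThreadLocalRandom;

import static org.mockito.Mockito.mock;

final class SimpleArgFactory {
    private SimpleArgFactory() {}

    static Object makeArg(Class<?> argClass) {
        ThreadLocalRandom random = ThreadLocalRandom.current();
        if (argClass == boolean.class) {
            return random.nextBoolean();            // primitives can't be mocked
        }
        if (argClass == int.class) {
            return random.nextInt();
        }
        if (argClass == String.class) {
            return "arg-" + random.nextInt(1000);   // nor can the final String class
        }
        if (argClass.isEnum()) {
            Object[] constants = argClass.getEnumConstants();
            return constants[random.nextInt(constants.length)];
        }
        return mock(argClass);                      // last resort: let Mockito fake it
    }
}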
- */ - return new ConstantProcessor(randomAlphaOfLength(16)); - } - - if (Node.class.isAssignableFrom(argClass)) { - /* - * Rather than attempting to mock subclasses of node - * and emulate them we just try and instantiate an - * appropriate subclass - */ - @SuppressWarnings("unchecked") // safe because this is the lowest possible bounds for Node - Class> asNodeSubclass = (Class>) argType; - return makeNode(asNodeSubclass); - } - - if (argClass.isEnum()) { - // Can't mock enums but luckily we can just pick one - return randomFrom(argClass.getEnumConstants()); - } - if (argClass == boolean.class) { - // Can't mock primitives.... - return randomBoolean(); - } - if (argClass == int.class) { - return randomInt(); - } - if (argClass == String.class) { - // Nor strings - return randomAlphaOfLength(5); - } - if (argClass == Source.class) { - // Location is final and can't be mocked but we have a handy method to generate ones. - return SourceTests.randomSource(); - } - if (argClass == ZoneId.class) { - // ZoneId is a sealed class (cannot be mocked) starting with Java 19 - return randomZone(); - } - try { - return mock(argClass); - } catch (MockitoException e) { - throw new RuntimeException("failed to mock [" + argClass.getName() + "] for [" + toBuildClass.getName() + "]", e); - } - } - - protected Object pluggableMakeArg(Class> toBuildClass, Class argClass) throws Exception { - return null; - } - - protected Object pluggableMakeParameterizedArg(Class> toBuildClass, ParameterizedType pt) { - return null; - } - - private List makeList(Class> toBuildClass, ParameterizedType listType) throws Exception { - return makeList(toBuildClass, listType, randomSizeForCollection(toBuildClass)); - } - - private List makeList(Class> toBuildClass, ParameterizedType listType, int size) throws Exception { - List list = new ArrayList<>(); - for (int i = 0; i < size; i++) { - list.add(makeArg(toBuildClass, listType.getActualTypeArguments()[0])); - } - return list; - } - - private Set makeSet(Class> toBuildClass, ParameterizedType listType) throws Exception { - return makeSet(toBuildClass, listType, randomSizeForCollection(toBuildClass)); - } - - private Set makeSet(Class> toBuildClass, ParameterizedType listType, int size) throws Exception { - Set list = new HashSet<>(); - for (int i = 0; i < size; i++) { - list.add(makeArg(toBuildClass, listType.getActualTypeArguments()[0])); - } - return list; - } - - private Object makeMap(Class> toBuildClass, ParameterizedType pt) throws Exception { - Map map = new HashMap<>(); - int size = randomSizeForCollection(toBuildClass); - while (map.size() < size) { - Object key = makeArg(toBuildClass, pt.getActualTypeArguments()[0]); - Object value = makeArg(toBuildClass, pt.getActualTypeArguments()[1]); - map.put(key, value); - } - return map; - } - - private int randomSizeForCollection(Class> toBuildClass) { - int minCollectionLength = 0; - int maxCollectionLength = 10; - - if (hasAtLeastTwoChildren(toBuildClass)) { - minCollectionLength = 2; - } - return between(minCollectionLength, maxCollectionLength); - } - - protected boolean hasAtLeastTwoChildren(Class> toBuildClass) { - return CLASSES_WITH_MIN_TWO_CHILDREN.stream().anyMatch(toBuildClass::equals); - } - - private List makeListOfSameSizeOtherThan(Type listType, List original) throws Exception { - if (original.isEmpty()) { - throw new IllegalArgumentException("Can't make a different empty list"); - } - return randomValueOtherThan(original, () -> { - try { - return makeList(subclass, (ParameterizedType) listType, original.size()); - 
} catch (Exception e) { - throw new RuntimeException(e); - } - }); - - } - - public > T makeNode(Class nodeClass) throws Exception { - if (Modifier.isAbstract(nodeClass.getModifiers())) { - nodeClass = randomFrom(innerSubclassesOf(nodeClass)); - } - Class testSubclassFor = testClassFor(nodeClass); - if (testSubclassFor != null) { - // Delegate to the test class for a node if there is one - Method m = testSubclassFor.getMethod("random" + Strings.capitalize(nodeClass.getSimpleName())); - assert Modifier.isStatic(m.getModifiers()) : "Expected static method, got:" + m; - return nodeClass.cast(m.invoke(null)); - } - Constructor ctor = longestCtor(nodeClass); - Object[] nodeCtorArgs = ctorArgs(ctor); - return ctor.newInstance(nodeCtorArgs); - } - - /** - * Cache of subclasses. We use a cache because it significantly speeds up - * the test. - */ - private static final Map, Set> subclassCache = new HashMap<>(); - - private static final Predicate CLASSNAME_FILTER = className -> { - // filter the class that are not interested - // (and IDE folders like eclipse) - if (className.startsWith("org.elasticsearch.xpack.esql.core") == false - && className.startsWith("org.elasticsearch.xpack.sql") == false - && className.startsWith("org.elasticsearch.xpack.eql") == false) { - return false; - } - return true; - }; - - protected Predicate pluggableClassNameFilter() { - return CLASSNAME_FILTER; - } - - private Set> innerSubclassesOf(Class clazz) throws IOException { - return subclassesOf(clazz, pluggableClassNameFilter()); - } - - public static Set> subclassesOf(Class clazz) throws IOException { - return subclassesOf(clazz, CLASSNAME_FILTER); - } - - /** - * Find all subclasses of a particular class. - */ - public static Set> subclassesOf(Class clazz, Predicate classNameFilter) throws IOException { - @SuppressWarnings("unchecked") // The map is built this way - Set> lookup = (Set>) subclassCache.get(clazz); - if (lookup != null) { - return lookup; - } - Set> results = new LinkedHashSet<>(); - String[] paths = System.getProperty("java.class.path").split(System.getProperty("path.separator")); - for (String path : paths) { - Path root = PathUtils.get(path); - int rootLength = root.toString().length() + 1; - - // load classes from jar files - // NIO FileSystem API is not used since it trips the SecurityManager - // https://bugs.openjdk.java.net/browse/JDK-8160798 - // so iterate the jar "by hand" - if (path.endsWith(".jar") && path.contains("x-pack-ql")) { - try (JarInputStream jar = jarStream(root)) { - JarEntry je = null; - while ((je = jar.getNextJarEntry()) != null) { - String name = je.getName(); - if (name.endsWith(".class")) { - String className = name.substring(0, name.length() - ".class".length()).replace("/", "."); - maybeLoadClass(clazz, className, root + "!/" + name, classNameFilter, results); - } - } - } - } - // for folders, just use the FileSystems API - else { - Files.walkFileTree(root, new SimpleFileVisitor<>() { - @Override - public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { - if (Files.isRegularFile(file) && file.getFileName().toString().endsWith(".class")) { - String fileName = file.toString(); - // Chop off the root and file extension - String className = fileName.substring(rootLength, fileName.length() - ".class".length()); - // Go from "path" style to class style - className = className.replace(PathUtils.getDefaultFileSystem().getSeparator(), "."); - maybeLoadClass(clazz, className, fileName, classNameFilter, results); - } - return 
FileVisitResult.CONTINUE; - } - }); - } - } - subclassCache.put(clazz, results); - return results; - } - - @SuppressForbidden(reason = "test reads from jar") - private static JarInputStream jarStream(Path path) throws IOException { - return new JarInputStream(path.toUri().toURL().openStream()); - } - - /** - * Load classes from predefined packages (hack to limit the scope) and if they match the hierarchy, add them to the cache - */ - private static void maybeLoadClass( - Class clazz, - String className, - String location, - Predicate classNameFilter, - Set> results - ) throws IOException { - if (classNameFilter.test(className) == false) { - return; - } - - Class c; - try { - c = Class.forName(className); - } catch (ClassNotFoundException e) { - throw new IOException("Couldn't load " + location, e); - } +public class NodeSubclassTests extends ESTestCase { - if (false == Modifier.isAbstract(c.getModifiers()) && false == c.isAnonymousClass() && clazz.isAssignableFrom(c)) { - Class s = c.asSubclass(clazz); - results.add(s); - } + // TODO once Node has been move to ESQL proper remove this shim and these methods. + protected final NodeInfo info(Node node) { + return node.info(); } - /** - * The test class for some subclass of node or {@code null} - * if there isn't such a class or it doesn't extend - * {@link AbstractNodeTestCase}. - */ - protected static Class testClassFor(Class nodeSubclass) { - String testClassName = nodeSubclass.getName() + "Tests"; - try { - Class c = Class.forName(testClassName); - if (AbstractNodeTestCase.class.isAssignableFrom(c)) { - return c; - } - return null; - } catch (ClassNotFoundException e) { - return null; - } + protected final > T transformNodeProps(Node n, Class typeToken, Function rule) { + return n.transformNodeProps(typeToken, rule); } } diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/TypesTests.java b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/TypesTests.java index 489666976b592..1974eb3669f4b 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/TypesTests.java +++ b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/TypesTests.java @@ -103,16 +103,6 @@ public void testDateMulti() { assertThat(field, is(instanceOf(DateEsField.class))); } - public void testDateNanosField() { - Map mapping = loadMapping("mapping-date_nanos.json"); - - assertThat(mapping.size(), is(1)); - EsField field = mapping.get("date_nanos"); - assertThat(field.getDataType(), is(DATETIME)); - assertThat(field.isAggregatable(), is(true)); - assertThat(field, is(instanceOf(DateEsField.class))); - } - public void testDocValueField() { Map mapping = loadMapping("mapping-docvalues.json"); diff --git a/x-pack/plugin/esql-core/test-fixtures/src/main/java/org/elasticsearch/xpack/esql/core/CsvSpecReader.java b/x-pack/plugin/esql-core/test-fixtures/src/main/java/org/elasticsearch/xpack/esql/core/CsvSpecReader.java index a1f524e525eee..8e5a228af00d6 100644 --- a/x-pack/plugin/esql-core/test-fixtures/src/main/java/org/elasticsearch/xpack/esql/core/CsvSpecReader.java +++ b/x-pack/plugin/esql-core/test-fixtures/src/main/java/org/elasticsearch/xpack/esql/core/CsvSpecReader.java @@ -15,7 +15,6 @@ import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; public final class CsvSpecReader { @@ -113,34 +112,16 @@ public static class CsvTestCase { public boolean ignoreOrder; public List 
requiredCapabilities = List.of(); - // The emulated-specific warnings must always trail the non-emulated ones, if these are present. Otherwise, the closing bracket - // would need to be changed to a less common sequence (like `]#` maybe). - private static final String EMULATED_PREFIX = "#[emulated:"; - /** * Returns the warning headers expected to be added by the test. To declare such a header, use the `warning:definition` format * in the CSV test declaration. The `definition` can use the `EMULATED_PREFIX` string to specify the format of the warning run on * emulated physical operators, if this differs from the format returned by SingleValueQuery. - * @param forEmulated if true, the tests are run on emulated physical operators; if false, the test case is for queries executed - * on a "full stack" ESQL, having data loaded from Lucene. * @return the list of headers that are expected to be returned part of the response. */ - public List expectedWarnings(boolean forEmulated) { + public List expectedWarnings() { List warnings = new ArrayList<>(expectedWarnings.size()); for (String warning : expectedWarnings) { - int idx = warning.toLowerCase(Locale.ROOT).indexOf(EMULATED_PREFIX); - if (idx >= 0) { - assertTrue("Invalid warning spec: closing delimiter (]) missing: `" + warning + "`", warning.endsWith("]")); - if (forEmulated) { - if (idx + EMULATED_PREFIX.length() < warning.length() - 1) { - warnings.add(warning.substring(idx + EMULATED_PREFIX.length(), warning.length() - 1)); - } - } else if (idx > 0) { - warnings.add(warning.substring(0, idx)); - } // else: no warnings expected for non-emulated - } else { - warnings.add(warning); - } + warnings.add(warning); } return warnings; } diff --git a/x-pack/plugin/esql-core/test-fixtures/src/main/java/org/elasticsearch/xpack/esql/core/SpecReader.java b/x-pack/plugin/esql-core/test-fixtures/src/main/java/org/elasticsearch/xpack/esql/core/SpecReader.java index 422a5b744eed0..c96f360cc95f0 100644 --- a/x-pack/plugin/esql-core/test-fixtures/src/main/java/org/elasticsearch/xpack/esql/core/SpecReader.java +++ b/x-pack/plugin/esql-core/test-fixtures/src/main/java/org/elasticsearch/xpack/esql/core/SpecReader.java @@ -79,7 +79,7 @@ public static List readURLSpec(URL source, Parser parser) throws Excep Object result = parser.parse(line); // only if the parser is ready, add the object - otherwise keep on serving it lines if (result != null) { - testCases.add(new Object[] { fileName, groupName, testName, Integer.valueOf(lineNumber), result }); + testCases.add(makeTestCase(fileName, groupName, testName, lineNumber, result)); testName = null; } } @@ -102,4 +102,13 @@ public interface Parser { public static boolean shouldSkipLine(String line) { return line.isEmpty() || line.startsWith("//") || line.startsWith("#"); } + + private static Object[] makeTestCase(String fileName, String groupName, String testName, int lineNumber, Object result) { + var testNameParts = testName.split("#", 2); + + testName = testNameParts[0]; + var instructions = testNameParts.length == 2 ? 
testNameParts[1] : ""; + + return new Object[] { fileName, groupName, testName, lineNumber, result, instructions }; + } } diff --git a/x-pack/plugin/esql-core/test-fixtures/src/main/java/org/elasticsearch/xpack/esql/core/TestUtils.java b/x-pack/plugin/esql-core/test-fixtures/src/main/java/org/elasticsearch/xpack/esql/core/TestUtils.java index 35d73f87f2ceb..5f774ad9dd60e 100644 --- a/x-pack/plugin/esql-core/test-fixtures/src/main/java/org/elasticsearch/xpack/esql/core/TestUtils.java +++ b/x-pack/plugin/esql-core/test-fixtures/src/main/java/org/elasticsearch/xpack/esql/core/TestUtils.java @@ -36,8 +36,6 @@ import org.elasticsearch.xpack.esql.core.expression.predicate.regex.RLikePattern; import org.elasticsearch.xpack.esql.core.expression.predicate.regex.WildcardLike; import org.elasticsearch.xpack.esql.core.expression.predicate.regex.WildcardPattern; -import org.elasticsearch.xpack.esql.core.index.EsIndex; -import org.elasticsearch.xpack.esql.core.plan.logical.EsRelation; import org.elasticsearch.xpack.esql.core.session.Configuration; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; @@ -165,10 +163,6 @@ public static FieldAttribute fieldAttribute(String name, DataType type) { return new FieldAttribute(EMPTY, name, new EsField(name, type, emptyMap(), randomBoolean())); } - public static EsRelation relation() { - return new EsRelation(EMPTY, new EsIndex(randomAlphaOfLength(8), emptyMap()), randomBoolean()); - } - // // Common methods / assertions // diff --git a/x-pack/plugin/esql/arrow/build.gradle b/x-pack/plugin/esql/arrow/build.gradle new file mode 100644 index 0000000000000..e8ae4b049cf7d --- /dev/null +++ b/x-pack/plugin/esql/arrow/build.gradle @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +apply plugin: 'elasticsearch.build' + +dependencies { + compileOnly project(':server') + compileOnly project(':x-pack:plugin:esql:compute') + compileOnly project(':x-pack:plugin:esql-core') + compileOnly project(':x-pack:plugin:mapper-version') + implementation('org.apache.arrow:arrow-vector:16.1.0') + implementation('org.apache.arrow:arrow-format:16.1.0') + implementation('org.apache.arrow:arrow-memory-core:16.1.0') + implementation('org.checkerframework:checker-qual:3.42.0') + implementation('com.google.flatbuffers:flatbuffers-java:23.5.26') + // Needed for the json arrow serialization, and loaded even if we don't use it. + implementation("com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}") + implementation("com.fasterxml.jackson.core:jackson-core:${versions.jackson}") + implementation("com.fasterxml.jackson.core:jackson-databind:${versions.jackson}") + implementation("org.slf4j:slf4j-api:${versions.slf4j}") + runtimeOnly "org.slf4j:slf4j-nop:${versions.slf4j}" + + testImplementation project(':test:framework') + testImplementation('org.apache.arrow:arrow-memory-unsafe:16.1.0') +} + +tasks.named("dependencyLicenses").configure { + mapping from: /jackson-.*/, to: 'jackson' + mapping from: /arrow-.*/, to: 'arrow' + mapping from: /slf4j-.*/, to: 'slf4j' +} + +tasks.named("thirdPartyAudit").configure { + ignoreViolations( + // uses sun.misc.Unsafe. Only used in tests. 
+ 'org.apache.arrow.memory.util.hash.SimpleHasher', + 'org.apache.arrow.memory.util.hash.MurmurHasher', + 'org.apache.arrow.memory.util.MemoryUtil', + 'org.apache.arrow.memory.util.MemoryUtil$1', + 'org.apache.arrow.vector.DecimalVector', + 'org.apache.arrow.vector.BaseFixedWidthVector', + 'org.apache.arrow.vector.util.DecimalUtility', + 'org.apache.arrow.vector.Decimal256Vector', + 'org.apache.arrow.vector.util.VectorAppender', + 'org.apache.arrow.memory.ArrowBuf', + 'org.apache.arrow.vector.BitVectorHelper', + 'org.apache.arrow.memory.util.ByteFunctionHelpers', + ) + ignoreMissingClasses( + 'org.apache.commons.codec.binary.Hex' + ) +} + +test { + jvmArgs('--add-opens=java.base/java.nio=ALL-UNNAMED') +} diff --git a/x-pack/plugin/esql/arrow/licenses/arrow-LICENSE.txt b/x-pack/plugin/esql/arrow/licenses/arrow-LICENSE.txt new file mode 100644 index 0000000000000..7bb1330a1002b --- /dev/null +++ b/x-pack/plugin/esql/arrow/licenses/arrow-LICENSE.txt @@ -0,0 +1,2261 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- + +src/arrow/util (some portions): Apache 2.0, and 3-clause BSD + +Some portions of this module are derived from code in the Chromium project, +copyright (c) Google inc and (c) The Chromium Authors and licensed under the +Apache 2.0 License or the under the 3-clause BSD license: + + Copyright (c) 2013 The Chromium Authors. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +This project includes code from Daniel Lemire's FrameOfReference project. 
+ +https://github.com/lemire/FrameOfReference/blob/6ccaf9e97160f9a3b299e23a8ef739e711ef0c71/src/bpacking.cpp +https://github.com/lemire/FrameOfReference/blob/146948b6058a976bc7767262ad3a2ce201486b93/scripts/turbopacking64.py + +Copyright: 2013 Daniel Lemire +Home page: http://lemire.me/en/ +Project page: https://github.com/lemire/FrameOfReference +License: Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This project includes code from the TensorFlow project + +Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +This project includes code from the NumPy project. + +https://github.com/numpy/numpy/blob/e1f191c46f2eebd6cb892a4bfe14d9dd43a06c4e/numpy/core/src/multiarray/multiarraymodule.c#L2910 + +https://github.com/numpy/numpy/blob/68fd82271b9ea5a9e50d4e761061dfcca851382a/numpy/core/src/multiarray/datetime.c + +Copyright (c) 2005-2017, NumPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------------------- + +This project includes code from the Boost project + +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +This project includes code from the FlatBuffers project + +Copyright 2014 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +This project includes code from the tslib project + +Copyright 2015 Microsoft Corporation. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +This project includes code from the jemalloc project + +https://github.com/jemalloc/jemalloc + +Copyright (C) 2002-2017 Jason Evans . +All rights reserved. +Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved. +Copyright (C) 2009-2017 Facebook, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +1. 
Redistributions of source code must retain the above copyright notice(s), + this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice(s), + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------------------- + +This project includes code from the Go project, BSD 3-clause license + PATENTS +weak patent termination clause +(https://github.com/golang/go/blob/master/PATENTS). + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +This project includes code from the hs2client + +https://github.com/cloudera/hs2client + +Copyright 2016 Cloudera Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +The script ci/scripts/util_wait_for_it.sh has the following license + +Copyright (c) 2016 Giles Hall + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +The script r/configure has the following license (MIT) + +Copyright (c) 2017, Jeroen Ooms and Jim Hester + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +cpp/src/arrow/util/logging.cc, cpp/src/arrow/util/logging.h and +cpp/src/arrow/util/logging-test.cc are adapted from +Ray Project (https://github.com/ray-project/ray) (Apache 2.0). + +Copyright (c) 2016 Ray Project (https://github.com/ray-project/ray) + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +-------------------------------------------------------------------------------- +The files cpp/src/arrow/vendored/datetime/date.h, cpp/src/arrow/vendored/datetime/tz.h, +cpp/src/arrow/vendored/datetime/tz_private.h, cpp/src/arrow/vendored/datetime/ios.h, +cpp/src/arrow/vendored/datetime/ios.mm, +cpp/src/arrow/vendored/datetime/tz.cpp are adapted from +Howard Hinnant's date library (https://github.com/HowardHinnant/date) +It is licensed under MIT license. + +The MIT License (MIT) +Copyright (c) 2015, 2016, 2017 Howard Hinnant +Copyright (c) 2016 Adrian Colomitchi +Copyright (c) 2017 Florian Dang +Copyright (c) 2017 Paul Thompson +Copyright (c) 2018 Tomasz Kamiński + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +The file cpp/src/arrow/util/utf8.h includes code adapted from the page + https://bjoern.hoehrmann.de/utf-8/decoder/dfa/ +with the following license (MIT) + +Copyright (c) 2008-2009 Bjoern Hoehrmann + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/xxhash/ have the following license +(BSD 2-Clause License) + +xxHash Library +Copyright (c) 2012-2014, Yann Collet +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +You can contact the author at : +- xxHash homepage: http://www.xxhash.com +- xxHash source repository : https://github.com/Cyan4973/xxHash + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/double-conversion/ have the following license +(BSD 3-Clause License) + +Copyright 2006-2011, the V8 project authors. All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/uriparser/ have the following license +(BSD 3-Clause License) + +uriparser - RFC 3986 URI parsing library + +Copyright (C) 2007, Weijia Song +Copyright (C) 2007, Sebastian Pipping +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + * Redistributions of source code must retain the above + copyright notice, this list of conditions and the following + disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials + provided with the distribution. + + * Neither the name of the nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +The files under dev/tasks/conda-recipes have the following license + +BSD 3-clause license +Copyright (c) 2015-2018, conda-forge +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/utfcpp/ have the following license + +Copyright 2006-2018 Nemanja Trifunovic + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +This project includes code from Apache Kudu. + + * cpp/cmake_modules/CompilerInfo.cmake is based on Kudu's cmake_modules/CompilerInfo.cmake + +Copyright: 2016 The Apache Software Foundation. +Home page: https://kudu.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This project includes code from Apache Impala (incubating), formerly +Impala. The Impala code and rights were donated to the ASF as part of the +Incubator process after the initial code imports into Apache Parquet. + +Copyright: 2012 Cloudera, Inc. +Copyright: 2016 The Apache Software Foundation. +Home page: http://impala.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This project includes code from Apache Aurora. + +* dev/release/{release,changelog,release-candidate} are based on the scripts from + Apache Aurora + +Copyright: 2016 The Apache Software Foundation. +Home page: https://aurora.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This project includes code from the Google styleguide. + +* cpp/build-support/cpplint.py is based on the scripts from the Google styleguide. + +Copyright: 2009 Google Inc. All rights reserved. +Homepage: https://github.com/google/styleguide +License: 3-clause BSD + +-------------------------------------------------------------------------------- + +This project includes code from Snappy. + +* cpp/cmake_modules/{SnappyCMakeLists.txt,SnappyConfig.h} are based on code + from Google's Snappy project. + +Copyright: 2009 Google Inc. All rights reserved. 
+Homepage: https://github.com/google/snappy +License: 3-clause BSD + +-------------------------------------------------------------------------------- + +This project includes code from the manylinux project. + +* python/manylinux1/scripts/{build_python.sh,python-tag-abi-tag.py, + requirements.txt} are based on code from the manylinux project. + +Copyright: 2016 manylinux +Homepage: https://github.com/pypa/manylinux +License: The MIT License (MIT) + +-------------------------------------------------------------------------------- + +This project includes code from the cymove project: + +* python/pyarrow/includes/common.pxd includes code from the cymove project + +The MIT License (MIT) +Copyright (c) 2019 Omer Ozarslan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE +OR OTHER DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +The projects includes code from the Ursabot project under the dev/archery +directory. + +License: BSD 2-Clause + +Copyright 2019 RStudio, Inc. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +This project include code from mingw-w64. 
+ +* cpp/src/arrow/util/cpu-info.cc has a polyfill for mingw-w64 < 5 + +Copyright (c) 2009 - 2013 by the mingw-w64 project +Homepage: https://mingw-w64.org +License: Zope Public License (ZPL) Version 2.1. + +--------------------------------------------------------------------------------- + +This project include code from Google's Asylo project. + +* cpp/src/arrow/result.h is based on status_or.h + +Copyright (c) Copyright 2017 Asylo authors +Homepage: https://asylo.dev/ +License: Apache 2.0 + +-------------------------------------------------------------------------------- + +This project includes code from Google's protobuf project + +* cpp/src/arrow/result.h ARROW_ASSIGN_OR_RAISE is based off ASSIGN_OR_RETURN +* cpp/src/arrow/util/bit_stream_utils.h contains code from wire_format_lite.h + +Copyright 2008 Google Inc. All rights reserved. +Homepage: https://developers.google.com/protocol-buffers/ +License: + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. + +-------------------------------------------------------------------------------- + +3rdparty dependency LLVM is statically linked in certain binary distributions. +Additionally some sections of source code have been derived from sources in LLVM +and have been clearly labeled as such. LLVM has the following license: + +============================================================================== +The LLVM Project is under the Apache License v2.0 with LLVM Exceptions: +============================================================================== + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +---- LLVM Exceptions to the Apache 2.0 License ---- + +As an exception, if, as a result of your compiling your source code, portions +of this Software are embedded into an Object form of such source code, you +may redistribute such embedded portions in such Object form without complying +with the conditions of Sections 4(a), 4(b) and 4(d) of the License. + +In addition, if you combine or link compiled forms of this Software with +software that is licensed under the GPLv2 ("Combined Software") and if a +court of competent jurisdiction determines that the patent provision (Section +3), the indemnity provision (Section 9) or other Section of the License +conflicts with the conditions of the GPLv2, you may retroactively and +prospectively choose to deem waived or otherwise exclude such Section(s) of +the License, but only in their entirety and only with respect to the Combined +Software. + +============================================================================== +Software from third parties included in the LLVM Project: +============================================================================== +The LLVM Project contains third party software which is under different license +terms. All such code will be identified clearly using at least one of two +mechanisms: +1) It will be in a separate directory tree with its own `LICENSE.txt` or + `LICENSE` file at the top containing the specific license and restrictions + which apply to that software, or +2) It will contain specific license and restriction terms at the top of every + file. + +-------------------------------------------------------------------------------- + +3rdparty dependency gRPC is statically linked in certain binary +distributions, like the python wheels. gRPC has the following license: + +Copyright 2014 gRPC authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +3rdparty dependency Apache Thrift is statically linked in certain binary +distributions, like the python wheels. Apache Thrift has the following license: + +Apache Thrift +Copyright (C) 2006 - 2019, The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +3rdparty dependency Apache ORC is statically linked in certain binary +distributions, like the python wheels. 
Apache ORC has the following license: + +Apache ORC +Copyright 2013-2019 The Apache Software Foundation + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by Hewlett-Packard: +(c) Copyright [2014-2015] Hewlett-Packard Development Company, L.P + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +3rdparty dependency zstd is statically linked in certain binary +distributions, like the python wheels. ZSTD has the following license: + +BSD License + +For Zstandard software + +Copyright (c) 2016-present, Facebook, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name Facebook nor the names of its contributors may be used to + endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency lz4 is statically linked in certain binary +distributions, like the python wheels. lz4 has the following license: + +LZ4 Library +Copyright (c) 2011-2016, Yann Collet +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency Brotli is statically linked in certain binary +distributions, like the python wheels. Brotli has the following license: + +Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +-------------------------------------------------------------------------------- + +3rdparty dependency rapidjson is statically linked in certain binary +distributions, like the python wheels. rapidjson and its dependencies have the +following licenses: + +Tencent is pleased to support the open source community by making RapidJSON +available. + +Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. +All rights reserved. + +If you have downloaded a copy of the RapidJSON binary from Tencent, please note +that the RapidJSON binary is licensed under the MIT License. +If you have downloaded a copy of the RapidJSON source code from Tencent, please +note that RapidJSON source code is licensed under the MIT License, except for +the third-party components listed below which are subject to different license +terms. Your integration of RapidJSON into your own projects may require +compliance with the MIT License, as well as the other licenses applicable to +the third-party components included within RapidJSON. To avoid the problematic +JSON license in your own projects, it's sufficient to exclude the +bin/jsonchecker/ directory, as it's the only code under the JSON license. +A copy of the MIT License is included in this file. 
+ +Other dependencies and licenses: + + Open Source Software Licensed Under the BSD License: + -------------------------------------------------------------------- + + The msinttypes r29 + Copyright (c) 2006-2013 Alexander Chemeris + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + DAMAGE. + + Terms of the MIT License: + -------------------------------------------------------------------- + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +3rdparty dependency snappy is statically linked in certain binary +distributions, like the python wheels. snappy has the following license: + +Copyright 2011, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Google Inc. nor the names of its contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=== + +Some of the benchmark data in testdata/ is licensed differently: + + - fireworks.jpeg is Copyright 2013 Steinar H. Gunderson, and + is licensed under the Creative Commons Attribution 3.0 license + (CC-BY-3.0). See https://creativecommons.org/licenses/by/3.0/ + for more information. + + - kppkn.gtb is taken from the Gaviota chess tablebase set, and + is licensed under the MIT License. See + https://sites.google.com/site/gaviotachessengine/Home/endgame-tablebases-1 + for more information. + + - paper-100k.pdf is an excerpt (bytes 92160 to 194560) from the paper + “Combinatorial Modeling of Chromatin Features Quantitatively Predicts DNA + Replication Timing in _Drosophila_” by Federico Comoglio and Renato Paro, + which is licensed under the CC-BY license. See + http://www.ploscompbiol.org/static/license for more ifnormation. + + - alice29.txt, asyoulik.txt, plrabn12.txt and lcet10.txt are from Project + Gutenberg. The first three have expired copyrights and are in the public + domain; the latter does not have expired copyright, but is still in the + public domain according to the license information + (http://www.gutenberg.org/ebooks/53). + +-------------------------------------------------------------------------------- + +3rdparty dependency gflags is statically linked in certain binary +distributions, like the python wheels. gflags has the following license: + +Copyright (c) 2006, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency glog is statically linked in certain binary +distributions, like the python wheels. glog has the following license: + +Copyright (c) 2008, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +A function gettimeofday in utilities.cc is based on + +http://www.google.com/codesearch/p?hl=en#dR3YEbitojA/COPYING&q=GetSystemTimeAsFileTime%20license:bsd + +The license of this code is: + +Copyright (c) 2003-2008, Jouni Malinen and contributors +All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the name(s) of the above-listed copyright holder(s) nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency re2 is statically linked in certain binary +distributions, like the python wheels. re2 has the following license: + +Copyright (c) 2009 The RE2 Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of Google Inc. nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency c-ares is statically linked in certain binary +distributions, like the python wheels. c-ares has the following license: + +# c-ares license + +Copyright (c) 2007 - 2018, Daniel Stenberg with many contributors, see AUTHORS +file. + +Copyright 1998 by the Massachusetts Institute of Technology. + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, provided that +the above copyright notice appear in all copies and that both that copyright +notice and this permission notice appear in supporting documentation, and that +the name of M.I.T. not be used in advertising or publicity pertaining to +distribution of the software without specific, written prior permission. +M.I.T. makes no representations about the suitability of this software for any +purpose. It is provided "as is" without express or implied warranty. + +-------------------------------------------------------------------------------- + +3rdparty dependency zlib is redistributed as a dynamically linked shared +library in certain binary distributions, like the python wheels. In the future +this will likely change to static linkage. 
zlib has the following license: + +zlib.h -- interface of the 'zlib' general purpose compression library + version 1.2.11, January 15th, 2017 + + Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + +-------------------------------------------------------------------------------- + +3rdparty dependency openssl is redistributed as a dynamically linked shared +library in certain binary distributions, like the python wheels. openssl +preceding version 3 has the following license: + + LICENSE ISSUES + ============== + + The OpenSSL toolkit stays under a double license, i.e. both the conditions of + the OpenSSL License and the original SSLeay license apply to the toolkit. + See below for the actual license texts. + + OpenSSL License + --------------- + +/* ==================================================================== + * Copyright (c) 1998-2019 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@openssl.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). + * + */ + + Original SSLeay License + ----------------------- + +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + +-------------------------------------------------------------------------------- + +This project includes code from the rtools-backports project. + +* ci/scripts/PKGBUILD and ci/scripts/r_windows_build.sh are based on code + from the rtools-backports project. + +Copyright: Copyright (c) 2013 - 2019, Алексей and Jeroen Ooms. +All rights reserved. +Homepage: https://github.com/r-windows/rtools-backports +License: 3-clause BSD + +-------------------------------------------------------------------------------- + +Some code from pandas has been adapted for the pyarrow codebase. pandas is +available under the 3-clause BSD license, which follows: + +pandas license +============== + +Copyright (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team +All rights reserved. + +Copyright (c) 2008-2011 AQR Capital Management, LLC +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the copyright holder nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +Some bits from DyND, in particular aspects of the build system, have been +adapted from libdynd and dynd-python under the terms of the BSD 2-clause +license + +The BSD 2-Clause License + + Copyright (C) 2011-12, Dynamic NDArray Developers + All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Dynamic NDArray Developers list: + + * Mark Wiebe + * Continuum Analytics + +-------------------------------------------------------------------------------- + +Some source code from Ibis (https://github.com/cloudera/ibis) has been adapted +for PyArrow. Ibis is released under the Apache License, Version 2.0. + +-------------------------------------------------------------------------------- + +dev/tasks/homebrew-formulae/apache-arrow.rb has the following license: + +BSD 2-Clause License + +Copyright (c) 2009-present, Homebrew contributors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- + +cpp/src/arrow/vendored/base64.cpp has the following license + +ZLIB License + +Copyright (C) 2004-2017 René Nyffenegger + +This source code is provided 'as-is', without any express or implied +warranty. In no event will the author be held liable for any damages arising +from the use of this software. 
+ +Permission is granted to anyone to use this software for any purpose, including +commercial applications, and to alter it and redistribute it freely, subject to +the following restrictions: + +1. The origin of this source code must not be misrepresented; you must not + claim that you wrote the original source code. If you use this source code + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + +2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original source code. + +3. This notice may not be removed or altered from any source distribution. + +René Nyffenegger rene.nyffenegger@adp-gmbh.ch + +-------------------------------------------------------------------------------- + +This project includes code from Folly. + + * cpp/src/arrow/vendored/ProducerConsumerQueue.h + +is based on Folly's + + * folly/Portability.h + * folly/lang/Align.h + * folly/ProducerConsumerQueue.h + +Copyright: Copyright (c) Facebook, Inc. and its affiliates. +Home page: https://github.com/facebook/folly +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +The file cpp/src/arrow/vendored/musl/strptime.c has the following license + +Copyright © 2005-2020 Rich Felker, et al. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +The file cpp/cmake_modules/BuildUtils.cmake contains code from + +https://gist.github.com/cristianadam/ef920342939a89fae3e8a85ca9459b49 + +which is made available under the MIT license + +Copyright (c) 2019 Cristian Adam + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/portable-snippets/ contain code from + +https://github.com/nemequ/portable-snippets + +and have the following copyright notice: + +Each source file contains a preamble explaining the license situation +for that file, which takes priority over this file. With the +exception of some code pulled in from other repositories (such as +µnit, an MIT-licensed project which is used for testing), the code is +public domain, released using the CC0 1.0 Universal dedication (*). + +(*) https://creativecommons.org/publicdomain/zero/1.0/legalcode + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/fast_float/ contain code from + +https://github.com/lemire/fast_float + +which is made available under the Apache License 2.0. + +-------------------------------------------------------------------------------- + +The file python/pyarrow/vendored/docscrape.py contains code from + +https://github.com/numpy/numpydoc/ + +which is made available under the BSD 2-clause license. + +-------------------------------------------------------------------------------- + +The file python/pyarrow/vendored/version.py contains code from + +https://github.com/pypa/packaging/ + +which is made available under both the Apache license v2.0 and the +BSD 2-clause license. + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/pcg contain code from + +https://github.com/imneme/pcg-cpp + +and have the following copyright notice: + +Copyright 2014-2019 Melissa O'Neill , + and the PCG Project contributors. + +SPDX-License-Identifier: (Apache-2.0 OR MIT) + +Licensed under the Apache License, Version 2.0 (provided in +LICENSE-APACHE.txt and at http://www.apache.org/licenses/LICENSE-2.0) +or under the MIT license (provided in LICENSE-MIT.txt and at +http://opensource.org/licenses/MIT), at your option. This file may not +be copied, modified, or distributed except according to those terms. + +Distributed on an "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, either +express or implied. See your chosen license for details. + +-------------------------------------------------------------------------------- +r/R/dplyr-count-tally.R (some portions) + +Some portions of this file are derived from code from + +https://github.com/tidyverse/dplyr/ + +which is made available under the MIT license + +Copyright (c) 2013-2019 RStudio and others. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the “Software”), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +The file src/arrow/util/io_util.cc contains code from the CPython project +which is made available under the Python Software Foundation License Version 2. + +-------------------------------------------------------------------------------- + +3rdparty dependency opentelemetry-cpp is statically linked in certain binary +distributions. opentelemetry-cpp is made available under the Apache License 2.0. + +Copyright The OpenTelemetry Authors +SPDX-License-Identifier: Apache-2.0 + +-------------------------------------------------------------------------------- + +ci/conan/ is based on code from Conan Package and Dependency Manager. + +Copyright (c) 2019 Conan.io + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +3rdparty dependency UCX is redistributed as a dynamically linked shared +library in certain binary distributions. UCX has the following license: + +Copyright (c) 2014-2015 UT-Battelle, LLC. All rights reserved. +Copyright (C) 2014-2020 Mellanox Technologies Ltd. All rights reserved. +Copyright (C) 2014-2015 The University of Houston System. All rights reserved. +Copyright (C) 2015 The University of Tennessee and The University + of Tennessee Research Foundation. All rights reserved. +Copyright (C) 2016-2020 ARM Ltd. All rights reserved. +Copyright (c) 2016 Los Alamos National Security, LLC. All rights reserved. +Copyright (C) 2016-2020 Advanced Micro Devices, Inc. All rights reserved. +Copyright (C) 2019 UChicago Argonne, LLC. All rights reserved. +Copyright (c) 2018-2020 NVIDIA CORPORATION. All rights reserved. +Copyright (C) 2020 Huawei Technologies Co., Ltd. All rights reserved. +Copyright (C) 2016-2020 Stony Brook University. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. 
Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. Neither the name of the copyright holder nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +The file dev/tasks/r/github.packages.yml contains code from + +https://github.com/ursa-labs/arrow-r-nightly + +which is made available under the Apache License 2.0. + +-------------------------------------------------------------------------------- +.github/actions/sync-nightlies/action.yml (some portions) + +Some portions of this file are derived from code from + +https://github.com/JoshPiper/rsync-docker + +which is made available under the MIT license + +Copyright (c) 2020 Joshua Piper + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +-------------------------------------------------------------------------------- +.github/actions/sync-nightlies/action.yml (some portions) + +Some portions of this file are derived from code from + +https://github.com/burnett01/rsync-deployments + +which is made available under the MIT license + +Copyright (c) 2019-2022 Contention +Copyright (c) 2019-2022 Burnett01 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- +java/vector/src/main/java/org/apache/arrow/vector/util/IntObjectHashMap.java +java/vector/src/main/java/org/apache/arrow/vector/util/IntObjectMap.java + +These file are derived from code from Netty, which is made available under the +Apache License 2.0. diff --git a/x-pack/plugin/esql/arrow/licenses/arrow-NOTICE.txt b/x-pack/plugin/esql/arrow/licenses/arrow-NOTICE.txt new file mode 100644 index 0000000000000..2089c6fb20358 --- /dev/null +++ b/x-pack/plugin/esql/arrow/licenses/arrow-NOTICE.txt @@ -0,0 +1,84 @@ +Apache Arrow +Copyright 2016-2024 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +This product includes software from the SFrame project (BSD, 3-clause). +* Copyright (C) 2015 Dato, Inc. +* Copyright (c) 2009 Carnegie Mellon University. + +This product includes software from the Feather project (Apache 2.0) +https://github.com/wesm/feather + +This product includes software from the DyND project (BSD 2-clause) +https://github.com/libdynd + +This product includes software from the LLVM project + * distributed under the University of Illinois Open Source + +This product includes software from the google-lint project + * Copyright (c) 2009 Google Inc. All rights reserved. + +This product includes software from the mman-win32 project + * Copyright https://code.google.com/p/mman-win32/ + * Licensed under the MIT License; + +This product includes software from the LevelDB project + * Copyright (c) 2011 The LevelDB Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * Moved from Kudu http://github.com/cloudera/kudu + +This product includes software from the CMake project + * Copyright 2001-2009 Kitware, Inc. + * Copyright 2012-2014 Continuum Analytics, Inc. + * All rights reserved. + +This product includes software from https://github.com/matthew-brett/multibuild (BSD 2-clause) + * Copyright (c) 2013-2016, Matt Terry and Matthew Brett; all rights reserved. 
+ +This product includes software from the Ibis project (Apache 2.0) + * Copyright (c) 2015 Cloudera, Inc. + * https://github.com/cloudera/ibis + +This product includes software from Dremio (Apache 2.0) + * Copyright (C) 2017-2018 Dremio Corporation + * https://github.com/dremio/dremio-oss + +This product includes software from Google Guava (Apache 2.0) + * Copyright (C) 2007 The Guava Authors + * https://github.com/google/guava + +This product include software from CMake (BSD 3-Clause) + * CMake - Cross Platform Makefile Generator + * Copyright 2000-2019 Kitware, Inc. and Contributors + +The web site includes files generated by Jekyll. + +-------------------------------------------------------------------------------- + +This product includes code from Apache Kudu, which includes the following in +its NOTICE file: + + Apache Kudu + Copyright 2016 The Apache Software Foundation + + This product includes software developed at + The Apache Software Foundation (http://www.apache.org/). + + Portions of this software were developed at + Cloudera, Inc (http://www.cloudera.com/). + +-------------------------------------------------------------------------------- + +This product includes code from Apache ORC, which includes the following in +its NOTICE file: + + Apache ORC + Copyright 2013-2019 The Apache Software Foundation + + This product includes software developed by The Apache Software + Foundation (http://www.apache.org/). + + This product includes software developed by Hewlett-Packard: + (c) Copyright [2014-2015] Hewlett-Packard Development Company, L.P diff --git a/x-pack/plugin/esql/arrow/licenses/checker-qual-LICENSE.txt b/x-pack/plugin/esql/arrow/licenses/checker-qual-LICENSE.txt new file mode 100644 index 0000000000000..9837c6b69fdab --- /dev/null +++ b/x-pack/plugin/esql/arrow/licenses/checker-qual-LICENSE.txt @@ -0,0 +1,22 @@ +Checker Framework qualifiers +Copyright 2004-present by the Checker Framework developers + +MIT License: + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/x-pack/plugin/esql/arrow/licenses/checker-qual-NOTICE.txt b/x-pack/plugin/esql/arrow/licenses/checker-qual-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/core/licenses/nimbus-jose-jwt-LICENSE.txt b/x-pack/plugin/esql/arrow/licenses/flatbuffers-java-LICENSE.txt similarity index 100% rename from x-pack/plugin/core/licenses/nimbus-jose-jwt-LICENSE.txt rename to x-pack/plugin/esql/arrow/licenses/flatbuffers-java-LICENSE.txt diff --git a/x-pack/plugin/esql/arrow/licenses/flatbuffers-java-NOTICE.txt b/x-pack/plugin/esql/arrow/licenses/flatbuffers-java-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/libs/preallocate/licenses/jna-LICENSE.txt b/x-pack/plugin/esql/arrow/licenses/jackson-LICENSE.txt similarity index 89% rename from libs/preallocate/licenses/jna-LICENSE.txt rename to x-pack/plugin/esql/arrow/licenses/jackson-LICENSE.txt index f433b1a53f5b8..d645695673349 100644 --- a/libs/preallocate/licenses/jna-LICENSE.txt +++ b/x-pack/plugin/esql/arrow/licenses/jackson-LICENSE.txt @@ -175,3 +175,28 @@ of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/x-pack/plugin/esql/arrow/licenses/jackson-NOTICE.txt b/x-pack/plugin/esql/arrow/licenses/jackson-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/esql/arrow/licenses/slf4j-LICENSE.txt b/x-pack/plugin/esql/arrow/licenses/slf4j-LICENSE.txt new file mode 100644 index 0000000000000..f687729a0b452 --- /dev/null +++ b/x-pack/plugin/esql/arrow/licenses/slf4j-LICENSE.txt @@ -0,0 +1,21 @@ +Copyright (c) 2004-2022 QOS.ch Sarl (Switzerland) +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/x-pack/plugin/esql/arrow/licenses/slf4j-NOTICE.txt b/x-pack/plugin/esql/arrow/licenses/slf4j-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/esql/arrow/src/main/java/org/elasticsearch/xpack/esql/arrow/AllocationManagerShim.java b/x-pack/plugin/esql/arrow/src/main/java/org/elasticsearch/xpack/esql/arrow/AllocationManagerShim.java new file mode 100644 index 0000000000000..b52d1053ff595 --- /dev/null +++ b/x-pack/plugin/esql/arrow/src/main/java/org/elasticsearch/xpack/esql/arrow/AllocationManagerShim.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.arrow; + +import org.apache.arrow.memory.AllocationManager; +import org.apache.arrow.memory.ArrowBuf; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.memory.DefaultAllocationManagerOption; +import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; + +import java.lang.reflect.Field; +import java.security.AccessController; +import java.security.PrivilegedAction; + +/** + * An Arrow memory allocation manager that always fails. + *
<p> + * We don't actually use Arrow's memory manager as we stream dataframe buffers directly from ESQL blocks. + * But Arrow won't initialize properly unless it has one (and requires either the arrow-memory-netty or arrow-memory-unsafe libraries). + * It also does some fancy classpath scanning and calls to {@code setAccessible} which will be rejected by the security manager. + * <p>
      + * So we configure an allocation manager that will fail on any attempt to allocate memory. + * + * @see DefaultAllocationManagerOption + */ +public class AllocationManagerShim implements AllocationManager.Factory { + + private static final Logger logger = LogManager.getLogger(AllocationManagerShim.class); + + /** + * Initialize the Arrow memory allocation manager shim. + */ + @SuppressForbidden(reason = "Inject the default Arrow memory allocation manager") + public static void init() { + try { + Class.forName("org.elasticsearch.test.ESTestCase"); + logger.info("We're in tests, not disabling Arrow memory manager so we can use a real runtime for testing"); + } catch (ClassNotFoundException notfound) { + logger.debug("Disabling Arrow's allocation manager"); + AccessController.doPrivileged((PrivilegedAction) () -> { + try { + Field field = DefaultAllocationManagerOption.class.getDeclaredField("DEFAULT_ALLOCATION_MANAGER_FACTORY"); + field.setAccessible(true); + field.set(null, new AllocationManagerShim()); + } catch (Exception e) { + throw new AssertionError("Can't init Arrow", e); + } + return null; + }); + } + } + + @Override + public AllocationManager create(BufferAllocator accountingAllocator, long size) { + throw new UnsupportedOperationException("Arrow memory manager is disabled"); + } + + @Override + public ArrowBuf empty() { + throw new UnsupportedOperationException("Arrow memory manager is disabled"); + } +} diff --git a/x-pack/plugin/esql/arrow/src/main/java/org/elasticsearch/xpack/esql/arrow/ArrowFormat.java b/x-pack/plugin/esql/arrow/src/main/java/org/elasticsearch/xpack/esql/arrow/ArrowFormat.java new file mode 100644 index 0000000000000..762c95cdce3e7 --- /dev/null +++ b/x-pack/plugin/esql/arrow/src/main/java/org/elasticsearch/xpack/esql/arrow/ArrowFormat.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.arrow; + +import org.elasticsearch.xcontent.MediaType; + +import java.util.Map; +import java.util.Set; + +public class ArrowFormat implements MediaType { + public static final ArrowFormat INSTANCE = new ArrowFormat(); + + private static final String FORMAT = "arrow"; + // See https://www.iana.org/assignments/media-types/application/vnd.apache.arrow.stream + public static final String CONTENT_TYPE = "application/vnd.apache.arrow.stream"; + private static final String VENDOR_CONTENT_TYPE = "application/vnd.elasticsearch+arrow+stream"; + + @Override + public String queryParameter() { + return FORMAT; + } + + @Override + public Set headerValues() { + return Set.of( + new HeaderValue(CONTENT_TYPE, Map.of("header", "present|absent")), + new HeaderValue(VENDOR_CONTENT_TYPE, Map.of("header", "present|absent", COMPATIBLE_WITH_PARAMETER_NAME, VERSION_PATTERN)) + ); + } +} diff --git a/x-pack/plugin/esql/arrow/src/main/java/org/elasticsearch/xpack/esql/arrow/ArrowResponse.java b/x-pack/plugin/esql/arrow/src/main/java/org/elasticsearch/xpack/esql/arrow/ArrowResponse.java new file mode 100644 index 0000000000000..8c2243284a538 --- /dev/null +++ b/x-pack/plugin/esql/arrow/src/main/java/org/elasticsearch/xpack/esql/arrow/ArrowResponse.java @@ -0,0 +1,379 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.arrow; + +import org.apache.arrow.memory.ArrowBuf; +import org.apache.arrow.vector.compression.NoCompressionCodec; +import org.apache.arrow.vector.ipc.ArrowStreamWriter; +import org.apache.arrow.vector.ipc.WriteChannel; +import org.apache.arrow.vector.ipc.message.ArrowFieldNode; +import org.apache.arrow.vector.ipc.message.ArrowRecordBatch; +import org.apache.arrow.vector.ipc.message.IpcOption; +import org.apache.arrow.vector.ipc.message.MessageSerializer; +import org.apache.arrow.vector.types.Types.MinorType; +import org.apache.arrow.vector.types.pojo.Field; +import org.apache.arrow.vector.types.pojo.Schema; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.bytes.ReleasableBytesReference; +import org.elasticsearch.common.io.stream.BytesStream; +import org.elasticsearch.common.io.stream.RecyclerBytesStreamOutput; +import org.elasticsearch.common.recycler.Recycler; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.rest.ChunkedRestResponseBodyPart; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.WritableByteChannel; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +public class ArrowResponse implements ChunkedRestResponseBodyPart, Releasable { + + public static class Column { + private final BlockConverter converter; + private final String name; + + public Column(String esqlType, String name) { + this.converter = ESQL_CONVERTERS.get(esqlType); + if (converter == null) { + throw new IllegalArgumentException("ES|QL type [" + esqlType + "] is not supported by the Arrow format"); + } + this.name = name; + } + } + + private final List columns; + private Iterator segments; + private ResponseSegment currentSegment; + + public ArrowResponse(List columns, List pages) { + this.columns = columns; + + currentSegment = new SchemaResponse(this); + List rest = new ArrayList<>(pages.size()); + for (int p = 0; p < pages.size(); p++) { + var page = pages.get(p); + rest.add(new PageResponse(this, page)); + // Multivalued fields are not supported yet. + for (int b = 0; b < page.getBlockCount(); b++) { + if (page.getBlock(b).mayHaveMultivaluedFields()) { + throw new IllegalArgumentException( + "ES|QL response field [" + columns.get(b).name + "] is multi-valued. This isn't supported yet by the Arrow format" + ); + } + } + } + rest.add(new EndResponse(this)); + segments = rest.iterator(); + } + + @Override + public boolean isPartComplete() { + return currentSegment == null; + } + + @Override + public boolean isLastPart() { + // Even if sent in chunks, the entirety of ESQL data is available, so it's single (chunked) part + return true; + } + + @Override + public void getNextPart(ActionListener listener) { + listener.onFailure(new IllegalStateException("no continuations available")); + } + + @Override + public ReleasableBytesReference encodeChunk(int sizeHint, Recycler recycler) throws IOException { + try { + return currentSegment.encodeChunk(sizeHint, recycler); + } finally { + if (currentSegment.isDone()) { + currentSegment = segments.hasNext() ? 
segments.next() : null; + } + } + } + + @Override + public String getResponseContentTypeString() { + return ArrowFormat.CONTENT_TYPE; + } + + @Override + public void close() { + currentSegment = null; + segments = null; + } + + /** + * An Arrow response is composed of different segments, each being a set of chunks: + * the schema header, the data buffers, and the trailer. + */ + protected abstract static class ResponseSegment { + static { + // Init the Arrow memory manager shim + AllocationManagerShim.init(); + } + + protected final ArrowResponse response; + + ResponseSegment(ArrowResponse response) { + this.response = response; + } + + public final ReleasableBytesReference encodeChunk(int sizeHint, Recycler recycler) throws IOException { + RecyclerBytesStreamOutput output = new RecyclerBytesStreamOutput(recycler); + try { + encodeChunk(sizeHint, output); + BytesReference ref = output.bytes(); + RecyclerBytesStreamOutput closeRef = output; + output = null; + ReleasableBytesReference result = new ReleasableBytesReference(ref, () -> Releasables.closeExpectNoException(closeRef)); + return result; + } finally { + Releasables.closeExpectNoException(output); + } + } + + protected abstract void encodeChunk(int sizeHint, RecyclerBytesStreamOutput out) throws IOException; + + protected abstract boolean isDone(); + + /** + * Adapts a {@link BytesStream} so that Arrow can write to it. + */ + protected static WritableByteChannel arrowOut(BytesStream output) { + return new WritableByteChannel() { + @Override + public int write(ByteBuffer byteBuffer) throws IOException { + if (byteBuffer.hasArray() == false) { + throw new AssertionError("only implemented for array backed buffers"); + } + int length = byteBuffer.remaining(); + output.write(byteBuffer.array(), byteBuffer.arrayOffset() + byteBuffer.position(), length); + byteBuffer.position(byteBuffer.position() + length); + assert byteBuffer.hasRemaining() == false; + return length; + } + + @Override + public boolean isOpen() { + return true; + } + + @Override + public void close() {} + }; + } + } + + /** + * Header part of the Arrow response containing the dataframe schema. + * + * @see IPC Streaming Format + */ + private static class SchemaResponse extends ResponseSegment { + private boolean done = false; + + SchemaResponse(ArrowResponse response) { + super(response); + } + + @Override + public boolean isDone() { + return done; + } + + @Override + protected void encodeChunk(int sizeHint, RecyclerBytesStreamOutput out) throws IOException { + WriteChannel arrowOut = new WriteChannel(arrowOut(out)); + MessageSerializer.serialize(arrowOut, arrowSchema()); + done = true; + } + + private Schema arrowSchema() { + return new Schema(response.columns.stream().map(c -> new Field(c.name, c.converter.arrowFieldType(), List.of())).toList()); + } + } + + /** + * Page response segment: write an ES|QL page as an Arrow RecordBatch + */ + private static class PageResponse extends ResponseSegment { + private final Page page; + private boolean done = false; + + PageResponse(ArrowResponse response, Page page) { + super(response); + this.page = page; + } + + @Override + public boolean isDone() { + return done; + } + + // Writes some data and returns the number of bytes written. 
+ interface BufWriter { + long write() throws IOException; + } + + @Override + protected void encodeChunk(int sizeHint, RecyclerBytesStreamOutput out) throws IOException { + // An Arrow record batch consists of: + // - fields metadata, giving the number of items and the number of null values for each field + // - data buffers for each field. The number of buffers for a field depends on its type, e.g.: + // - for primitive types, there's a validity buffer (for nulls) and a value buffer. + // - for strings, there's a validity buffer, an offsets buffer and a data buffer + // See https://arrow.apache.org/docs/format/Columnar.html#recordbatch-message + + // Field metadata + List nodes = new ArrayList<>(page.getBlockCount()); + + // Buffers added to the record batch. They're used to track data size so that Arrow can compute offsets + // but contain no data. Actual writing will be done by the bufWriters. This avoids having to deal with + // Arrow's memory management, and in the future will allow direct write from ESQL block vectors. + List bufs = new ArrayList<>(page.getBlockCount() * 2); + + // Closures that will actually write a Block's data. Maps 1:1 to `bufs`. + List bufWriters = new ArrayList<>(page.getBlockCount() * 2); + + // Give Arrow a WriteChannel that will iterate on `bufWriters` when requested to write a buffer. + WriteChannel arrowOut = new WriteChannel(arrowOut(out)) { + int bufIdx = 0; + long extraPosition = 0; + + @Override + public void write(ArrowBuf buffer) throws IOException { + extraPosition += bufWriters.get(bufIdx++).write(out); + } + + @Override + public long getCurrentPosition() { + return super.getCurrentPosition() + extraPosition; + } + + @Override + public long align() throws IOException { + int trailingByteSize = (int) (getCurrentPosition() % 8); + if (trailingByteSize != 0) { // align on 8 byte boundaries + return writeZeros(8 - trailingByteSize); + } + return 0; + } + }; + + // Create Arrow buffers for each of the blocks in this page + for (int b = 0; b < page.getBlockCount(); b++) { + var converter = response.columns.get(b).converter; + + Block block = page.getBlock(b); + nodes.add(new ArrowFieldNode(block.getPositionCount(), converter.nullValuesCount(block))); + converter.convert(block, bufs, bufWriters); + } + + // Create the batch and serialize it + ArrowRecordBatch batch = new ArrowRecordBatch( + page.getPositionCount(), + nodes, + bufs, + NoCompressionCodec.DEFAULT_BODY_COMPRESSION, + true, // align buffers + false // retain buffers + ); + MessageSerializer.serialize(arrowOut, batch); + + done = true; // one day we should respect sizeHint here. kindness. 
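To make the buffer layout described above concrete, here is a minimal, dependency-free sketch (not part of this change; the class name and values are invented for illustration) that computes the validity, offsets and data buffers for a toy nullable string column, i.e. the same variable-length layout the BytesRef converter later in this change produces:

import java.util.Arrays;
import java.util.BitSet;

class ArrowVarCharLayoutSketch {
    public static void main(String[] args) {
        String[] values = { "ab", null, "cde" };

        // Validity buffer: one bit per position, set when the value is non-null.
        BitSet validity = new BitSet(values.length);
        // Offsets buffer: positionCount + 1 cumulative byte offsets into the data buffer.
        int[] offsets = new int[values.length + 1];
        StringBuilder data = new StringBuilder();

        for (int i = 0; i < values.length; i++) {
            offsets[i] = data.length();
            if (values[i] != null) {
                validity.set(i);
                data.append(values[i]);
            }
        }
        offsets[values.length] = data.length();

        System.out.println("validity=" + validity);                // {0, 2}
        System.out.println("offsets=" + Arrays.toString(offsets)); // [0, 2, 2, 5]
        System.out.println("data=" + data);                        // abcde
    }
}

Note that a null position still consumes an offsets slot (a zero-length entry), which is why the offsets writer below always emits positionCount + 1 integers even when every value is null.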
+ } + } + + /** + * Trailer segment: write the Arrow end of stream marker + */ + private static class EndResponse extends ResponseSegment { + private boolean done = false; + + private EndResponse(ArrowResponse response) { + super(response); + } + + @Override + public boolean isDone() { + return done; + } + + @Override + protected void encodeChunk(int sizeHint, RecyclerBytesStreamOutput out) throws IOException { + ArrowStreamWriter.writeEndOfStream(new WriteChannel(arrowOut(out)), IpcOption.DEFAULT); + done = true; + } + } + + /** + * Converters for every ES|QL type + */ + static final Map ESQL_CONVERTERS = Map.ofEntries( + // For reference: + // - EsqlDataTypes: list of ESQL data types (not all are present in outputs) + // - PositionToXContent: conversions for ESQL JSON output + // - EsqlDataTypeConverter: conversions to ESQL datatypes + // Missing: multi-valued values + + buildEntry(new BlockConverter.AsNull("null")), + buildEntry(new BlockConverter.AsNull("unsupported")), + + buildEntry(new BlockConverter.AsBoolean("boolean")), + + buildEntry(new BlockConverter.AsInt32("integer")), + buildEntry(new BlockConverter.AsInt32("counter_integer")), + + buildEntry(new BlockConverter.AsInt64("long")), + // FIXME: counters: are they signed? + buildEntry(new BlockConverter.AsInt64("counter_long")), + buildEntry(new BlockConverter.AsInt64("unsigned_long", MinorType.UINT8)), + + buildEntry(new BlockConverter.AsFloat64("double")), + buildEntry(new BlockConverter.AsFloat64("counter_double")), + + buildEntry(new BlockConverter.AsVarChar("keyword")), + buildEntry(new BlockConverter.AsVarChar("text")), + + // date: array of int64 seconds since epoch + // FIXME: is it signed? + buildEntry(new BlockConverter.AsInt64("date", MinorType.TIMESTAMPMILLI)), + + // ip are represented as 16-byte ipv6 addresses. We shorten mapped ipv4 addresses to 4 bytes. + // Another option would be to use a fixed size binary to avoid the offset array. But with mostly + // ipv4 addresses it would still be twice as big. + buildEntry(new BlockConverter.TransformedBytesRef("ip", MinorType.VARBINARY, ValueConversions::shortenIpV4Addresses)), + + // geo_point: Keep WKB format (JSON converts to WKT) + buildEntry(new BlockConverter.AsVarBinary("geo_point")), + buildEntry(new BlockConverter.AsVarBinary("geo_shape")), + buildEntry(new BlockConverter.AsVarBinary("cartesian_point")), + buildEntry(new BlockConverter.AsVarBinary("cartesian_shape")), + + // version: convert to string + buildEntry(new BlockConverter.TransformedBytesRef("version", MinorType.VARCHAR, ValueConversions::versionToString)), + + // _source: json + // TODO: support also CBOR and SMILE with an additional formatting parameter + buildEntry(new BlockConverter.TransformedBytesRef("_source", MinorType.VARCHAR, ValueConversions::sourceToJson)) + ); + + private static Map.Entry buildEntry(BlockConverter converter) { + return Map.entry(converter.esqlType(), converter); + } +} diff --git a/x-pack/plugin/esql/arrow/src/main/java/org/elasticsearch/xpack/esql/arrow/BlockConverter.java b/x-pack/plugin/esql/arrow/src/main/java/org/elasticsearch/xpack/esql/arrow/BlockConverter.java new file mode 100644 index 0000000000000..0a65792ab8e13 --- /dev/null +++ b/x-pack/plugin/esql/arrow/src/main/java/org/elasticsearch/xpack/esql/arrow/BlockConverter.java @@ -0,0 +1,452 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.arrow; + +import org.apache.arrow.memory.ArrowBuf; +import org.apache.arrow.vector.types.Types; +import org.apache.arrow.vector.types.pojo.FieldType; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.RecyclerBytesStreamOutput; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; + +import java.io.IOException; +import java.util.BitSet; +import java.util.List; +import java.util.Map; +import java.util.function.BiFunction; + +public abstract class BlockConverter { + + private final FieldType fieldType; + private final String esqlType; + + protected BlockConverter(String esqlType, Types.MinorType minorType) { + // Add the exact ESQL type as field metadata + var meta = Map.of("elastic:type", esqlType); + this.fieldType = new FieldType(true, minorType.getType(), null, meta); + this.esqlType = esqlType; + } + + public final String esqlType() { + return this.esqlType; + } + + public final FieldType arrowFieldType() { + return this.fieldType; + } + + // Block.nullValuesCount was more efficient but was removed in https://github.com/elastic/elasticsearch/pull/108916 + protected int nullValuesCount(Block block) { + if (block.mayHaveNulls() == false) { + return 0; + } + + if (block.areAllValuesNull()) { + return block.getPositionCount(); + } + + int count = 0; + for (int i = 0; i < block.getPositionCount(); i++) { + if (block.isNull(i)) { + count++; + } + } + return count; + } + + public interface BufWriter { + long write(RecyclerBytesStreamOutput out) throws IOException; + } + + /** + * Convert a block into Arrow buffers. + * @param block the ESQL block + * @param bufs arrow buffers, used to track sizes + * @param bufWriters buffer writers, that will do the actual work of writing the data + */ + public abstract void convert(Block block, List bufs, List bufWriters); + + /** + * Conversion of Double blocks + */ + public static class AsFloat64 extends BlockConverter { + + public AsFloat64(String esqlType) { + super(esqlType, Types.MinorType.FLOAT8); + } + + @Override + public void convert(Block b, List bufs, List bufWriters) { + DoubleBlock block = (DoubleBlock) b; + + accumulateVectorValidity(bufs, bufWriters, block); + + bufs.add(dummyArrowBuf(vectorLength(block))); + bufWriters.add(out -> { + if (block.areAllValuesNull()) { + return BlockConverter.writeZeroes(out, vectorLength(block)); + } + + // TODO could we "just" get the memory of the array and dump it? 
+ int count = block.getPositionCount(); + for (int i = 0; i < count; i++) { + out.writeDoubleLE(block.getDouble(i)); + } + return vectorLength(block); + }); + } + + private static int vectorLength(DoubleBlock b) { + return Double.BYTES * b.getPositionCount(); + } + } + + /** + * Conversion of Int blocks + */ + public static class AsInt32 extends BlockConverter { + + public AsInt32(String esqlType) { + super(esqlType, Types.MinorType.INT); + } + + @Override + public void convert(Block b, List bufs, List bufWriters) { + IntBlock block = (IntBlock) b; + + accumulateVectorValidity(bufs, bufWriters, block); + + bufs.add(dummyArrowBuf(vectorLength(block))); + bufWriters.add(out -> { + if (block.areAllValuesNull()) { + return BlockConverter.writeZeroes(out, vectorLength(block)); + } + + // TODO could we "just" get the memory of the array and dump it? + int count = block.getPositionCount(); + for (int i = 0; i < count; i++) { + out.writeIntLE(block.getInt(i)); + } + return vectorLength(block); + }); + } + + private static int vectorLength(IntBlock b) { + return Integer.BYTES * b.getPositionCount(); + } + } + + /** + * Conversion of Long blocks + */ + public static class AsInt64 extends BlockConverter { + public AsInt64(String esqlType) { + this(esqlType, Types.MinorType.BIGINT); + } + + protected AsInt64(String esqlType, Types.MinorType minorType) { + super(esqlType, minorType); + } + + @Override + public void convert(Block b, List bufs, List bufWriters) { + LongBlock block = (LongBlock) b; + accumulateVectorValidity(bufs, bufWriters, block); + + bufs.add(dummyArrowBuf(vectorLength(block))); + bufWriters.add(out -> { + if (block.areAllValuesNull()) { + return BlockConverter.writeZeroes(out, vectorLength(block)); + } + + // TODO could we "just" get the memory of the array and dump it? + int count = block.getPositionCount(); + for (int i = 0; i < count; i++) { + out.writeLongLE(block.getLong(i)); + } + return vectorLength(block); + }); + } + + private static int vectorLength(LongBlock b) { + return Long.BYTES * b.getPositionCount(); + } + } + + /** + * Conversion of Boolean blocks + */ + public static class AsBoolean extends BlockConverter { + public AsBoolean(String esqlType) { + super(esqlType, Types.MinorType.BIT); + } + + @Override + public void convert(Block b, List bufs, List bufWriters) { + BooleanBlock block = (BooleanBlock) b; + accumulateVectorValidity(bufs, bufWriters, block); + + bufs.add(dummyArrowBuf(vectorLength(block))); + bufWriters.add(out -> { + int count = block.getPositionCount(); + BitSet bits = new BitSet(); + + // Only set the bits that are true, writeBitSet will take + // care of adding zero bytes if needed. 
+ if (block.areAllValuesNull() == false) { + for (int i = 0; i < count; i++) { + if (block.getBoolean(i)) { + bits.set(i); + } + } + } + + return BlockConverter.writeBitSet(out, bits, count); + }); + } + + private static int vectorLength(BooleanBlock b) { + return BlockConverter.bitSetLength(b.getPositionCount()); + } + } + + /** + * Conversion of ByteRef blocks + */ + public static class BytesRefConverter extends BlockConverter { + + public BytesRefConverter(String esqlType, Types.MinorType minorType) { + super(esqlType, minorType); + } + + @Override + public void convert(Block b, List bufs, List bufWriters) { + BytesRefBlock block = (BytesRefBlock) b; + + BlockConverter.accumulateVectorValidity(bufs, bufWriters, block); + + // Offsets vector + bufs.add(dummyArrowBuf(offsetVectorLength(block))); + + bufWriters.add(out -> { + if (block.areAllValuesNull()) { + var count = block.getPositionCount() + 1; + for (int i = 0; i < count; i++) { + out.writeIntLE(0); + } + return offsetVectorLength(block); + } + + // TODO could we "just" get the memory of the array and dump it? + BytesRef scratch = new BytesRef(); + int offset = 0; + for (int i = 0; i < block.getPositionCount(); i++) { + out.writeIntLE(offset); + // FIXME: add a ByteRefsVector.getLength(position): there are some cases + // where getBytesRef will allocate, which isn't needed here. + BytesRef v = block.getBytesRef(i, scratch); + + offset += v.length; + } + out.writeIntLE(offset); + return offsetVectorLength(block); + }); + + // Data vector + bufs.add(BlockConverter.dummyArrowBuf(dataVectorLength(block))); + + bufWriters.add(out -> { + if (block.areAllValuesNull()) { + return 0; + } + + // TODO could we "just" get the memory of the array and dump it? + BytesRef scratch = new BytesRef(); + long length = 0; + for (int i = 0; i < block.getPositionCount(); i++) { + BytesRef v = block.getBytesRef(i, scratch); + + out.write(v.bytes, v.offset, v.length); + length += v.length; + } + return length; + }); + } + + private static int offsetVectorLength(BytesRefBlock block) { + return Integer.BYTES * (block.getPositionCount() + 1); + } + + private int dataVectorLength(BytesRefBlock block) { + if (block.areAllValuesNull()) { + return 0; + } + + // TODO we can probably get the length from the vector without all this sum + + int length = 0; + BytesRef scratch = new BytesRef(); + for (int i = 0; i < block.getPositionCount(); i++) { + BytesRef v = block.getBytesRef(i, scratch); + length += v.length; + } + return length; + } + } + + /** + * Conversion of ByteRefs where each value is itself converted to a different format. 
+ */ + public static class TransformedBytesRef extends BytesRefConverter { + + private final BiFunction valueConverter; + + /** + * + * @param esqlType ESQL type name + * @param minorType Arrow type + * @param valueConverter a function that takes (value, scratch) input parameters and returns the transformed value + */ + public TransformedBytesRef(String esqlType, Types.MinorType minorType, BiFunction valueConverter) { + super(esqlType, minorType); + this.valueConverter = valueConverter; + } + + @Override + public void convert(Block b, List bufs, List bufWriters) { + BytesRefBlock block = (BytesRefBlock) b; + try (BytesRefBlock transformed = transformValues(block)) { + super.convert(transformed, bufs, bufWriters); + } + } + + /** + * Creates a new BytesRefBlock by applying the value converter to each non null and non empty value + */ + private BytesRefBlock transformValues(BytesRefBlock block) { + try (BytesRefBlock.Builder builder = block.blockFactory().newBytesRefBlockBuilder(block.getPositionCount())) { + BytesRef scratch = new BytesRef(); + for (int i = 0; i < block.getPositionCount(); i++) { + if (block.isNull(i)) { + builder.appendNull(); + } else { + BytesRef bytes = block.getBytesRef(i, scratch); + if (bytes.length != 0) { + bytes = valueConverter.apply(bytes, scratch); + } + builder.appendBytesRef(bytes); + } + } + return builder.build(); + } + } + } + + public static class AsVarChar extends BytesRefConverter { + public AsVarChar(String esqlType) { + super(esqlType, Types.MinorType.VARCHAR); + } + } + + public static class AsVarBinary extends BytesRefConverter { + public AsVarBinary(String esqlType) { + super(esqlType, Types.MinorType.VARBINARY); + } + } + + public static class AsNull extends BlockConverter { + public AsNull(String esqlType) { + super(esqlType, Types.MinorType.NULL); + } + + @Override + public void convert(Block block, List bufs, List bufWriters) { + // Null vector in arrow has no associated buffers + // See https://arrow.apache.org/docs/format/Columnar.html#null-layout + } + } + + // Create a dummy ArrowBuf used for size accounting purposes. 
+ private static ArrowBuf dummyArrowBuf(long size) { + return new ArrowBuf(null, null, 0, 0).writerIndex(size); + } + + // Length in bytes of a validity buffer + private static int bitSetLength(int totalValues) { + return (totalValues + 7) / 8; + } + + private static void accumulateVectorValidity(List bufs, List bufWriters, Block b) { + bufs.add(dummyArrowBuf(bitSetLength(b.getPositionCount()))); + bufWriters.add(out -> { + if (b.mayHaveNulls() == false) { + return writeAllTrueValidity(out, b.getPositionCount()); + } else if (b.areAllValuesNull()) { + return writeAllFalseValidity(out, b.getPositionCount()); + } else { + return writeValidities(out, b); + } + }); + } + + private static long writeAllTrueValidity(RecyclerBytesStreamOutput out, int valueCount) { + int allOnesCount = valueCount / 8; + for (int i = 0; i < allOnesCount; i++) { + out.writeByte((byte) 0xff); + } + int remaining = valueCount % 8; + if (remaining == 0) { + return allOnesCount; + } + out.writeByte((byte) ((1 << remaining) - 1)); + return allOnesCount + 1; + } + + private static long writeAllFalseValidity(RecyclerBytesStreamOutput out, int valueCount) { + int count = bitSetLength(valueCount); + for (int i = 0; i < count; i++) { + out.writeByte((byte) 0x00); + } + return count; + } + + private static long writeValidities(RecyclerBytesStreamOutput out, Block block) { + int valueCount = block.getPositionCount(); + BitSet bits = new BitSet(valueCount); + for (int i = 0; i < block.getPositionCount(); i++) { + if (block.isNull(i) == false) { + bits.set(i); + } + } + return writeBitSet(out, bits, valueCount); + } + + private static long writeBitSet(RecyclerBytesStreamOutput out, BitSet bits, int bitCount) { + byte[] bytes = bits.toByteArray(); + out.writeBytes(bytes, 0, bytes.length); + + // toByteArray will return bytes up to the last bit set. It may therefore + // have a length lower than what is needed to actually store bitCount bits. + int expectedLength = bitSetLength(bitCount); + writeZeroes(out, expectedLength - bytes.length); + + return expectedLength; + } + + private static long writeZeroes(RecyclerBytesStreamOutput out, int byteCount) { + for (int i = 0; i < byteCount; i++) { + out.writeByte((byte) 0); + } + return byteCount; + } +} diff --git a/x-pack/plugin/esql/arrow/src/main/java/org/elasticsearch/xpack/esql/arrow/ValueConversions.java b/x-pack/plugin/esql/arrow/src/main/java/org/elasticsearch/xpack/esql/arrow/ValueConversions.java new file mode 100644 index 0000000000000..8139380aef1c8 --- /dev/null +++ b/x-pack/plugin/esql/arrow/src/main/java/org/elasticsearch/xpack/esql/arrow/ValueConversions.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.arrow; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.versionfield.Version; + +import java.io.IOException; + +/** + * Utilities to convert some of byte-encoded ESQL values into to a format more suitable + * for Arrow output. 
+ */ +public class ValueConversions { + + /** + * Shorten ipv6-mapped ipv4 IP addresses to 4 bytes + */ + public static BytesRef shortenIpV4Addresses(BytesRef value, BytesRef scratch) { + // Same logic as sun.net.util.IPAddressUtil#isIPv4MappedAddress + // See https://datatracker.ietf.org/doc/html/rfc4291#section-2.5.5.2 + if (value.length == 16) { + int pos = value.offset; + byte[] bytes = value.bytes; + boolean isIpV4 = bytes[pos++] == 0 + && bytes[pos++] == 0 + && bytes[pos++] == 0 + && bytes[pos++] == 0 + && bytes[pos++] == 0 + && bytes[pos++] == 0 + && bytes[pos++] == 0 + && bytes[pos++] == 0 + && bytes[pos++] == 0 + && bytes[pos++] == 0 + && bytes[pos++] == (byte) 0xFF + && bytes[pos] == (byte) 0xFF; + + if (isIpV4) { + scratch.bytes = value.bytes; + scratch.offset = value.offset + 12; + scratch.length = 4; + return scratch; + } + } + return value; + } + + /** + * Convert binary-encoded versions to strings + */ + public static BytesRef versionToString(BytesRef value, BytesRef scratch) { + return new BytesRef(new Version(value).toString()); + } + + /** + * Convert any xcontent source to json + */ + public static BytesRef sourceToJson(BytesRef value, BytesRef scratch) { + try { + var valueArray = new BytesArray(value); + XContentType xContentType = XContentHelper.xContentType(valueArray); + if (xContentType == XContentType.JSON) { + return value; + } else { + String json = XContentHelper.convertToJson(valueArray, false, xContentType); + return new BytesRef(json); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + } +} diff --git a/x-pack/plugin/esql/arrow/src/test/java/org/elasticsearch/xpack/esql/arrow/ArrowResponseTests.java b/x-pack/plugin/esql/arrow/src/test/java/org/elasticsearch/xpack/esql/arrow/ArrowResponseTests.java new file mode 100644 index 0000000000000..cf49b37db2805 --- /dev/null +++ b/x-pack/plugin/esql/arrow/src/test/java/org/elasticsearch/xpack/esql/arrow/ArrowResponseTests.java @@ -0,0 +1,600 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.arrow; + +import org.apache.arrow.memory.RootAllocator; +import org.apache.arrow.vector.BigIntVector; +import org.apache.arrow.vector.BitVector; +import org.apache.arrow.vector.FieldVector; +import org.apache.arrow.vector.Float8Vector; +import org.apache.arrow.vector.IntVector; +import org.apache.arrow.vector.TimeStampMilliVector; +import org.apache.arrow.vector.UInt8Vector; +import org.apache.arrow.vector.ValueVector; +import org.apache.arrow.vector.VarBinaryVector; +import org.apache.arrow.vector.VarCharVector; +import org.apache.arrow.vector.VectorSchemaRoot; +import org.apache.arrow.vector.ipc.ArrowStreamReader; +import org.apache.arrow.vector.util.VectorSchemaRootAppender; +import org.apache.lucene.document.InetAddressPoint; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.TriFunction; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.bytes.CompositeBytesReference; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVectorBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.BytesRefRecycler; +import org.elasticsearch.xpack.versionfield.Version; +import org.junit.AfterClass; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.function.BiFunction; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +public class ArrowResponseTests extends ESTestCase { + + private static final BlockFactory BLOCK_FACTORY = BlockFactory.getInstance( + new NoopCircuitBreaker("test-noop"), + BigArrays.NON_RECYCLING_INSTANCE + ); + + private static final RootAllocator ALLOCATOR = new RootAllocator(); + + @AfterClass + public static void afterClass() throws Exception { + ALLOCATOR.close(); + } + + // --------------------------------------------------------------------------------------------- + // Value creation, getters for ESQL and Arrow + + static final ValueType INTEGER_VALUES = new ValueTypeImpl( + factory -> factory.newIntBlockBuilder(0), + block -> block.appendInt(randomInt()), + (block, i, scratch) -> block.getInt(i), + IntVector::get + ); + + static final ValueType LONG_VALUES = new ValueTypeImpl( + factory -> factory.newLongBlockBuilder(0), + block -> block.appendLong(randomLong()), + (block, i, scratch) -> block.getLong(i), + BigIntVector::get + ); + + static final ValueType ULONG_VALUES = new ValueTypeImpl( + factory -> factory.newLongBlockBuilder(0), + block -> block.appendLong(randomLong()), + (block, i, scratch) -> block.getLong(i), + UInt8Vector::get + ); + + static final ValueType DATE_VALUES = new ValueTypeImpl( + factory -> factory.newLongBlockBuilder(0), + block -> block.appendLong(randomLong()), + (block, i, scratch) 
-> block.getLong(i), + TimeStampMilliVector::get + ); + + static final ValueType DOUBLE_VALUES = new ValueTypeImpl( + factory -> factory.newDoubleBlockBuilder(0), + block -> block.appendDouble(randomDouble()), + (block, i, scratch) -> block.getDouble(i), + Float8Vector::get + ); + + static final ValueType BOOLEAN_VALUES = new ValueTypeImpl( + factory -> factory.newBooleanBlockBuilder(0), + block -> block.appendBoolean(randomBoolean()), + (b, i, s) -> b.getBoolean(i), + (v, i) -> v.get(i) != 0 // Arrow's BitVector returns 0 or 1 + ); + + static final ValueType TEXT_VALUES = new ValueTypeImpl( + factory -> factory.newBytesRefBlockBuilder(0), + block -> block.appendBytesRef(new BytesRef("🚀" + randomAlphaOfLengthBetween(1, 20))), + (b, i, s) -> b.getBytesRef(i, s).utf8ToString(), + (v, i) -> new String(v.get(i), StandardCharsets.UTF_8) + ); + + static final ValueType SOURCE_VALUES = new ValueTypeImpl( + factory -> factory.newBytesRefBlockBuilder(0), + // Use a constant value, conversion is tested separately + block -> block.appendBytesRef(new BytesRef("{\"foo\": 42}")), + (b, i, s) -> b.getBytesRef(i, s).utf8ToString(), + (v, i) -> new String(v.get(i), StandardCharsets.UTF_8) + ); + + static final ValueType IP_VALUES = new ValueTypeImpl( + factory -> factory.newBytesRefBlockBuilder(0), + block -> { + byte[] addr = InetAddressPoint.encode(randomIp(randomBoolean())); + assertEquals(16, addr.length); // Make sure all is ipv6-mapped + block.appendBytesRef(new BytesRef(addr)); + }, + (b, i, s) -> ValueConversions.shortenIpV4Addresses(b.getBytesRef(i, s), new BytesRef()), + (v, i) -> new BytesRef(v.get(i)) + ); + + static final ValueType BINARY_VALUES = new ValueTypeImpl( + factory -> factory.newBytesRefBlockBuilder(0), + block -> block.appendBytesRef(new BytesRef(randomByteArrayOfLength(randomIntBetween(1, 100)))), + BytesRefBlock::getBytesRef, + (v, i) -> new BytesRef(v.get(i)) + ); + + static final ValueType VERSION_VALUES = new ValueTypeImpl( + factory -> factory.newBytesRefBlockBuilder(0), + block -> block.appendBytesRef(new Version(between(0, 100) + "." + between(0, 100) + "." + between(0, 100)).toBytesRef()), + (b, i, s) -> new Version(b.getBytesRef(i, s)).toString(), + (v, i) -> new String(v.get(i), StandardCharsets.UTF_8) + ); + + static final ValueType NULL_VALUES = new ValueTypeImpl( + factory -> factory.newBytesRefBlockBuilder(0), + Block.Builder::appendNull, + (b, i, s) -> b.isNull(i) ? null : "non-null in block", + (v, i) -> v.isNull(i) ? 
null : "non-null in vector" + ); + + static final Map VALUE_TYPES = Map.ofEntries( + Map.entry("integer", INTEGER_VALUES), + Map.entry("counter_integer", INTEGER_VALUES), + Map.entry("long", LONG_VALUES), + Map.entry("counter_long", LONG_VALUES), + Map.entry("unsigned_long", ULONG_VALUES), + Map.entry("double", DOUBLE_VALUES), + Map.entry("counter_double", DOUBLE_VALUES), + + Map.entry("text", TEXT_VALUES), + Map.entry("keyword", TEXT_VALUES), + + Map.entry("boolean", BOOLEAN_VALUES), + Map.entry("date", DATE_VALUES), + Map.entry("ip", IP_VALUES), + Map.entry("version", VERSION_VALUES), + Map.entry("_source", SOURCE_VALUES), + + Map.entry("null", NULL_VALUES), + Map.entry("unsupported", NULL_VALUES), + + // All geo types just pass-through WKB, use random binary data + Map.entry("geo_point", BINARY_VALUES), + Map.entry("geo_shape", BINARY_VALUES), + Map.entry("cartesian_point", BINARY_VALUES), + Map.entry("cartesian_shape", BINARY_VALUES) + ); + + // --------------------------------------------------------------------------------------------- + // Tests + + public void testTestHarness() { + TestColumn testColumn = TestColumn.create("foo", "integer"); + TestBlock denseBlock = TestBlock.create(BLOCK_FACTORY, testColumn, Density.Dense, 3); + TestBlock sparseBlock = TestBlock.create(BLOCK_FACTORY, testColumn, Density.Sparse, 5); + TestBlock emptyBlock = TestBlock.create(BLOCK_FACTORY, testColumn, Density.Empty, 7); + + // Test that density works as expected + assertTrue(denseBlock.block instanceof IntVectorBlock); + assertEquals("IntArrayBlock", sparseBlock.block.getClass().getSimpleName()); // non-public class + assertEquals("ConstantNullBlock", emptyBlock.block.getClass().getSimpleName()); + + // Test that values iterator scans all pages + List pages = Stream.of(denseBlock, sparseBlock, emptyBlock).map(b -> new TestPage(List.of(b))).toList(); + TestCase tc = new TestCase(List.of(testColumn), pages); + EsqlValuesIterator valuesIterator = new EsqlValuesIterator(tc, 0); + int count = 0; + while (valuesIterator.hasNext()) { + valuesIterator.next(); + count++; + } + assertEquals(3 + 5 + 7, count); + + // Test that we have value types for all types + List converters = new ArrayList<>(ArrowResponse.ESQL_CONVERTERS.keySet()); + Collections.sort(converters); + List valueTypes = new ArrayList<>(VALUE_TYPES.keySet()); + Collections.sort(valueTypes); + assertEquals("Missing test value types", converters, valueTypes); + } + + /** + * Test single-column for all types with a mix of dense/sparse/empty pages + */ + public void testSingleColumn() throws IOException { + for (var type : VALUE_TYPES.keySet()) { + TestColumn testColumn = new TestColumn("foo", type, VALUE_TYPES.get(type)); + List pages = new ArrayList<>(); + + for (var density : Density.values()) { + TestBlock testBlock = TestBlock.create(BLOCK_FACTORY, testColumn, density, 10); + TestPage testPage = new TestPage(List.of(testBlock)); + pages.add(testPage); + } + TestCase testCase = new TestCase(List.of(testColumn), pages); + + compareEsqlAndArrow(testCase); + } + } + + public void testSingleBlock() throws IOException { + // Simple test to easily focus on a specific type & density + String type = "text"; + Density density = Density.Dense; + + TestColumn testColumn = new TestColumn("foo", type, VALUE_TYPES.get(type)); + List pages = new ArrayList<>(); + + TestBlock testBlock = TestBlock.create(BLOCK_FACTORY, testColumn, density, 10); + TestPage testPage = new TestPage(List.of(testBlock)); + pages.add(testPage); + + TestCase testCase = new 
TestCase(List.of(testColumn), pages); + + compareEsqlAndArrow(testCase); + } + + /** + * Test that multivalued arrays are rejected + */ + public void testMultivaluedField() throws IOException { + IntBlock.Builder builder = BLOCK_FACTORY.newIntBlockBuilder(0); + builder.appendInt(42); + builder.appendNull(); + builder.beginPositionEntry(); + builder.appendInt(44); + builder.appendInt(45); + builder.endPositionEntry(); + builder.appendInt(46); + IntBlock block = builder.build(); + + // Consistency check + assertTrue(block.mayHaveMultivaluedFields()); + assertEquals(0, block.getFirstValueIndex(0)); + assertEquals(1, block.getValueCount(0)); + + // null values still use one position in the array + assertEquals(0, block.getValueCount(1)); + assertEquals(1, block.getFirstValueIndex(1)); + assertTrue(block.isNull(1)); + assertEquals(0, block.getInt(1)); + + assertEquals(2, block.getFirstValueIndex(2)); + assertEquals(2, block.getValueCount(2)); + assertEquals(2, block.getFirstValueIndex(2)); + assertEquals(45, block.getInt(block.getFirstValueIndex(2) + 1)); + + assertEquals(4, block.getFirstValueIndex(3)); + + var column = TestColumn.create("some-field", "integer"); + TestCase testCase = new TestCase(List.of(column), List.of(new TestPage(List.of(new TestBlock(column, block, Density.Dense))))); + + IllegalArgumentException exc = assertThrows(IllegalArgumentException.class, () -> compareEsqlAndArrow(testCase)); + + assertEquals("ES|QL response field [some-field] is multi-valued. This isn't supported yet by the Arrow format", exc.getMessage()); + + } + + /** + * Test a random set of types/columns/pages/densities + */ + public void testRandomTypesAndSize() throws IOException { + + // Shuffle types to randomize their succession in the Arrow stream + List types = new ArrayList<>(VALUE_TYPES.keySet()); + Collections.shuffle(types, random()); + + List columns = types.stream().map(type -> TestColumn.create("col-" + type, type)).toList(); + + List pages = IntStream + // 1 to 10 pages of random density and 1 to 1000 values + .range(0, randomIntBetween(1, 100)) + .mapToObj(i -> TestPage.create(BLOCK_FACTORY, columns)) + .toList(); + + TestCase testCase = new TestCase(columns, pages); + // System.out.println(testCase); + // for (TestPage page: pages) { + // System.out.println(page); + // } + + compareEsqlAndArrow(testCase); + } + + // --------------------------------------------------------------------------------------------- + // Test harness + + private void compareEsqlAndArrow(TestCase testCase) throws IOException { + try (VectorSchemaRoot arrowVectors = toArrowVectors(testCase)) { + compareEsqlAndArrow(testCase, arrowVectors); + } + } + + private void compareEsqlAndArrow(TestCase testCase, VectorSchemaRoot root) { + for (int i = 0; i < testCase.columns.size(); i++) { + + // Check esql type in the metadata + var metadata = root.getSchema().getFields().get(i).getMetadata(); + assertEquals(testCase.columns.get(i).type, metadata.get("elastic:type")); + + // Check values + var esqlValuesIterator = new EsqlValuesIterator(testCase, i); + var arrowValuesIterator = new ArrowValuesIterator(testCase, root, i); + + while (esqlValuesIterator.hasNext() && arrowValuesIterator.hasNext()) { + assertEquals(esqlValuesIterator.next(), arrowValuesIterator.next()); + } + + // Make sure we entirely consumed both sides. 
+ assertFalse(esqlValuesIterator.hasNext()); + assertFalse(arrowValuesIterator.hasNext()); + } + } + + private VectorSchemaRoot toArrowVectors(TestCase testCase) throws IOException { + ArrowResponse response = new ArrowResponse( + testCase.columns.stream().map(c -> new ArrowResponse.Column(c.type, c.name)).toList(), + testCase.pages.stream().map(p -> new Page(p.blocks.stream().map(b -> b.block).toArray(Block[]::new))).toList() + ); + + assertEquals("application/vnd.apache.arrow.stream", response.getResponseContentTypeString()); + + BytesReference bytes = serializeBlocksDirectly(response); + try ( + ArrowStreamReader reader = new ArrowStreamReader(bytes.streamInput(), ALLOCATOR); + VectorSchemaRoot readerRoot = reader.getVectorSchemaRoot(); + ) { + VectorSchemaRoot root = VectorSchemaRoot.create(readerRoot.getSchema(), ALLOCATOR); + root.allocateNew(); + + while (reader.loadNextBatch()) { + VectorSchemaRootAppender.append(root, readerRoot); + } + + return root; + } + } + + /** + * An iterator over values of a column across all pages. + */ + static class EsqlValuesIterator implements Iterator { + private final int fieldPos; + private final ValueType type; + private final BytesRef scratch = new BytesRef(); + private final Iterator pages; + + private TestPage page; + private int position; + + EsqlValuesIterator(TestCase testCase, int column) { + this.fieldPos = column; + this.type = testCase.columns.get(column).valueType; + this.position = 0; + this.pages = testCase.pages.iterator(); + this.page = pages.next(); + } + + @Override + public boolean hasNext() { + return page != null; + } + + @Override + public Object next() { + if (page == null) { + throw new NoSuchElementException(); + } + Block block = page.blocks.get(fieldPos).block; + Object result = block.isNull(position) ? null : type.valueAt(block, position, scratch); + position++; + if (position >= block.getPositionCount()) { + position = 0; + page = pages.hasNext() ? pages.next() : null; + } + return result; + } + } + + static class ArrowValuesIterator implements Iterator { + private final ValueType type; + private ValueVector vector; + private int position; + + ArrowValuesIterator(TestCase testCase, VectorSchemaRoot root, int column) { + this(root.getVector(column), testCase.columns.get(column).valueType); + } + + ArrowValuesIterator(ValueVector vector, ValueType type) { + this.vector = vector; + this.type = type; + } + + @Override + public boolean hasNext() { + return vector != null; + } + + @Override + public Object next() { + if (vector == null) { + throw new NoSuchElementException(); + } + Object result = vector.isNull(position) ? null : type.valueAt(vector, position); + position++; + if (position >= vector.getValueCount()) { + vector = null; + } + return result; + } + } + + private BytesReference serializeBlocksDirectly(ArrowResponse body) throws IOException { + // Ensure there's a single part, this will fail if we ever change it. 
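For context, a hedged sketch of how a consumer outside this test harness could read the same application/vnd.apache.arrow.stream body with the Arrow Java library, mirroring what toArrowVectors() above does; the byte[] input and the class name are hypothetical:

import java.io.ByteArrayInputStream;

import org.apache.arrow.memory.RootAllocator;
import org.apache.arrow.vector.VectorSchemaRoot;
import org.apache.arrow.vector.ipc.ArrowStreamReader;

class ArrowStreamClientSketch {
    // responseBytes is assumed to hold a complete ES|QL Arrow response body.
    static void dump(byte[] responseBytes) throws Exception {
        try (
            RootAllocator allocator = new RootAllocator();
            ArrowStreamReader reader = new ArrowStreamReader(new ByteArrayInputStream(responseBytes), allocator)
        ) {
            VectorSchemaRoot root = reader.getVectorSchemaRoot();
            System.out.println("schema: " + root.getSchema());
            // Each record batch in the stream corresponds to one ES|QL page.
            while (reader.loadNextBatch()) {
                System.out.println(root.contentToTSVString());
            }
        }
    }
}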
+ assertTrue(body.isLastPart()); + + List ourEncoding = new ArrayList<>(); + int page = 0; + while (body.isPartComplete() == false) { + ourEncoding.add(body.encodeChunk(1500, BytesRefRecycler.NON_RECYCLING_INSTANCE)); + page++; + } + return CompositeBytesReference.of(ourEncoding.toArray(BytesReference[]::new)); + } + + record TestCase(List columns, List pages) { + @Override + public String toString() { + return pages.size() + " pages of " + columns.stream().map(TestColumn::type).collect(Collectors.joining("|")); + } + } + + record TestColumn(String name, String type, ValueType valueType) { + static TestColumn create(String name, String type) { + return new TestColumn(name, type, VALUE_TYPES.get(type)); + } + } + + record TestPage(List blocks) { + + static TestPage create(BlockFactory factory, List columns) { + int size = randomIntBetween(1, 1000); + return new TestPage(columns.stream().map(column -> TestBlock.create(factory, column, size)).toList()); + } + + @Override + public String toString() { + return blocks.get(0).block.getPositionCount() + + " items - " + + blocks.stream().map(b -> b.density.toString()).collect(Collectors.joining("|")); + } + } + + record TestBlock(TestColumn column, Block block, Density density) { + + static TestBlock create(BlockFactory factory, TestColumn column, int positions) { + return create(factory, column, randomFrom(Density.values()), positions); + } + + static TestBlock create(BlockFactory factory, TestColumn column, Density density, int positions) { + ValueType valueType = column.valueType(); + Block block; + if (density == Density.Empty) { + block = factory.newConstantNullBlock(positions); + } else { + Block.Builder builder = valueType.createBlockBuilder(factory); + int start = 0; + if (density == Density.Sparse && positions >= 2) { + // Make sure it's really sparse even if randomness of values may decide otherwise + valueType.addValue(builder, Density.Dense); + valueType.addValue(builder, Density.Empty); + start = 2; + } + for (int i = start; i < positions; i++) { + valueType.addValue(builder, density); + } + // Will create an ArrayBlock if there are null values, VectorBlock otherwise + block = builder.build(); + } + return new TestBlock(column, block, density); + } + } + + public enum Density { + Empty, + Sparse, + Dense; + + boolean nextIsNull() { + return switch (this) { + case Empty -> true; + case Sparse -> randomBoolean(); + case Dense -> false; + }; + } + } + + interface ValueType { + Block.Builder createBlockBuilder(BlockFactory factory); + + void addValue(Block.Builder builder, Density density); + + Object valueAt(Block block, int position, BytesRef scratch); + + Object valueAt(ValueVector arrowVec, int position); + } + + public static class ValueTypeImpl + implements + ValueType { + private final Function builderCreator; + private final Consumer valueAdder; + private final TriFunction blockGetter; + private final BiFunction vectorGetter; + + public ValueTypeImpl( + Function builderCreator, + Consumer valueAdder, + TriFunction blockGetter, + BiFunction vectorGetter + ) { + this.builderCreator = builderCreator; + this.valueAdder = valueAdder; + this.blockGetter = blockGetter; + this.vectorGetter = vectorGetter; + } + + @Override + public Block.Builder createBlockBuilder(BlockFactory factory) { + return builderCreator.apply(factory); + } + + @Override + @SuppressWarnings("unchecked") + public void addValue(Block.Builder builder, Density density) { + if (density.nextIsNull()) { + builder.appendNull(); + } else { + valueAdder.accept((BlockBT) 
builder); + } + } + + @Override + @SuppressWarnings("unchecked") + public Object valueAt(Block block, int position, BytesRef scratch) { + return blockGetter.apply((BlockT) block, position, scratch); + } + + @Override + @SuppressWarnings("unchecked") + public Object valueAt(ValueVector arrowVec, int position) { + return vectorGetter.apply((VectorT) arrowVec, position); + } + } +} diff --git a/x-pack/plugin/esql/arrow/src/test/java/org/elasticsearch/xpack/esql/arrow/ValueConversionsTests.java b/x-pack/plugin/esql/arrow/src/test/java/org/elasticsearch/xpack/esql/arrow/ValueConversionsTests.java new file mode 100644 index 0000000000000..e700bbd6a3eb5 --- /dev/null +++ b/x-pack/plugin/esql/arrow/src/test/java/org/elasticsearch/xpack/esql/arrow/ValueConversionsTests.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.arrow; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xpack.esql.core.util.StringUtils; +import org.elasticsearch.xpack.versionfield.Version; + +public class ValueConversionsTests extends ESTestCase { + + public void testIpConversion() throws Exception { + { + // ipv6 address + BytesRef bytes = StringUtils.parseIP("2a00:1450:4007:818::200e"); + assertArrayEquals( + new byte[] { 0x2a, 0x00, 0x14, 0x50, 0x40, 0x07, 0x08, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x0e }, + bytes.bytes + ); + + BytesRef scratch = new BytesRef(); + BytesRef bytes2 = ValueConversions.shortenIpV4Addresses(bytes.clone(), scratch); + assertEquals(bytes, bytes2); + } + { + // ipv6 mapped ipv4 address + BytesRef bytes = StringUtils.parseIP("216.58.214.174"); + assertArrayEquals( + new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, (byte) 0xFF, (byte) 0xFF, (byte) 216, (byte) 58, (byte) 214, (byte) 174 }, + bytes.bytes + ); + + BytesRef scratch = new BytesRef(); + BytesRef bytes2 = ValueConversions.shortenIpV4Addresses(bytes.clone(), scratch); + + assertTrue(new BytesRef(new byte[] { (byte) 216, (byte) 58, (byte) 214, (byte) 174 }).bytesEquals(bytes2)); + + } + } + + public void testVersionConversion() { + String version = "1.2.3-alpha"; + + BytesRef bytes = new Version("1.2.3-alpha").toBytesRef(); + + BytesRef scratch = new BytesRef(); + BytesRef bytes2 = ValueConversions.versionToString(bytes, scratch); + + // Some conversion happened + assertNotEquals(bytes.length, bytes2.length); + assertEquals(version, bytes2.utf8ToString()); + } + + public void testSourceToJson() throws Exception { + BytesRef bytes = new BytesRef("{\"foo\": 42}"); + + BytesRef scratch = new BytesRef(); + BytesRef bytes2 = ValueConversions.sourceToJson(bytes, scratch); + // No change, even indentation + assertEquals("{\"foo\": 42}", bytes2.utf8ToString()); + } + + public void testCborSourceToJson() throws Exception { + XContentBuilder builder = XContentFactory.cborBuilder(); + builder.startObject(); + builder.field("foo", 42); + builder.endObject(); + builder.close(); + BytesRef bytesRef = BytesReference.bytes(builder).toBytesRef(); + + BytesRef scratch = new BytesRef(); + BytesRef bytes2 = ValueConversions.sourceToJson(bytesRef, scratch); + // Converted to 
JSON + assertEquals("{\"foo\":42}", bytes2.utf8ToString()); + } +} diff --git a/x-pack/plugin/esql/arrow/src/test/resources/plugin-security.policy b/x-pack/plugin/esql/arrow/src/test/resources/plugin-security.policy new file mode 100644 index 0000000000000..c5da65410d3da --- /dev/null +++ b/x-pack/plugin/esql/arrow/src/test/resources/plugin-security.policy @@ -0,0 +1,13 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +// Needed by the Arrow memory manager +grant { + permission java.lang.RuntimePermission "accessDeclaredMembers"; + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; + permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; +}; diff --git a/x-pack/plugin/esql/build.gradle b/x-pack/plugin/esql/build.gradle index dbec0963d1aab..1694115aaa71d 100644 --- a/x-pack/plugin/esql/build.gradle +++ b/x-pack/plugin/esql/build.gradle @@ -25,6 +25,8 @@ dependencies { implementation project('compute:ann') implementation project(':libs:elasticsearch-dissect') implementation project(':libs:elasticsearch-grok') + implementation project('arrow') + // Also contains a dummy processor to allow compilation with unused annotations. annotationProcessor project('compute:gen') @@ -46,9 +48,9 @@ dependencies { } tasks.named("compileJava").configure { - options.compilerArgs.addAll(["-s", "${projectDir}/src/main/generated"]) + options.compilerArgs.addAll(["-s", "src/main/generated"]) // IntelliJ sticks generated files here and we can't stop it.... - exclude { it.file.toString().startsWith("${projectDir}/src/main/generated-src/generated") } + exclude { it.file.toString().contains("src/main/generated-src/generated") } } interface Injected { @@ -254,8 +256,8 @@ tasks.register("regen") { tasks.named("spotlessJava") { dependsOn stringTemplates } tasks.named('checkstyleMain').configure { excludes = [ "**/*.java.st" ] - exclude { it.file.toString().startsWith("${projectDir}/src/main/generated-src/generated") } - exclude { it.file.toString().startsWith("${projectDir}/src/main/generated") } + exclude { it.file.toString().contains("src/main/generated-src/generated") } + exclude { it.file.toString().contains("src/main/generated") } } def prop(Type, type, TYPE, BYTES, Array) { @@ -308,7 +310,3 @@ tasks.named('stringTemplates').configure { it.outputFile = "org/elasticsearch/xpack/esql/enrich/EnrichResultBuilderForBoolean.java" } } - -tasks.withType(CheckForbiddenApisTask).configureEach { - signaturesFiles += files('src/main/resources/forbidden/ql-signatures.txt') -} diff --git a/x-pack/plugin/esql/compute/build.gradle b/x-pack/plugin/esql/compute/build.gradle index c7fa29c6a91f0..e5816d0b7c78b 100644 --- a/x-pack/plugin/esql/compute/build.gradle +++ b/x-pack/plugin/esql/compute/build.gradle @@ -17,7 +17,7 @@ dependencies { } tasks.named("compileJava").configure { - options.compilerArgs.addAll(["-s", "${projectDir}/src/main/generated"]) + options.compilerArgs.addAll(["-s", "src/main/generated"]) } tasks.named('checkstyleMain').configure { @@ -400,6 +400,11 @@ tasks.named('stringTemplates').configure { it.outputFile = "org/elasticsearch/compute/data/BooleanVectorFixedBuilder.java" } File stateInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/aggregation/X-State.java.st") + template { + it.properties = booleanProperties + it.inputFile = 
stateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/BooleanState.java" + } template { it.properties = intProperties it.inputFile = stateInputFile @@ -453,6 +458,11 @@ tasks.named('stringTemplates').configure { it.outputFile = "org/elasticsearch/compute/data/BooleanLookup.java" } File arrayStateInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/aggregation/X-ArrayState.java.st") + template { + it.properties = booleanProperties + it.inputFile = arrayStateInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/BooleanArrayState.java" + } template { it.properties = intProperties it.inputFile = arrayStateInputFile @@ -523,26 +533,26 @@ tasks.named('stringTemplates').configure { } - File topListAggregatorInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/aggregation/X-TopListAggregator.java.st") + File topAggregatorInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/aggregation/X-TopAggregator.java.st") template { it.properties = intProperties - it.inputFile = topListAggregatorInputFile - it.outputFile = "org/elasticsearch/compute/aggregation/TopListIntAggregator.java" + it.inputFile = topAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/TopIntAggregator.java" } template { it.properties = longProperties - it.inputFile = topListAggregatorInputFile - it.outputFile = "org/elasticsearch/compute/aggregation/TopListLongAggregator.java" + it.inputFile = topAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/TopLongAggregator.java" } template { it.properties = floatProperties - it.inputFile = topListAggregatorInputFile - it.outputFile = "org/elasticsearch/compute/aggregation/TopListFloatAggregator.java" + it.inputFile = topAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/TopFloatAggregator.java" } template { it.properties = doubleProperties - it.inputFile = topListAggregatorInputFile - it.outputFile = "org/elasticsearch/compute/aggregation/TopListDoubleAggregator.java" + it.inputFile = topAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/TopDoubleAggregator.java" } File multivalueDedupeInputFile = file("src/main/java/org/elasticsearch/compute/operator/mvdedupe/X-MultivalueDedupe.java.st") diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java index 1127d4b4ccb72..b3d32a82cc7a9 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java @@ -445,6 +445,8 @@ String intermediateStateRowAccess() { private String primitiveStateMethod() { switch (stateType.toString()) { + case "org.elasticsearch.compute.aggregation.BooleanState": + return "booleanValue"; case "org.elasticsearch.compute.aggregation.IntState": return "intValue"; case "org.elasticsearch.compute.aggregation.LongState": @@ -494,6 +496,9 @@ private MethodSpec evaluateFinal() { private void primitiveStateToResult(MethodSpec.Builder builder) { switch (stateType.toString()) { + case "org.elasticsearch.compute.aggregation.BooleanState": + builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantBooleanBlockWith(state.booleanValue(), 1)"); + return; case "org.elasticsearch.compute.aggregation.IntState": 
builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantIntBlockWith(state.intValue(), 1)"); return; @@ -531,8 +536,9 @@ private MethodSpec close() { private boolean hasPrimitiveState() { return switch (stateType.toString()) { - case "org.elasticsearch.compute.aggregation.IntState", "org.elasticsearch.compute.aggregation.LongState", - "org.elasticsearch.compute.aggregation.DoubleState", "org.elasticsearch.compute.aggregation.FloatState" -> true; + case "org.elasticsearch.compute.aggregation.BooleanState", "org.elasticsearch.compute.aggregation.IntState", + "org.elasticsearch.compute.aggregation.LongState", "org.elasticsearch.compute.aggregation.DoubleState", + "org.elasticsearch.compute.aggregation.FloatState" -> true; default -> false; }; } diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java index c9cdcfe42fddd..79df41f304c06 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java @@ -584,8 +584,9 @@ private MethodSpec close() { private boolean hasPrimitiveState() { return switch (stateType.toString()) { - case "org.elasticsearch.compute.aggregation.IntArrayState", "org.elasticsearch.compute.aggregation.LongArrayState", - "org.elasticsearch.compute.aggregation.DoubleArrayState", "org.elasticsearch.compute.aggregation.FloatArrayState" -> true; + case "org.elasticsearch.compute.aggregation.BooleanArrayState", "org.elasticsearch.compute.aggregation.IntArrayState", + "org.elasticsearch.compute.aggregation.LongArrayState", "org.elasticsearch.compute.aggregation.DoubleArrayState", + "org.elasticsearch.compute.aggregation.FloatArrayState" -> true; default -> false; }; } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanArrayState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanArrayState.java new file mode 100644 index 0000000000000..79f4a88d403c6 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanArrayState.java @@ -0,0 +1,121 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasables; + +/** + * Aggregator state for an array of booleans. It is created in a mode where it + * won't track the {@code groupId}s that are sent to it and it is the + * responsibility of the caller to only fetch values for {@code groupId}s + * that it has sent using the {@code selected} parameter when building the + * results. This is fine when there are no {@code null} values in the input + * data. 
But once there are null values in the input data it is + * much more convenient to only send non-null values and + * the tracking built into the grouping code can't track that. In that case + * call {@link #enableGroupIdTracking} to transition the state into a mode + * where it'll track which {@code groupIds} have been written. + * <p>
+ * This class is generated. Do not edit it. + * </p>
      + */ +final class BooleanArrayState extends AbstractArrayState implements GroupingAggregatorState { + private final boolean init; + + private BitArray values; + private int size; + + BooleanArrayState(BigArrays bigArrays, boolean init) { + super(bigArrays); + this.values = new BitArray(1, bigArrays); + this.size = 1; + this.values.set(0, init); + this.init = init; + } + + boolean get(int groupId) { + return values.get(groupId); + } + + boolean getOrDefault(int groupId) { + return groupId < values.size() ? values.get(groupId) : init; + } + + void set(int groupId, boolean value) { + ensureCapacity(groupId); + values.set(groupId, value); + trackGroupId(groupId); + } + + Block toValuesBlock(org.elasticsearch.compute.data.IntVector selected, DriverContext driverContext) { + if (false == trackingGroupIds()) { + try (var builder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + builder.appendBoolean(i, values.get(selected.getInt(i))); + } + return builder.build().asBlock(); + } + } + try (BooleanBlock.Builder builder = driverContext.blockFactory().newBooleanBlockBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (hasValue(group)) { + builder.appendBoolean(values.get(group)); + } else { + builder.appendNull(); + } + } + return builder.build(); + } + } + + private void ensureCapacity(int groupId) { + if (groupId >= size) { + values.fill(size, groupId + 1, init); + size = groupId + 1; + } + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate( + Block[] blocks, + int offset, + IntVector selected, + org.elasticsearch.compute.operator.DriverContext driverContext + ) { + assert blocks.length >= offset + 2; + try ( + var valuesBuilder = driverContext.blockFactory().newBooleanBlockBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) + ) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + if (group < values.size()) { + valuesBuilder.appendBoolean(values.get(group)); + } else { + valuesBuilder.appendBoolean(false); // TODO can we just use null? + } + hasValueBuilder.appendBoolean(i, hasValue(group)); + } + blocks[offset + 0] = valuesBuilder.build(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); + } + } + + @Override + public void close() { + Releasables.close(values, super::close); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanState.java new file mode 100644 index 0000000000000..7d225c7c06a72 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/BooleanState.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * Aggregator state for a single boolean. + * This class is generated. Do not edit it. 
+ */ +final class BooleanState implements AggregatorState { + private boolean value; + private boolean seen; + + BooleanState() { + this(false); + } + + BooleanState(boolean init) { + this.value = init; + } + + boolean booleanValue() { + return value; + } + + void booleanValue(boolean value) { + this.value = value; + } + + boolean seen() { + return seen; + } + + void seen(boolean seen) { + this.seen = seen; + } + + /** Extracts an intermediate view of the contents of this state. */ + @Override + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + assert blocks.length >= offset + 2; + blocks[offset + 0] = driverContext.blockFactory().newConstantBooleanBlockWith(value, 1); + blocks[offset + 1] = driverContext.blockFactory().newConstantBooleanBlockWith(seen, 1); + } + + @Override + public void close() {} +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopDoubleAggregator.java similarity index 97% rename from x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListDoubleAggregator.java rename to x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopDoubleAggregator.java index 941722b4424d3..3bd76b79d62f2 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListDoubleAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopDoubleAggregator.java @@ -24,9 +24,9 @@ /** * Aggregates the top N field values for double. */ -@Aggregator({ @IntermediateState(name = "topList", type = "DOUBLE_BLOCK") }) +@Aggregator({ @IntermediateState(name = "top", type = "DOUBLE_BLOCK") }) @GroupingAggregator -class TopListDoubleAggregator { +class TopDoubleAggregator { public static SingleState initSingle(BigArrays bigArrays, int limit, boolean ascending) { return new SingleState(bigArrays, limit, ascending); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListFloatAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopFloatAggregator.java similarity index 97% rename from x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListFloatAggregator.java rename to x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopFloatAggregator.java index c5fc51d5ba13f..066c82e9448fb 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListFloatAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopFloatAggregator.java @@ -24,9 +24,9 @@ /** * Aggregates the top N field values for float. 
*/ -@Aggregator({ @IntermediateState(name = "topList", type = "FLOAT_BLOCK") }) +@Aggregator({ @IntermediateState(name = "top", type = "FLOAT_BLOCK") }) @GroupingAggregator -class TopListFloatAggregator { +class TopFloatAggregator { public static SingleState initSingle(BigArrays bigArrays, int limit, boolean ascending) { return new SingleState(bigArrays, limit, ascending); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListIntAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopIntAggregator.java similarity index 97% rename from x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListIntAggregator.java rename to x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopIntAggregator.java index dafbf1c2a3051..2f5149c594d94 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListIntAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopIntAggregator.java @@ -24,9 +24,9 @@ /** * Aggregates the top N field values for int. */ -@Aggregator({ @IntermediateState(name = "topList", type = "INT_BLOCK") }) +@Aggregator({ @IntermediateState(name = "top", type = "INT_BLOCK") }) @GroupingAggregator -class TopListIntAggregator { +class TopIntAggregator { public static SingleState initSingle(BigArrays bigArrays, int limit, boolean ascending) { return new SingleState(bigArrays, limit, ascending); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListLongAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopLongAggregator.java similarity index 97% rename from x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListLongAggregator.java rename to x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopLongAggregator.java index c0e7122a4be0b..d6bafaa30c425 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopListLongAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/TopLongAggregator.java @@ -24,9 +24,9 @@ /** * Aggregates the top N field values for long. */ -@Aggregator({ @IntermediateState(name = "topList", type = "LONG_BLOCK") }) +@Aggregator({ @IntermediateState(name = "top", type = "LONG_BLOCK") }) @GroupingAggregator -class TopListLongAggregator { +class TopLongAggregator { public static SingleState initSingle(BigArrays bigArrays, int limit, boolean ascending) { return new SingleState(bigArrays, limit, ascending); } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanAggregatorFunction.java new file mode 100644 index 0000000000000..2ffbcfc2d9458 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanAggregatorFunction.java @@ -0,0 +1,136 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
+package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link MaxBooleanAggregator}. + * This class is generated. Do not edit it. + */ +public final class MaxBooleanAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("max", ElementType.BOOLEAN), + new IntermediateStateDesc("seen", ElementType.BOOLEAN) ); + + private final DriverContext driverContext; + + private final BooleanState state; + + private final List channels; + + public MaxBooleanAggregatorFunction(DriverContext driverContext, List channels, + BooleanState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static MaxBooleanAggregatorFunction create(DriverContext driverContext, + List channels) { + return new MaxBooleanAggregatorFunction(driverContext, channels, new BooleanState(MaxBooleanAggregator.init())); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page) { + BooleanBlock block = page.getBlock(channels.get(0)); + BooleanVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + } + + private void addRawVector(BooleanVector vector) { + state.seen(true); + for (int i = 0; i < vector.getPositionCount(); i++) { + state.booleanValue(MaxBooleanAggregator.combine(state.booleanValue(), vector.getBoolean(i))); + } + } + + private void addRawBlock(BooleanBlock block) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + state.seen(true); + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + state.booleanValue(MaxBooleanAggregator.combine(state.booleanValue(), block.getBoolean(i))); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block maxUncast = page.getBlock(channels.get(0)); + if (maxUncast.areAllValuesNull()) { + return; + } + BooleanVector max = ((BooleanBlock) maxUncast).asVector(); + assert max.getPositionCount() == 1; + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; + if (seen.getBoolean(0)) { + state.booleanValue(MaxBooleanAggregator.combine(state.booleanValue(), max.getBoolean(0))); + state.seen(true); + } + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + if (state.seen() == false) { + 
blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); + return; + } + blocks[offset] = driverContext.blockFactory().newConstantBooleanBlockWith(state.booleanValue(), 1); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..e5bbf63ddee07 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanAggregatorFunctionSupplier.java @@ -0,0 +1,38 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link MaxBooleanAggregator}. + * This class is generated. Do not edit it. + */ +public final class MaxBooleanAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List channels; + + public MaxBooleanAggregatorFunctionSupplier(List channels) { + this.channels = channels; + } + + @Override + public MaxBooleanAggregatorFunction aggregator(DriverContext driverContext) { + return MaxBooleanAggregatorFunction.create(driverContext, channels); + } + + @Override + public MaxBooleanGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return MaxBooleanGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "max of booleans"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..f404fccd45d51 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanGroupingAggregatorFunction.java @@ -0,0 +1,206 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
+package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link MaxBooleanAggregator}. + * This class is generated. Do not edit it. + */ +public final class MaxBooleanGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("max", ElementType.BOOLEAN), + new IntermediateStateDesc("seen", ElementType.BOOLEAN) ); + + private final BooleanArrayState state; + + private final List channels; + + private final DriverContext driverContext; + + public MaxBooleanGroupingAggregatorFunction(List channels, BooleanArrayState state, + DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static MaxBooleanGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new MaxBooleanGroupingAggregatorFunction(channels, new BooleanArrayState(driverContext.bigArrays(), MaxBooleanAggregator.init()), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + BooleanBlock valuesBlock = page.getBlock(channels.get(0)); + BooleanVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, BooleanBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + state.set(groupId, MaxBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(v))); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, BooleanVector values) { + for (int 
groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + state.set(groupId, MaxBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(groupPosition + positionOffset))); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, BooleanBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + state.set(groupId, MaxBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(v))); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, BooleanVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + state.set(groupId, MaxBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(groupPosition + positionOffset))); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block maxUncast = page.getBlock(channels.get(0)); + if (maxUncast.areAllValuesNull()) { + return; + } + BooleanVector max = ((BooleanBlock) maxUncast).asVector(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert max.getPositionCount() == seen.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (seen.getBoolean(groupPosition + positionOffset)) { + state.set(groupId, MaxBooleanAggregator.combine(state.getOrDefault(groupId), max.getBoolean(groupPosition + positionOffset))); + } + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + BooleanArrayState inState = ((MaxBooleanGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + if (inState.hasValue(position)) { + state.set(groupId, MaxBooleanAggregator.combine(state.getOrDefault(groupId), inState.get(position))); + } + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + 
blocks[offset] = state.toValuesBlock(selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanAggregatorFunction.java new file mode 100644 index 0000000000000..101a6c7f9169a --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanAggregatorFunction.java @@ -0,0 +1,136 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link MinBooleanAggregator}. + * This class is generated. Do not edit it. + */ +public final class MinBooleanAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("min", ElementType.BOOLEAN), + new IntermediateStateDesc("seen", ElementType.BOOLEAN) ); + + private final DriverContext driverContext; + + private final BooleanState state; + + private final List channels; + + public MinBooleanAggregatorFunction(DriverContext driverContext, List channels, + BooleanState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static MinBooleanAggregatorFunction create(DriverContext driverContext, + List channels) { + return new MinBooleanAggregatorFunction(driverContext, channels, new BooleanState(MinBooleanAggregator.init())); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page) { + BooleanBlock block = page.getBlock(channels.get(0)); + BooleanVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + } + + private void addRawVector(BooleanVector vector) { + state.seen(true); + for (int i = 0; i < vector.getPositionCount(); i++) { + state.booleanValue(MinBooleanAggregator.combine(state.booleanValue(), vector.getBoolean(i))); + } + } + + private void addRawBlock(BooleanBlock block) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + state.seen(true); + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + state.booleanValue(MinBooleanAggregator.combine(state.booleanValue(), block.getBoolean(i))); + } + 
} + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block minUncast = page.getBlock(channels.get(0)); + if (minUncast.areAllValuesNull()) { + return; + } + BooleanVector min = ((BooleanBlock) minUncast).asVector(); + assert min.getPositionCount() == 1; + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; + if (seen.getBoolean(0)) { + state.booleanValue(MinBooleanAggregator.combine(state.booleanValue(), min.getBoolean(0))); + state.seen(true); + } + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + if (state.seen() == false) { + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); + return; + } + blocks[offset] = driverContext.blockFactory().newConstantBooleanBlockWith(state.booleanValue(), 1); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..f66dc6e67e0fd --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanAggregatorFunctionSupplier.java @@ -0,0 +1,38 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link MinBooleanAggregator}. + * This class is generated. Do not edit it. 
+ */ +public final class MinBooleanAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List channels; + + public MinBooleanAggregatorFunctionSupplier(List channels) { + this.channels = channels; + } + + @Override + public MinBooleanAggregatorFunction aggregator(DriverContext driverContext) { + return MinBooleanAggregatorFunction.create(driverContext, channels); + } + + @Override + public MinBooleanGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return MinBooleanGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "min of booleans"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..6175cad3924e2 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanGroupingAggregatorFunction.java @@ -0,0 +1,206 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link MinBooleanAggregator}. + * This class is generated. Do not edit it. 
+ */ +public final class MinBooleanGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("min", ElementType.BOOLEAN), + new IntermediateStateDesc("seen", ElementType.BOOLEAN) ); + + private final BooleanArrayState state; + + private final List channels; + + private final DriverContext driverContext; + + public MinBooleanGroupingAggregatorFunction(List channels, BooleanArrayState state, + DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static MinBooleanGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new MinBooleanGroupingAggregatorFunction(channels, new BooleanArrayState(driverContext.bigArrays(), MinBooleanAggregator.init()), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + BooleanBlock valuesBlock = page.getBlock(channels.get(0)); + BooleanVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, BooleanBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + state.set(groupId, MinBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(v))); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, BooleanVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + state.set(groupId, MinBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(groupPosition + positionOffset))); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, BooleanBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = 
Math.toIntExact(groups.getInt(g)); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + state.set(groupId, MinBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(v))); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, BooleanVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = Math.toIntExact(groups.getInt(g)); + state.set(groupId, MinBooleanAggregator.combine(state.getOrDefault(groupId), values.getBoolean(groupPosition + positionOffset))); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block minUncast = page.getBlock(channels.get(0)); + if (minUncast.areAllValuesNull()) { + return; + } + BooleanVector min = ((BooleanBlock) minUncast).asVector(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert min.getPositionCount() == seen.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = Math.toIntExact(groups.getInt(groupPosition)); + if (seen.getBoolean(groupPosition + positionOffset)) { + state.set(groupId, MinBooleanAggregator.combine(state.getOrDefault(groupId), min.getBoolean(groupPosition + positionOffset))); + } + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + BooleanArrayState inState = ((MinBooleanGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + if (inState.hasValue(position)) { + state.set(groupId, MinBooleanAggregator.combine(state.getOrDefault(groupId), inState.get(position))); + } + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = state.toValuesBlock(selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListDoubleAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopDoubleAggregatorFunction.java similarity index 70% rename from 
x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListDoubleAggregatorFunction.java rename to x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopDoubleAggregatorFunction.java index d52d25941780c..8549da42c0d85 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListDoubleAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopDoubleAggregatorFunction.java @@ -17,16 +17,16 @@ import org.elasticsearch.compute.operator.DriverContext; /** - * {@link AggregatorFunction} implementation for {@link TopListDoubleAggregator}. + * {@link AggregatorFunction} implementation for {@link TopDoubleAggregator}. * This class is generated. Do not edit it. */ -public final class TopListDoubleAggregatorFunction implements AggregatorFunction { +public final class TopDoubleAggregatorFunction implements AggregatorFunction { private static final List INTERMEDIATE_STATE_DESC = List.of( - new IntermediateStateDesc("topList", ElementType.DOUBLE) ); + new IntermediateStateDesc("top", ElementType.DOUBLE) ); private final DriverContext driverContext; - private final TopListDoubleAggregator.SingleState state; + private final TopDoubleAggregator.SingleState state; private final List channels; @@ -34,8 +34,8 @@ public final class TopListDoubleAggregatorFunction implements AggregatorFunction private final boolean ascending; - public TopListDoubleAggregatorFunction(DriverContext driverContext, List channels, - TopListDoubleAggregator.SingleState state, int limit, boolean ascending) { + public TopDoubleAggregatorFunction(DriverContext driverContext, List channels, + TopDoubleAggregator.SingleState state, int limit, boolean ascending) { this.driverContext = driverContext; this.channels = channels; this.state = state; @@ -43,9 +43,9 @@ public TopListDoubleAggregatorFunction(DriverContext driverContext, List channels, int limit, boolean ascending) { - return new TopListDoubleAggregatorFunction(driverContext, channels, TopListDoubleAggregator.initSingle(driverContext.bigArrays(), limit, ascending), limit, ascending); + return new TopDoubleAggregatorFunction(driverContext, channels, TopDoubleAggregator.initSingle(driverContext.bigArrays(), limit, ascending), limit, ascending); } public static List intermediateStateDesc() { @@ -70,7 +70,7 @@ public void addRawInput(Page page) { private void addRawVector(DoubleVector vector) { for (int i = 0; i < vector.getPositionCount(); i++) { - TopListDoubleAggregator.combine(state, vector.getDouble(i)); + TopDoubleAggregator.combine(state, vector.getDouble(i)); } } @@ -82,7 +82,7 @@ private void addRawBlock(DoubleBlock block) { int start = block.getFirstValueIndex(p); int end = start + block.getValueCount(p); for (int i = start; i < end; i++) { - TopListDoubleAggregator.combine(state, block.getDouble(i)); + TopDoubleAggregator.combine(state, block.getDouble(i)); } } } @@ -91,13 +91,13 @@ private void addRawBlock(DoubleBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block topListUncast = page.getBlock(channels.get(0)); - if (topListUncast.areAllValuesNull()) { + Block topUncast = page.getBlock(channels.get(0)); + if (topUncast.areAllValuesNull()) { return; } - DoubleBlock topList = (DoubleBlock) topListUncast; - assert topList.getPositionCount() == 1; - 
TopListDoubleAggregator.combineIntermediate(state, topList); + DoubleBlock top = (DoubleBlock) topUncast; + assert top.getPositionCount() == 1; + TopDoubleAggregator.combineIntermediate(state, top); } @Override @@ -107,7 +107,7 @@ public void evaluateIntermediate(Block[] blocks, int offset, DriverContext drive @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { - blocks[offset] = TopListDoubleAggregator.evaluateFinal(state, driverContext); + blocks[offset] = TopDoubleAggregator.evaluateFinal(state, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListIntAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopDoubleAggregatorFunctionSupplier.java similarity index 57% rename from x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListIntAggregatorFunctionSupplier.java rename to x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopDoubleAggregatorFunctionSupplier.java index d8bf91ba85541..36a8763b4a870 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListIntAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopDoubleAggregatorFunctionSupplier.java @@ -11,35 +11,34 @@ import org.elasticsearch.compute.operator.DriverContext; /** - * {@link AggregatorFunctionSupplier} implementation for {@link TopListIntAggregator}. + * {@link AggregatorFunctionSupplier} implementation for {@link TopDoubleAggregator}. * This class is generated. Do not edit it. */ -public final class TopListIntAggregatorFunctionSupplier implements AggregatorFunctionSupplier { +public final class TopDoubleAggregatorFunctionSupplier implements AggregatorFunctionSupplier { private final List channels; private final int limit; private final boolean ascending; - public TopListIntAggregatorFunctionSupplier(List channels, int limit, - boolean ascending) { + public TopDoubleAggregatorFunctionSupplier(List channels, int limit, boolean ascending) { this.channels = channels; this.limit = limit; this.ascending = ascending; } @Override - public TopListIntAggregatorFunction aggregator(DriverContext driverContext) { - return TopListIntAggregatorFunction.create(driverContext, channels, limit, ascending); + public TopDoubleAggregatorFunction aggregator(DriverContext driverContext) { + return TopDoubleAggregatorFunction.create(driverContext, channels, limit, ascending); } @Override - public TopListIntGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { - return TopListIntGroupingAggregatorFunction.create(channels, driverContext, limit, ascending); + public TopDoubleGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return TopDoubleGroupingAggregatorFunction.create(channels, driverContext, limit, ascending); } @Override public String describe() { - return "top_list of ints"; + return "top of doubles"; } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopDoubleGroupingAggregatorFunction.java similarity index 79% rename from 
x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListDoubleGroupingAggregatorFunction.java rename to x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopDoubleGroupingAggregatorFunction.java index 0e3b98bb0f7e5..c54dce5715846 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopDoubleGroupingAggregatorFunction.java @@ -19,14 +19,14 @@ import org.elasticsearch.compute.operator.DriverContext; /** - * {@link GroupingAggregatorFunction} implementation for {@link TopListDoubleAggregator}. + * {@link GroupingAggregatorFunction} implementation for {@link TopDoubleAggregator}. * This class is generated. Do not edit it. */ -public final class TopListDoubleGroupingAggregatorFunction implements GroupingAggregatorFunction { +public final class TopDoubleGroupingAggregatorFunction implements GroupingAggregatorFunction { private static final List INTERMEDIATE_STATE_DESC = List.of( - new IntermediateStateDesc("topList", ElementType.DOUBLE) ); + new IntermediateStateDesc("top", ElementType.DOUBLE) ); - private final TopListDoubleAggregator.GroupingState state; + private final TopDoubleAggregator.GroupingState state; private final List channels; @@ -36,8 +36,8 @@ public final class TopListDoubleGroupingAggregatorFunction implements GroupingAg private final boolean ascending; - public TopListDoubleGroupingAggregatorFunction(List channels, - TopListDoubleAggregator.GroupingState state, DriverContext driverContext, int limit, + public TopDoubleGroupingAggregatorFunction(List channels, + TopDoubleAggregator.GroupingState state, DriverContext driverContext, int limit, boolean ascending) { this.channels = channels; this.state = state; @@ -46,9 +46,9 @@ public TopListDoubleGroupingAggregatorFunction(List channels, this.ascending = ascending; } - public static TopListDoubleGroupingAggregatorFunction create(List channels, + public static TopDoubleGroupingAggregatorFunction create(List channels, DriverContext driverContext, int limit, boolean ascending) { - return new TopListDoubleGroupingAggregatorFunction(channels, TopListDoubleAggregator.initGrouping(driverContext.bigArrays(), limit, ascending), driverContext, limit, ascending); + return new TopDoubleGroupingAggregatorFunction(channels, TopDoubleAggregator.initGrouping(driverContext.bigArrays(), limit, ascending), driverContext, limit, ascending); } public static List intermediateStateDesc() { @@ -103,7 +103,7 @@ private void addRawInput(int positionOffset, IntVector groups, DoubleBlock value int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); for (int v = valuesStart; v < valuesEnd; v++) { - TopListDoubleAggregator.combine(state, groupId, values.getDouble(v)); + TopDoubleAggregator.combine(state, groupId, values.getDouble(v)); } } } @@ -111,7 +111,7 @@ private void addRawInput(int positionOffset, IntVector groups, DoubleBlock value private void addRawInput(int positionOffset, IntVector groups, DoubleVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { int groupId = Math.toIntExact(groups.getInt(groupPosition)); - TopListDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); + TopDoubleAggregator.combine(state, 
groupId, values.getDouble(groupPosition + positionOffset)); } } @@ -130,7 +130,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleBlock values int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); for (int v = valuesStart; v < valuesEnd; v++) { - TopListDoubleAggregator.combine(state, groupId, values.getDouble(v)); + TopDoubleAggregator.combine(state, groupId, values.getDouble(v)); } } } @@ -145,7 +145,7 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { int groupId = Math.toIntExact(groups.getInt(g)); - TopListDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); + TopDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); } } } @@ -154,14 +154,14 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); assert channels.size() == intermediateBlockCount(); - Block topListUncast = page.getBlock(channels.get(0)); - if (topListUncast.areAllValuesNull()) { + Block topUncast = page.getBlock(channels.get(0)); + if (topUncast.areAllValuesNull()) { return; } - DoubleBlock topList = (DoubleBlock) topListUncast; + DoubleBlock top = (DoubleBlock) topUncast; for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { int groupId = Math.toIntExact(groups.getInt(groupPosition)); - TopListDoubleAggregator.combineIntermediate(state, groupId, topList, groupPosition + positionOffset); + TopDoubleAggregator.combineIntermediate(state, groupId, top, groupPosition + positionOffset); } } @@ -170,9 +170,9 @@ public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction inpu if (input.getClass() != getClass()) { throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); } - TopListDoubleAggregator.GroupingState inState = ((TopListDoubleGroupingAggregatorFunction) input).state; + TopDoubleAggregator.GroupingState inState = ((TopDoubleGroupingAggregatorFunction) input).state; state.enableGroupIdTracking(new SeenGroupIds.Empty()); - TopListDoubleAggregator.combineStates(state, groupId, inState, position); + TopDoubleAggregator.combineStates(state, groupId, inState, position); } @Override @@ -183,7 +183,7 @@ public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) @Override public void evaluateFinal(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { - blocks[offset] = TopListDoubleAggregator.evaluateFinal(state, selected, driverContext); + blocks[offset] = TopDoubleAggregator.evaluateFinal(state, selected, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListFloatAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopFloatAggregatorFunction.java similarity index 71% rename from x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListFloatAggregatorFunction.java rename to x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopFloatAggregatorFunction.java index 6232d6ff21fc9..40ac1432caee8 
100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListFloatAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopFloatAggregatorFunction.java @@ -17,16 +17,16 @@ import org.elasticsearch.compute.operator.DriverContext; /** - * {@link AggregatorFunction} implementation for {@link TopListFloatAggregator}. + * {@link AggregatorFunction} implementation for {@link TopFloatAggregator}. * This class is generated. Do not edit it. */ -public final class TopListFloatAggregatorFunction implements AggregatorFunction { +public final class TopFloatAggregatorFunction implements AggregatorFunction { private static final List INTERMEDIATE_STATE_DESC = List.of( - new IntermediateStateDesc("topList", ElementType.FLOAT) ); + new IntermediateStateDesc("top", ElementType.FLOAT) ); private final DriverContext driverContext; - private final TopListFloatAggregator.SingleState state; + private final TopFloatAggregator.SingleState state; private final List channels; @@ -34,8 +34,8 @@ public final class TopListFloatAggregatorFunction implements AggregatorFunction private final boolean ascending; - public TopListFloatAggregatorFunction(DriverContext driverContext, List channels, - TopListFloatAggregator.SingleState state, int limit, boolean ascending) { + public TopFloatAggregatorFunction(DriverContext driverContext, List channels, + TopFloatAggregator.SingleState state, int limit, boolean ascending) { this.driverContext = driverContext; this.channels = channels; this.state = state; @@ -43,9 +43,9 @@ public TopListFloatAggregatorFunction(DriverContext driverContext, List this.ascending = ascending; } - public static TopListFloatAggregatorFunction create(DriverContext driverContext, + public static TopFloatAggregatorFunction create(DriverContext driverContext, List channels, int limit, boolean ascending) { - return new TopListFloatAggregatorFunction(driverContext, channels, TopListFloatAggregator.initSingle(driverContext.bigArrays(), limit, ascending), limit, ascending); + return new TopFloatAggregatorFunction(driverContext, channels, TopFloatAggregator.initSingle(driverContext.bigArrays(), limit, ascending), limit, ascending); } public static List intermediateStateDesc() { @@ -70,7 +70,7 @@ public void addRawInput(Page page) { private void addRawVector(FloatVector vector) { for (int i = 0; i < vector.getPositionCount(); i++) { - TopListFloatAggregator.combine(state, vector.getFloat(i)); + TopFloatAggregator.combine(state, vector.getFloat(i)); } } @@ -82,7 +82,7 @@ private void addRawBlock(FloatBlock block) { int start = block.getFirstValueIndex(p); int end = start + block.getValueCount(p); for (int i = start; i < end; i++) { - TopListFloatAggregator.combine(state, block.getFloat(i)); + TopFloatAggregator.combine(state, block.getFloat(i)); } } } @@ -91,13 +91,13 @@ private void addRawBlock(FloatBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block topListUncast = page.getBlock(channels.get(0)); - if (topListUncast.areAllValuesNull()) { + Block topUncast = page.getBlock(channels.get(0)); + if (topUncast.areAllValuesNull()) { return; } - FloatBlock topList = (FloatBlock) topListUncast; - assert topList.getPositionCount() == 1; - TopListFloatAggregator.combineIntermediate(state, topList); + FloatBlock top = (FloatBlock) topUncast; + assert top.getPositionCount() == 
1; + TopFloatAggregator.combineIntermediate(state, top); } @Override @@ -107,7 +107,7 @@ public void evaluateIntermediate(Block[] blocks, int offset, DriverContext drive @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { - blocks[offset] = TopListFloatAggregator.evaluateFinal(state, driverContext); + blocks[offset] = TopFloatAggregator.evaluateFinal(state, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListLongAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopFloatAggregatorFunctionSupplier.java similarity index 57% rename from x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListLongAggregatorFunctionSupplier.java rename to x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopFloatAggregatorFunctionSupplier.java index 617895fbff1a3..e01df8329a315 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListLongAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopFloatAggregatorFunctionSupplier.java @@ -11,35 +11,34 @@ import org.elasticsearch.compute.operator.DriverContext; /** - * {@link AggregatorFunctionSupplier} implementation for {@link TopListLongAggregator}. + * {@link AggregatorFunctionSupplier} implementation for {@link TopFloatAggregator}. * This class is generated. Do not edit it. */ -public final class TopListLongAggregatorFunctionSupplier implements AggregatorFunctionSupplier { +public final class TopFloatAggregatorFunctionSupplier implements AggregatorFunctionSupplier { private final List channels; private final int limit; private final boolean ascending; - public TopListLongAggregatorFunctionSupplier(List channels, int limit, - boolean ascending) { + public TopFloatAggregatorFunctionSupplier(List channels, int limit, boolean ascending) { this.channels = channels; this.limit = limit; this.ascending = ascending; } @Override - public TopListLongAggregatorFunction aggregator(DriverContext driverContext) { - return TopListLongAggregatorFunction.create(driverContext, channels, limit, ascending); + public TopFloatAggregatorFunction aggregator(DriverContext driverContext) { + return TopFloatAggregatorFunction.create(driverContext, channels, limit, ascending); } @Override - public TopListLongGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { - return TopListLongGroupingAggregatorFunction.create(channels, driverContext, limit, ascending); + public TopFloatGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return TopFloatGroupingAggregatorFunction.create(channels, driverContext, limit, ascending); } @Override public String describe() { - return "top_list of longs"; + return "top of floats"; } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopFloatGroupingAggregatorFunction.java similarity index 79% rename from x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListFloatGroupingAggregatorFunction.java rename to x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopFloatGroupingAggregatorFunction.java index 
66f8fa7eeb35d..4c00f4d2c237d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListFloatGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopFloatGroupingAggregatorFunction.java @@ -19,14 +19,14 @@ import org.elasticsearch.compute.operator.DriverContext; /** - * {@link GroupingAggregatorFunction} implementation for {@link TopListFloatAggregator}. + * {@link GroupingAggregatorFunction} implementation for {@link TopFloatAggregator}. * This class is generated. Do not edit it. */ -public final class TopListFloatGroupingAggregatorFunction implements GroupingAggregatorFunction { +public final class TopFloatGroupingAggregatorFunction implements GroupingAggregatorFunction { private static final List INTERMEDIATE_STATE_DESC = List.of( - new IntermediateStateDesc("topList", ElementType.FLOAT) ); + new IntermediateStateDesc("top", ElementType.FLOAT) ); - private final TopListFloatAggregator.GroupingState state; + private final TopFloatAggregator.GroupingState state; private final List channels; @@ -36,8 +36,8 @@ public final class TopListFloatGroupingAggregatorFunction implements GroupingAgg private final boolean ascending; - public TopListFloatGroupingAggregatorFunction(List channels, - TopListFloatAggregator.GroupingState state, DriverContext driverContext, int limit, + public TopFloatGroupingAggregatorFunction(List channels, + TopFloatAggregator.GroupingState state, DriverContext driverContext, int limit, boolean ascending) { this.channels = channels; this.state = state; @@ -46,9 +46,9 @@ public TopListFloatGroupingAggregatorFunction(List channels, this.ascending = ascending; } - public static TopListFloatGroupingAggregatorFunction create(List channels, + public static TopFloatGroupingAggregatorFunction create(List channels, DriverContext driverContext, int limit, boolean ascending) { - return new TopListFloatGroupingAggregatorFunction(channels, TopListFloatAggregator.initGrouping(driverContext.bigArrays(), limit, ascending), driverContext, limit, ascending); + return new TopFloatGroupingAggregatorFunction(channels, TopFloatAggregator.initGrouping(driverContext.bigArrays(), limit, ascending), driverContext, limit, ascending); } public static List intermediateStateDesc() { @@ -103,7 +103,7 @@ private void addRawInput(int positionOffset, IntVector groups, FloatBlock values int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); for (int v = valuesStart; v < valuesEnd; v++) { - TopListFloatAggregator.combine(state, groupId, values.getFloat(v)); + TopFloatAggregator.combine(state, groupId, values.getFloat(v)); } } } @@ -111,7 +111,7 @@ private void addRawInput(int positionOffset, IntVector groups, FloatBlock values private void addRawInput(int positionOffset, IntVector groups, FloatVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { int groupId = Math.toIntExact(groups.getInt(groupPosition)); - TopListFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); + TopFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); } } @@ -130,7 +130,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values) int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); int valuesEnd = valuesStart + values.getValueCount(groupPosition 
+ positionOffset); for (int v = valuesStart; v < valuesEnd; v++) { - TopListFloatAggregator.combine(state, groupId, values.getFloat(v)); + TopFloatAggregator.combine(state, groupId, values.getFloat(v)); } } } @@ -145,7 +145,7 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { int groupId = Math.toIntExact(groups.getInt(g)); - TopListFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); + TopFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); } } } @@ -154,14 +154,14 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); assert channels.size() == intermediateBlockCount(); - Block topListUncast = page.getBlock(channels.get(0)); - if (topListUncast.areAllValuesNull()) { + Block topUncast = page.getBlock(channels.get(0)); + if (topUncast.areAllValuesNull()) { return; } - FloatBlock topList = (FloatBlock) topListUncast; + FloatBlock top = (FloatBlock) topUncast; for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { int groupId = Math.toIntExact(groups.getInt(groupPosition)); - TopListFloatAggregator.combineIntermediate(state, groupId, topList, groupPosition + positionOffset); + TopFloatAggregator.combineIntermediate(state, groupId, top, groupPosition + positionOffset); } } @@ -170,9 +170,9 @@ public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction inpu if (input.getClass() != getClass()) { throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); } - TopListFloatAggregator.GroupingState inState = ((TopListFloatGroupingAggregatorFunction) input).state; + TopFloatAggregator.GroupingState inState = ((TopFloatGroupingAggregatorFunction) input).state; state.enableGroupIdTracking(new SeenGroupIds.Empty()); - TopListFloatAggregator.combineStates(state, groupId, inState, position); + TopFloatAggregator.combineStates(state, groupId, inState, position); } @Override @@ -183,7 +183,7 @@ public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) @Override public void evaluateFinal(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { - blocks[offset] = TopListFloatAggregator.evaluateFinal(state, selected, driverContext); + blocks[offset] = TopFloatAggregator.evaluateFinal(state, selected, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListIntAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIntAggregatorFunction.java similarity index 70% rename from x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListIntAggregatorFunction.java rename to x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIntAggregatorFunction.java index e885b285c4a51..f6e858b69a639 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListIntAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIntAggregatorFunction.java @@ -17,16 +17,16 @@ import org.elasticsearch.compute.operator.DriverContext; /** - * 
{@link AggregatorFunction} implementation for {@link TopListIntAggregator}. + * {@link AggregatorFunction} implementation for {@link TopIntAggregator}. * This class is generated. Do not edit it. */ -public final class TopListIntAggregatorFunction implements AggregatorFunction { +public final class TopIntAggregatorFunction implements AggregatorFunction { private static final List INTERMEDIATE_STATE_DESC = List.of( - new IntermediateStateDesc("topList", ElementType.INT) ); + new IntermediateStateDesc("top", ElementType.INT) ); private final DriverContext driverContext; - private final TopListIntAggregator.SingleState state; + private final TopIntAggregator.SingleState state; private final List channels; @@ -34,8 +34,8 @@ public final class TopListIntAggregatorFunction implements AggregatorFunction { private final boolean ascending; - public TopListIntAggregatorFunction(DriverContext driverContext, List channels, - TopListIntAggregator.SingleState state, int limit, boolean ascending) { + public TopIntAggregatorFunction(DriverContext driverContext, List channels, + TopIntAggregator.SingleState state, int limit, boolean ascending) { this.driverContext = driverContext; this.channels = channels; this.state = state; @@ -43,9 +43,9 @@ public TopListIntAggregatorFunction(DriverContext driverContext, List c this.ascending = ascending; } - public static TopListIntAggregatorFunction create(DriverContext driverContext, - List channels, int limit, boolean ascending) { - return new TopListIntAggregatorFunction(driverContext, channels, TopListIntAggregator.initSingle(driverContext.bigArrays(), limit, ascending), limit, ascending); + public static TopIntAggregatorFunction create(DriverContext driverContext, List channels, + int limit, boolean ascending) { + return new TopIntAggregatorFunction(driverContext, channels, TopIntAggregator.initSingle(driverContext.bigArrays(), limit, ascending), limit, ascending); } public static List intermediateStateDesc() { @@ -70,7 +70,7 @@ public void addRawInput(Page page) { private void addRawVector(IntVector vector) { for (int i = 0; i < vector.getPositionCount(); i++) { - TopListIntAggregator.combine(state, vector.getInt(i)); + TopIntAggregator.combine(state, vector.getInt(i)); } } @@ -82,7 +82,7 @@ private void addRawBlock(IntBlock block) { int start = block.getFirstValueIndex(p); int end = start + block.getValueCount(p); for (int i = start; i < end; i++) { - TopListIntAggregator.combine(state, block.getInt(i)); + TopIntAggregator.combine(state, block.getInt(i)); } } } @@ -91,13 +91,13 @@ private void addRawBlock(IntBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block topListUncast = page.getBlock(channels.get(0)); - if (topListUncast.areAllValuesNull()) { + Block topUncast = page.getBlock(channels.get(0)); + if (topUncast.areAllValuesNull()) { return; } - IntBlock topList = (IntBlock) topListUncast; - assert topList.getPositionCount() == 1; - TopListIntAggregator.combineIntermediate(state, topList); + IntBlock top = (IntBlock) topUncast; + assert top.getPositionCount() == 1; + TopIntAggregator.combineIntermediate(state, top); } @Override @@ -107,7 +107,7 @@ public void evaluateIntermediate(Block[] blocks, int offset, DriverContext drive @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { - blocks[offset] = TopListIntAggregator.evaluateFinal(state, driverContext); + blocks[offset] 
= TopIntAggregator.evaluateFinal(state, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListFloatAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIntAggregatorFunctionSupplier.java similarity index 56% rename from x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListFloatAggregatorFunctionSupplier.java rename to x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIntAggregatorFunctionSupplier.java index ff1c3e8df4b46..4481f2d5afaa8 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListFloatAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIntAggregatorFunctionSupplier.java @@ -11,35 +11,34 @@ import org.elasticsearch.compute.operator.DriverContext; /** - * {@link AggregatorFunctionSupplier} implementation for {@link TopListFloatAggregator}. + * {@link AggregatorFunctionSupplier} implementation for {@link TopIntAggregator}. * This class is generated. Do not edit it. */ -public final class TopListFloatAggregatorFunctionSupplier implements AggregatorFunctionSupplier { +public final class TopIntAggregatorFunctionSupplier implements AggregatorFunctionSupplier { private final List channels; private final int limit; private final boolean ascending; - public TopListFloatAggregatorFunctionSupplier(List channels, int limit, - boolean ascending) { + public TopIntAggregatorFunctionSupplier(List channels, int limit, boolean ascending) { this.channels = channels; this.limit = limit; this.ascending = ascending; } @Override - public TopListFloatAggregatorFunction aggregator(DriverContext driverContext) { - return TopListFloatAggregatorFunction.create(driverContext, channels, limit, ascending); + public TopIntAggregatorFunction aggregator(DriverContext driverContext) { + return TopIntAggregatorFunction.create(driverContext, channels, limit, ascending); } @Override - public TopListFloatGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { - return TopListFloatGroupingAggregatorFunction.create(channels, driverContext, limit, ascending); + public TopIntGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return TopIntGroupingAggregatorFunction.create(channels, driverContext, limit, ascending); } @Override public String describe() { - return "top_list of floats"; + return "top of ints"; } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIntGroupingAggregatorFunction.java similarity index 80% rename from x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListIntGroupingAggregatorFunction.java rename to x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIntGroupingAggregatorFunction.java index 820ebb95e530c..37384238b7297 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIntGroupingAggregatorFunction.java @@ -17,14 +17,14 @@ import org.elasticsearch.compute.operator.DriverContext; /** - * 
{@link GroupingAggregatorFunction} implementation for {@link TopListIntAggregator}. + * {@link GroupingAggregatorFunction} implementation for {@link TopIntAggregator}. * This class is generated. Do not edit it. */ -public final class TopListIntGroupingAggregatorFunction implements GroupingAggregatorFunction { +public final class TopIntGroupingAggregatorFunction implements GroupingAggregatorFunction { private static final List INTERMEDIATE_STATE_DESC = List.of( - new IntermediateStateDesc("topList", ElementType.INT) ); + new IntermediateStateDesc("top", ElementType.INT) ); - private final TopListIntAggregator.GroupingState state; + private final TopIntAggregator.GroupingState state; private final List channels; @@ -34,8 +34,8 @@ public final class TopListIntGroupingAggregatorFunction implements GroupingAggre private final boolean ascending; - public TopListIntGroupingAggregatorFunction(List channels, - TopListIntAggregator.GroupingState state, DriverContext driverContext, int limit, + public TopIntGroupingAggregatorFunction(List channels, + TopIntAggregator.GroupingState state, DriverContext driverContext, int limit, boolean ascending) { this.channels = channels; this.state = state; @@ -44,9 +44,9 @@ public TopListIntGroupingAggregatorFunction(List channels, this.ascending = ascending; } - public static TopListIntGroupingAggregatorFunction create(List channels, + public static TopIntGroupingAggregatorFunction create(List channels, DriverContext driverContext, int limit, boolean ascending) { - return new TopListIntGroupingAggregatorFunction(channels, TopListIntAggregator.initGrouping(driverContext.bigArrays(), limit, ascending), driverContext, limit, ascending); + return new TopIntGroupingAggregatorFunction(channels, TopIntAggregator.initGrouping(driverContext.bigArrays(), limit, ascending), driverContext, limit, ascending); } public static List intermediateStateDesc() { @@ -101,7 +101,7 @@ private void addRawInput(int positionOffset, IntVector groups, IntBlock values) int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); for (int v = valuesStart; v < valuesEnd; v++) { - TopListIntAggregator.combine(state, groupId, values.getInt(v)); + TopIntAggregator.combine(state, groupId, values.getInt(v)); } } } @@ -109,7 +109,7 @@ private void addRawInput(int positionOffset, IntVector groups, IntBlock values) private void addRawInput(int positionOffset, IntVector groups, IntVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { int groupId = Math.toIntExact(groups.getInt(groupPosition)); - TopListIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); + TopIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); } } @@ -128,7 +128,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntBlock values) { int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); for (int v = valuesStart; v < valuesEnd; v++) { - TopListIntAggregator.combine(state, groupId, values.getInt(v)); + TopIntAggregator.combine(state, groupId, values.getInt(v)); } } } @@ -143,7 +143,7 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values) int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { int groupId = 
Math.toIntExact(groups.getInt(g)); - TopListIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); + TopIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); } } } @@ -152,14 +152,14 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values) public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); assert channels.size() == intermediateBlockCount(); - Block topListUncast = page.getBlock(channels.get(0)); - if (topListUncast.areAllValuesNull()) { + Block topUncast = page.getBlock(channels.get(0)); + if (topUncast.areAllValuesNull()) { return; } - IntBlock topList = (IntBlock) topListUncast; + IntBlock top = (IntBlock) topUncast; for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { int groupId = Math.toIntExact(groups.getInt(groupPosition)); - TopListIntAggregator.combineIntermediate(state, groupId, topList, groupPosition + positionOffset); + TopIntAggregator.combineIntermediate(state, groupId, top, groupPosition + positionOffset); } } @@ -168,9 +168,9 @@ public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction inpu if (input.getClass() != getClass()) { throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); } - TopListIntAggregator.GroupingState inState = ((TopListIntGroupingAggregatorFunction) input).state; + TopIntAggregator.GroupingState inState = ((TopIntGroupingAggregatorFunction) input).state; state.enableGroupIdTracking(new SeenGroupIds.Empty()); - TopListIntAggregator.combineStates(state, groupId, inState, position); + TopIntAggregator.combineStates(state, groupId, inState, position); } @Override @@ -181,7 +181,7 @@ public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) @Override public void evaluateFinal(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { - blocks[offset] = TopListIntAggregator.evaluateFinal(state, selected, driverContext); + blocks[offset] = TopIntAggregator.evaluateFinal(state, selected, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListLongAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopLongAggregatorFunction.java similarity index 71% rename from x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListLongAggregatorFunction.java rename to x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopLongAggregatorFunction.java index 1a09a1a860e2f..c355e401478d8 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListLongAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopLongAggregatorFunction.java @@ -17,16 +17,16 @@ import org.elasticsearch.compute.operator.DriverContext; /** - * {@link AggregatorFunction} implementation for {@link TopListLongAggregator}. + * {@link AggregatorFunction} implementation for {@link TopLongAggregator}. * This class is generated. Do not edit it. 
*/ -public final class TopListLongAggregatorFunction implements AggregatorFunction { +public final class TopLongAggregatorFunction implements AggregatorFunction { private static final List INTERMEDIATE_STATE_DESC = List.of( - new IntermediateStateDesc("topList", ElementType.LONG) ); + new IntermediateStateDesc("top", ElementType.LONG) ); private final DriverContext driverContext; - private final TopListLongAggregator.SingleState state; + private final TopLongAggregator.SingleState state; private final List channels; @@ -34,8 +34,8 @@ public final class TopListLongAggregatorFunction implements AggregatorFunction { private final boolean ascending; - public TopListLongAggregatorFunction(DriverContext driverContext, List channels, - TopListLongAggregator.SingleState state, int limit, boolean ascending) { + public TopLongAggregatorFunction(DriverContext driverContext, List channels, + TopLongAggregator.SingleState state, int limit, boolean ascending) { this.driverContext = driverContext; this.channels = channels; this.state = state; @@ -43,9 +43,9 @@ public TopListLongAggregatorFunction(DriverContext driverContext, List this.ascending = ascending; } - public static TopListLongAggregatorFunction create(DriverContext driverContext, + public static TopLongAggregatorFunction create(DriverContext driverContext, List channels, int limit, boolean ascending) { - return new TopListLongAggregatorFunction(driverContext, channels, TopListLongAggregator.initSingle(driverContext.bigArrays(), limit, ascending), limit, ascending); + return new TopLongAggregatorFunction(driverContext, channels, TopLongAggregator.initSingle(driverContext.bigArrays(), limit, ascending), limit, ascending); } public static List intermediateStateDesc() { @@ -70,7 +70,7 @@ public void addRawInput(Page page) { private void addRawVector(LongVector vector) { for (int i = 0; i < vector.getPositionCount(); i++) { - TopListLongAggregator.combine(state, vector.getLong(i)); + TopLongAggregator.combine(state, vector.getLong(i)); } } @@ -82,7 +82,7 @@ private void addRawBlock(LongBlock block) { int start = block.getFirstValueIndex(p); int end = start + block.getValueCount(p); for (int i = start; i < end; i++) { - TopListLongAggregator.combine(state, block.getLong(i)); + TopLongAggregator.combine(state, block.getLong(i)); } } } @@ -91,13 +91,13 @@ private void addRawBlock(LongBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block topListUncast = page.getBlock(channels.get(0)); - if (topListUncast.areAllValuesNull()) { + Block topUncast = page.getBlock(channels.get(0)); + if (topUncast.areAllValuesNull()) { return; } - LongBlock topList = (LongBlock) topListUncast; - assert topList.getPositionCount() == 1; - TopListLongAggregator.combineIntermediate(state, topList); + LongBlock top = (LongBlock) topUncast; + assert top.getPositionCount() == 1; + TopLongAggregator.combineIntermediate(state, top); } @Override @@ -107,7 +107,7 @@ public void evaluateIntermediate(Block[] blocks, int offset, DriverContext drive @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { - blocks[offset] = TopListLongAggregator.evaluateFinal(state, driverContext); + blocks[offset] = TopLongAggregator.evaluateFinal(state, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListDoubleAggregatorFunctionSupplier.java 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopLongAggregatorFunctionSupplier.java similarity index 56% rename from x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListDoubleAggregatorFunctionSupplier.java rename to x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopLongAggregatorFunctionSupplier.java index 48df091d339b6..1a39c7b5580ec 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListDoubleAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopLongAggregatorFunctionSupplier.java @@ -11,35 +11,34 @@ import org.elasticsearch.compute.operator.DriverContext; /** - * {@link AggregatorFunctionSupplier} implementation for {@link TopListDoubleAggregator}. + * {@link AggregatorFunctionSupplier} implementation for {@link TopLongAggregator}. * This class is generated. Do not edit it. */ -public final class TopListDoubleAggregatorFunctionSupplier implements AggregatorFunctionSupplier { +public final class TopLongAggregatorFunctionSupplier implements AggregatorFunctionSupplier { private final List channels; private final int limit; private final boolean ascending; - public TopListDoubleAggregatorFunctionSupplier(List channels, int limit, - boolean ascending) { + public TopLongAggregatorFunctionSupplier(List channels, int limit, boolean ascending) { this.channels = channels; this.limit = limit; this.ascending = ascending; } @Override - public TopListDoubleAggregatorFunction aggregator(DriverContext driverContext) { - return TopListDoubleAggregatorFunction.create(driverContext, channels, limit, ascending); + public TopLongAggregatorFunction aggregator(DriverContext driverContext) { + return TopLongAggregatorFunction.create(driverContext, channels, limit, ascending); } @Override - public TopListDoubleGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { - return TopListDoubleGroupingAggregatorFunction.create(channels, driverContext, limit, ascending); + public TopLongGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return TopLongGroupingAggregatorFunction.create(channels, driverContext, limit, ascending); } @Override public String describe() { - return "top_list of doubles"; + return "top of longs"; } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopLongGroupingAggregatorFunction.java similarity index 80% rename from x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListLongGroupingAggregatorFunction.java rename to x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopLongGroupingAggregatorFunction.java index cadb48b7d29d4..7b199b2a81389 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopListLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopLongGroupingAggregatorFunction.java @@ -19,14 +19,14 @@ import org.elasticsearch.compute.operator.DriverContext; /** - * {@link GroupingAggregatorFunction} implementation for {@link TopListLongAggregator}. + * {@link GroupingAggregatorFunction} implementation for {@link TopLongAggregator}. 
* This class is generated. Do not edit it. */ -public final class TopListLongGroupingAggregatorFunction implements GroupingAggregatorFunction { +public final class TopLongGroupingAggregatorFunction implements GroupingAggregatorFunction { private static final List INTERMEDIATE_STATE_DESC = List.of( - new IntermediateStateDesc("topList", ElementType.LONG) ); + new IntermediateStateDesc("top", ElementType.LONG) ); - private final TopListLongAggregator.GroupingState state; + private final TopLongAggregator.GroupingState state; private final List channels; @@ -36,8 +36,8 @@ public final class TopListLongGroupingAggregatorFunction implements GroupingAggr private final boolean ascending; - public TopListLongGroupingAggregatorFunction(List channels, - TopListLongAggregator.GroupingState state, DriverContext driverContext, int limit, + public TopLongGroupingAggregatorFunction(List channels, + TopLongAggregator.GroupingState state, DriverContext driverContext, int limit, boolean ascending) { this.channels = channels; this.state = state; @@ -46,9 +46,9 @@ public TopListLongGroupingAggregatorFunction(List channels, this.ascending = ascending; } - public static TopListLongGroupingAggregatorFunction create(List channels, + public static TopLongGroupingAggregatorFunction create(List channels, DriverContext driverContext, int limit, boolean ascending) { - return new TopListLongGroupingAggregatorFunction(channels, TopListLongAggregator.initGrouping(driverContext.bigArrays(), limit, ascending), driverContext, limit, ascending); + return new TopLongGroupingAggregatorFunction(channels, TopLongAggregator.initGrouping(driverContext.bigArrays(), limit, ascending), driverContext, limit, ascending); } public static List intermediateStateDesc() { @@ -103,7 +103,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values) int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); for (int v = valuesStart; v < valuesEnd; v++) { - TopListLongAggregator.combine(state, groupId, values.getLong(v)); + TopLongAggregator.combine(state, groupId, values.getLong(v)); } } } @@ -111,7 +111,7 @@ private void addRawInput(int positionOffset, IntVector groups, LongBlock values) private void addRawInput(int positionOffset, IntVector groups, LongVector values) { for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { int groupId = Math.toIntExact(groups.getInt(groupPosition)); - TopListLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); + TopLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } @@ -130,7 +130,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongBlock values) int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); for (int v = valuesStart; v < valuesEnd; v++) { - TopListLongAggregator.combine(state, groupId, values.getLong(v)); + TopLongAggregator.combine(state, groupId, values.getLong(v)); } } } @@ -145,7 +145,7 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { int groupId = Math.toIntExact(groups.getInt(g)); - TopListLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); + 
TopLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); } } } @@ -154,14 +154,14 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); assert channels.size() == intermediateBlockCount(); - Block topListUncast = page.getBlock(channels.get(0)); - if (topListUncast.areAllValuesNull()) { + Block topUncast = page.getBlock(channels.get(0)); + if (topUncast.areAllValuesNull()) { return; } - LongBlock topList = (LongBlock) topListUncast; + LongBlock top = (LongBlock) topUncast; for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { int groupId = Math.toIntExact(groups.getInt(groupPosition)); - TopListLongAggregator.combineIntermediate(state, groupId, topList, groupPosition + positionOffset); + TopLongAggregator.combineIntermediate(state, groupId, top, groupPosition + positionOffset); } } @@ -170,9 +170,9 @@ public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction inpu if (input.getClass() != getClass()) { throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); } - TopListLongAggregator.GroupingState inState = ((TopListLongGroupingAggregatorFunction) input).state; + TopLongAggregator.GroupingState inState = ((TopLongGroupingAggregatorFunction) input).state; state.enableGroupIdTracking(new SeenGroupIds.Empty()); - TopListLongAggregator.combineStates(state, groupId, inState, position); + TopLongAggregator.combineStates(state, groupId, inState, position); } @Override @@ -183,7 +183,7 @@ public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) @Override public void evaluateFinal(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { - blocks[offset] = TopListLongAggregator.evaluateFinal(state, selected, driverContext); + blocks[offset] = TopLongAggregator.evaluateFinal(state, selected, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FromPartialAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FromPartialAggregatorFunction.java new file mode 100644 index 0000000000000..8d4ce2971d34b --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FromPartialAggregatorFunction.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.CompositeBlock; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasables; + +import java.util.List; + +/** + * @see ToPartialGroupingAggregatorFunction + */ +public class FromPartialAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("partial", ElementType.COMPOSITE, "partial_agg") + ); + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + private final DriverContext driverContext; + private final GroupingAggregatorFunction groupingAggregator; + private final int inputChannel; + private boolean receivedInput = false; + + public FromPartialAggregatorFunction(DriverContext driverContext, GroupingAggregatorFunction groupingAggregator, int inputChannel) { + this.driverContext = driverContext; + this.groupingAggregator = groupingAggregator; + this.inputChannel = inputChannel; + } + + @Override + public void addRawInput(Page page) { + addIntermediateInput(page); + } + + @Override + public void addIntermediateInput(Page page) { + try (IntVector groupIds = driverContext.blockFactory().newConstantIntVector(0, page.getPositionCount())) { + if (page.getPositionCount() > 0) { + receivedInput = true; + } + final CompositeBlock inputBlock = page.getBlock(inputChannel); + groupingAggregator.addIntermediateInput(0, groupIds, inputBlock.asPage()); + } + } + + private IntVector outputPositions() { + return driverContext.blockFactory().newConstantIntVector(0, receivedInput ? 1 : 0); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + final Block[] partialBlocks = new Block[groupingAggregator.intermediateBlockCount()]; + boolean success = false; + try (IntVector selected = outputPositions()) { + groupingAggregator.evaluateIntermediate(partialBlocks, 0, selected); + blocks[offset] = new CompositeBlock(partialBlocks); + success = true; + } finally { + if (success == false) { + Releasables.close(partialBlocks); + } + } + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + try (IntVector selected = outputPositions()) { + groupingAggregator.evaluateFinal(blocks, offset, selected, driverContext); + } + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void close() { + Releasables.close(groupingAggregator); + } + + @Override + public String toString() { + return getClass().getSimpleName() + "[" + "channel=" + inputChannel + ",delegate=" + groupingAggregator + "]"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FromPartialGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FromPartialGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..675fbe88f1984 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FromPartialGroupingAggregatorFunction.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.CompositeBlock; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasables; + +import java.util.List; + +/** + * @see ToPartialGroupingAggregatorFunction + */ +public class FromPartialGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("partial", ElementType.COMPOSITE, "partial_agg") + ); + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + private final GroupingAggregatorFunction delegate; + private final int inputChannel; + + public FromPartialGroupingAggregatorFunction(GroupingAggregatorFunction delegate, int inputChannel) { + this.delegate = delegate; + this.inputChannel = inputChannel; + } + + @Override + public AddInput prepareProcessPage(SeenGroupIds seenGroupIds, Page page) { + return new AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + assert false : "Intermediate group id must not have nulls"; + throw new IllegalStateException("Intermediate group id must not have nulls"); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addIntermediateInput(positionOffset, groupIds, page); + } + }; + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groupIdVector, Page page) { + final CompositeBlock inputBlock = page.getBlock(inputChannel); + delegate.addIntermediateInput(positionOffset, groupIdVector, inputBlock.asPage()); + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input instanceof FromPartialGroupingAggregatorFunction toPartial) { + input = toPartial.delegate; + } + delegate.addIntermediateRowInput(groupId, input, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + Block[] partialBlocks = new Block[delegate.intermediateBlockCount()]; + boolean success = false; + try { + delegate.evaluateIntermediate(partialBlocks, 0, selected); + blocks[offset] = new CompositeBlock(partialBlocks); + success = true; + } finally { + if (success == false) { + Releasables.close(partialBlocks); + } + } + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + delegate.evaluateFinal(blocks, offset, selected, driverContext); + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void close() { + Releasables.close(delegate); + } + + @Override + public String toString() { + return getClass().getSimpleName() + "[" + "channel=" + inputChannel + ",delegate=" + delegate + "]"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/IntermediateStateDesc.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/IntermediateStateDesc.java index 22766c36953c4..ced3ffaef970e 100644 --- 
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/IntermediateStateDesc.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/IntermediateStateDesc.java @@ -10,4 +10,8 @@ import org.elasticsearch.compute.data.ElementType; /** Intermediate aggregation state descriptor. Intermediate state is a list of these. */ -public record IntermediateStateDesc(String name, ElementType type) {} +public record IntermediateStateDesc(String name, ElementType type, String dataType) { + public IntermediateStateDesc(String name, ElementType type) { + this(name, type, ""); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxBooleanAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxBooleanAggregator.java new file mode 100644 index 0000000000000..79d0cd4d7492f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxBooleanAggregator.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; + +@Aggregator({ @IntermediateState(name = "max", type = "BOOLEAN"), @IntermediateState(name = "seen", type = "BOOLEAN") }) +@GroupingAggregator +class MaxBooleanAggregator { + + public static boolean init() { + return false; + } + + public static boolean combine(boolean current, boolean v) { + return current || v; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxDoubleAggregator.java index ee6555c4af67d..f0804278e5002 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxDoubleAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MaxDoubleAggregator.java @@ -16,7 +16,7 @@ class MaxDoubleAggregator { public static double init() { - return Double.MIN_VALUE; + return -Double.MAX_VALUE; } public static double combine(double current, double v) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MinBooleanAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MinBooleanAggregator.java new file mode 100644 index 0000000000000..372a5d988688f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MinBooleanAggregator.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; + +@Aggregator({ @IntermediateState(name = "min", type = "BOOLEAN"), @IntermediateState(name = "seen", type = "BOOLEAN") }) +@GroupingAggregator +class MinBooleanAggregator { + + public static boolean init() { + return true; + } + + public static boolean combine(boolean current, boolean v) { + return current && v; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/ToPartialAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/ToPartialAggregatorFunction.java new file mode 100644 index 0000000000000..2083b8ebbfff2 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/ToPartialAggregatorFunction.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.CompositeBlock; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasables; + +import java.util.List; + +/** + * @see ToPartialGroupingAggregatorFunction + */ +public class ToPartialAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("partial", ElementType.COMPOSITE, "partial_agg") + ); + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + private final AggregatorFunction delegate; + private final List channels; + + public ToPartialAggregatorFunction(AggregatorFunction delegate, List channels) { + this.delegate = delegate; + this.channels = channels; + } + + @Override + public void addRawInput(Page page) { + delegate.addRawInput(page); + } + + @Override + public void addIntermediateInput(Page page) { + final CompositeBlock inputBlock = page.getBlock(channels.get(0)); + delegate.addIntermediateInput(inputBlock.asPage()); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + final Block[] partialBlocks = new Block[delegate.intermediateBlockCount()]; + boolean success = false; + try { + delegate.evaluateIntermediate(partialBlocks, 0, driverContext); + blocks[offset] = new CompositeBlock(partialBlocks); + success = true; + } finally { + if (success == false) { + Releasables.close(partialBlocks); + } + } + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + evaluateIntermediate(blocks, offset, driverContext); + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void close() { + Releasables.close(delegate); + } + + @Override + public String toString() { + return getClass().getSimpleName() + "[" + "channels=" + channels + ",delegate=" + delegate + "]"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/ToPartialGroupingAggregatorFunction.java 
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/ToPartialGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..13d4bd5d6c0d6 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/ToPartialGroupingAggregatorFunction.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.CompositeBlock; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasables; + +import java.util.List; + +/** + * An internal aggregate function that always emits intermediate (or partial) output regardless of the aggregate mode. + * The intermediate output should be consumed by {@link FromPartialGroupingAggregatorFunction}, which always receives + * the intermediate input. Since an intermediate aggregate output can consist of multiple blocks, we wrap these output + * blocks in a single composite block. The {@link FromPartialGroupingAggregatorFunction} then unwraps this input block + * into multiple primitive blocks and passes them to the delegating GroupingAggregatorFunction. + * Both of these commands yield the same result, except the second plan executes aggregates twice: + *
      + * ```
      + * | ... before
      + * | af(x) BY g
      + * | ... after
      + * ```
      + * ```
      + * | ... before
      + * | $x = to_partial(af(x)) BY g
      + * | from_partial($x, af(_)) BY g
      + * | ...  after
      + * 
      + * ``` + */ +public class ToPartialGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("partial", ElementType.COMPOSITE, "partial_agg") + ); + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + private final GroupingAggregatorFunction delegate; + private final List channels; + + public ToPartialGroupingAggregatorFunction(GroupingAggregatorFunction delegate, List channels) { + this.delegate = delegate; + this.channels = channels; + } + + @Override + public AddInput prepareProcessPage(SeenGroupIds seenGroupIds, Page page) { + return delegate.prepareProcessPage(seenGroupIds, page); + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groupIdVector, Page page) { + final CompositeBlock inputBlock = page.getBlock(channels.get(0)); + delegate.addIntermediateInput(positionOffset, groupIdVector, inputBlock.asPage()); + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input instanceof ToPartialGroupingAggregatorFunction toPartial) { + input = toPartial.delegate; + } + delegate.addIntermediateRowInput(groupId, input, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + final Block[] partialBlocks = new Block[delegate.intermediateBlockCount()]; + boolean success = false; + try { + delegate.evaluateIntermediate(partialBlocks, 0, selected); + blocks[offset] = new CompositeBlock(partialBlocks); + success = true; + } finally { + if (success == false) { + Releasables.close(partialBlocks); + } + } + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + evaluateIntermediate(blocks, offset, selected); + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void close() { + Releasables.close(delegate); + } + + @Override + public String toString() { + return getClass().getSimpleName() + "[" + "channels=" + channels + ",delegate=" + delegate + "]"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ArrayState.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ArrayState.java.st index 18686928f14a8..10dbd9f423725 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ArrayState.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ArrayState.java.st @@ -8,7 +8,11 @@ package org.elasticsearch.compute.aggregation; import org.elasticsearch.common.util.BigArrays; +$if(boolean)$ +import org.elasticsearch.common.util.BitArray; +$else$ import org.elasticsearch.common.util.$Type$Array; +$endif$ import org.elasticsearch.compute.data.Block; $if(long)$ import org.elasticsearch.compute.data.IntVector; @@ -17,7 +21,7 @@ import org.elasticsearch.compute.data.$Type$Block; $if(int)$ import org.elasticsearch.compute.data.$Type$Vector; $endif$ -$if(double||float)$ +$if(boolean||double||float)$ import org.elasticsearch.compute.data.IntVector; $endif$ import org.elasticsearch.compute.operator.DriverContext; @@ -41,11 +45,22 @@ import org.elasticsearch.core.Releasables; final class $Type$ArrayState extends AbstractArrayState implements GroupingAggregatorState { private final $type$ init; +$if(boolean)$ + private 
BitArray values; + private int size; + +$else$ private $Type$Array values; +$endif$ $Type$ArrayState(BigArrays bigArrays, $type$ init) { super(bigArrays); +$if(boolean)$ + this.values = new BitArray(1, bigArrays); + this.size = 1; +$else$ this.values = bigArrays.new$Type$Array(1, false); +$endif$ this.values.set(0, init); this.init = init; } @@ -95,11 +110,18 @@ $endif$ } private void ensureCapacity(int groupId) { +$if(boolean)$ + if (groupId >= size) { + values.fill(size, groupId + 1, init); + size = groupId + 1; + } +$else$ if (groupId >= values.size()) { long prevSize = values.size(); values = bigArrays.grow(values, groupId + 1); values.fill(prevSize, values.size(), init); } +$endif$ } /** Extracts an intermediate view of the contents of this state. */ @@ -120,7 +142,7 @@ $endif$ if (group < values.size()) { valuesBuilder.append$Type$(values.get(group)); } else { - valuesBuilder.append$Type$(0); // TODO can we just use null? + valuesBuilder.append$Type$($if(boolean)$false$else$0$endif$); // TODO can we just use null? } hasValueBuilder.appendBoolean(i, hasValue(group)); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-State.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-State.java.st index 427d1a0c312cc..2d2d706c9454f 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-State.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-State.java.st @@ -19,7 +19,11 @@ final class $Type$State implements AggregatorState { private boolean seen; $Type$State() { +$if(boolean)$ + this(false); +$else$ this(0); +$endif$ } $Type$State($type$ init) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-TopListAggregator.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-TopAggregator.java.st similarity index 97% rename from x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-TopListAggregator.java.st rename to x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-TopAggregator.java.st index 810311154503e..41d0224f37214 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-TopListAggregator.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-TopAggregator.java.st @@ -29,9 +29,9 @@ import org.elasticsearch.search.sort.SortOrder; /** * Aggregates the top N field values for $type$. 
*/ -@Aggregator({ @IntermediateState(name = "topList", type = "$TYPE$_BLOCK") }) +@Aggregator({ @IntermediateState(name = "top", type = "$TYPE$_BLOCK") }) @GroupingAggregator -class TopList$Type$Aggregator { +class Top$Type$Aggregator { public static SingleState initSingle(BigArrays bigArrays, int limit, boolean ascending) { return new SingleState(bigArrays, limit, ascending); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/table/RowInTableLookup.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/table/RowInTableLookup.java index 1303fc701c595..4fa582e761e18 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/table/RowInTableLookup.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/table/RowInTableLookup.java @@ -45,13 +45,8 @@ public static RowInTableLookup build(BlockFactory blockFactory, Block[] keys) { "keys must have the same number of positions but [" + positions + "] != [" + keys[k].getPositionCount() + "]" ); } - if (keys[k].mayHaveMultivaluedFields()) { - for (int p = 0; p < keys[k].getPositionCount(); p++) { - if (keys[k].getValueCount(p) > 1) { - // TODO double check these errors over REST once we have LOOKUP - throw new IllegalArgumentException("only single valued keys are supported"); - } - } + if (keys[k].doesHaveMultivaluedFields()) { + throw new IllegalArgumentException("only single valued keys are supported"); } } if (positions == 0) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractArrayBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractArrayBlock.java index 9b56c2f6bd63f..22f8dded57320 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractArrayBlock.java @@ -46,6 +46,19 @@ public final boolean mayHaveMultivaluedFields() { return firstValueIndexes != null; } + @Override + public boolean doesHaveMultivaluedFields() { + if (false == mayHaveMultivaluedFields()) { + return false; + } + for (int p = 0; p < getPositionCount(); p++) { + if (getValueCount(p) > 1) { + return true; + } + } + return false; + } + @Override public final MvOrdering mvOrdering() { return mvOrdering; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVectorBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVectorBlock.java index fb52cc39f44d2..b70d738946048 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVectorBlock.java @@ -46,6 +46,11 @@ public final boolean mayHaveMultivaluedFields() { return false; } + @Override + public boolean doesHaveMultivaluedFields() { + return false; + } + @Override public final MvOrdering mvOrdering() { return MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java index 282bc9064b308..302defa03473f 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java @@ -116,10 +116,21 
@@ public interface Block extends Accountable, BlockLoader.Block, NamedWriteable, R /** * Can this block have multivalued fields? Blocks that return {@code false} - * will never return more than one from {@link #getValueCount}. + * will never return more than one from {@link #getValueCount}. This may + * return {@code true} for Blocks that do not have multivalued fields, but + * it will always answer quickly. */ boolean mayHaveMultivaluedFields(); + /** + * Does this block have multivalued fields? Unlike {@link #mayHaveMultivaluedFields} + * this will never return a false positive. In other words, if this returns + * {@code true} then there are positions for which {@link #getValueCount} + * will return more than 1. This will answer quickly if it can but may have + * to check all positions. + */ + boolean doesHaveMultivaluedFields(); + /** * Creates a new block that only exposes the positions provided. * @param positions the positions to retain diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/CompositeBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/CompositeBlock.java index c107ea53bd7f4..dbbcf905a2e57 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/CompositeBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/CompositeBlock.java @@ -58,6 +58,10 @@ public B getBlock(int blockIndex) { return block; } + public Page asPage() { + return new Page(positionCount, blocks); + } + /** * Returns the number of blocks in this composite block. */ @@ -132,6 +136,14 @@ public boolean mayHaveMultivaluedFields() { return Arrays.stream(blocks).anyMatch(Block::mayHaveMultivaluedFields); } + @Override + public boolean doesHaveMultivaluedFields() { + if (false == Arrays.stream(blocks).anyMatch(Block::mayHaveMultivaluedFields)) { + return false; + } + return Arrays.stream(blocks).anyMatch(Block::doesHaveMultivaluedFields); + } + @Override public CompositeBlock filter(int... 
positions) { CompositeBlock result = null; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java index 2c0f4c8946753..876cbc499bec6 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java @@ -68,6 +68,11 @@ public boolean mayHaveMultivaluedFields() { return false; } + @Override + public boolean doesHaveMultivaluedFields() { + return false; + } + @Override public ElementType elementType() { return ElementType.NULL; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefBlock.java index 321c319f06671..b8ac46e790acd 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefBlock.java @@ -191,6 +191,11 @@ public boolean mayHaveMultivaluedFields() { return ordinals.mayHaveMultivaluedFields(); } + @Override + public boolean doesHaveMultivaluedFields() { + return ordinals.mayHaveMultivaluedFields(); + } + @Override public MvOrdering mvOrdering() { return ordinals.mvOrdering(); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java index 061cefc86bed0..0fed88370a144 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java @@ -21,13 +21,11 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.seqno.LocalCheckpointTracker; import org.elasticsearch.index.seqno.SequenceNumbers; -import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.Map; import java.util.Objects; -import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.atomic.LongAdder; /** @@ -40,7 +38,7 @@ public abstract class AsyncOperator implements Operator { private volatile SubscribableListener blockedFuture; private final Map buffers = ConcurrentCollections.newConcurrentMap(); - private final AtomicReference failure = new AtomicReference<>(); + private final FailureCollector failureCollector = new FailureCollector(); private final DriverContext driverContext; private final int maxOutstandingRequests; @@ -77,7 +75,7 @@ public boolean needsInput() { @Override public void addInput(Page input) { - if (failure.get() != null) { + if (failureCollector.hasFailure()) { input.releaseBlocks(); return; } @@ -90,7 +88,7 @@ public void addInput(Page input) { onSeqNoCompleted(seqNo); }, e -> { releasePageOnAnyThread(input); - onFailure(e); + failureCollector.unwrapAndCollect(e); onSeqNoCompleted(seqNo); }); final long startNanos = System.nanoTime(); @@ -121,31 +119,12 @@ private void releasePageOnAnyThread(Page page) { protected abstract void doClose(); - private void onFailure(Exception e) { - failure.getAndUpdate(first -> { - if (first == null) { - return e; - } - // ignore subsequent TaskCancelledException exceptions as they don't 
provide useful info. - if (ExceptionsHelper.unwrap(e, TaskCancelledException.class) != null) { - return first; - } - if (ExceptionsHelper.unwrap(first, TaskCancelledException.class) != null) { - return e; - } - if (ExceptionsHelper.unwrapCause(first) != ExceptionsHelper.unwrapCause(e)) { - first.addSuppressed(e); - } - return first; - }); - } - private void onSeqNoCompleted(long seqNo) { checkpoint.markSeqNoAsProcessed(seqNo); if (checkpoint.getPersistedCheckpoint() < checkpoint.getProcessedCheckpoint()) { notifyIfBlocked(); } - if (closed || failure.get() != null) { + if (closed || failureCollector.hasFailure()) { discardPages(); } } @@ -164,7 +143,7 @@ private void notifyIfBlocked() { } private void checkFailure() { - Exception e = failure.get(); + Exception e = failureCollector.getFailure(); if (e != null) { discardPages(); throw ExceptionsHelper.convertToElastic(e); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverRunner.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverRunner.java index 5de017fbd279e..b427a36566f11 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverRunner.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverRunner.java @@ -7,14 +7,11 @@ package org.elasticsearch.compute.operator; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.tasks.TaskCancelledException; import java.util.List; -import java.util.concurrent.atomic.AtomicReference; /** * Run a set of drivers to completion. @@ -35,8 +32,8 @@ public DriverRunner(ThreadContext threadContext) { * Run all drivers to completion asynchronously. 
*/ public void runToCompletion(List drivers, ActionListener listener) { - AtomicReference failure = new AtomicReference<>(); var responseHeadersCollector = new ResponseHeadersCollector(threadContext); + var failure = new FailureCollector(); CountDown counter = new CountDown(drivers.size()); for (int i = 0; i < drivers.size(); i++) { Driver driver = drivers.get(i); @@ -48,23 +45,7 @@ public void onResponse(Void unused) { @Override public void onFailure(Exception e) { - failure.getAndUpdate(first -> { - if (first == null) { - return e; - } - if (ExceptionsHelper.unwrap(e, TaskCancelledException.class) != null) { - return first; - } else { - if (ExceptionsHelper.unwrap(first, TaskCancelledException.class) != null) { - return e; - } else { - if (first != e) { - first.addSuppressed(e); - } - return first; - } - } - }); + failure.unwrapAndCollect(e); for (Driver d : drivers) { if (driver != d) { d.cancel("Driver [" + driver.sessionId() + "] was cancelled or failed"); @@ -77,7 +58,7 @@ private void done() { responseHeadersCollector.collect(); if (counter.countDown()) { responseHeadersCollector.finish(); - Exception error = failure.get(); + Exception error = failure.getFailure(); if (error != null) { listener.onFailure(error); } else { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FailureCollector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FailureCollector.java new file mode 100644 index 0000000000000..99edab038af31 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FailureCollector.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.tasks.TaskCancelledException; +import org.elasticsearch.transport.TransportException; + +import java.util.List; +import java.util.Queue; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * {@code FailureCollector} is responsible for collecting exceptions that occur in the compute engine. + * The collected exceptions are categorized into task-cancelled and non-task-cancelled exceptions. + * To limit memory usage, this class collects only the first 10 exceptions in each category by default. + * When returning the accumulated failure to the caller, this class prefers non-task-cancelled exceptions + * over task-cancelled ones as they are more useful for diagnosing issues. 
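[Editor's note, not part of the diff: a minimal usage sketch of the FailureCollector API described above (unwrapAndCollect, hasFailure, getFailure), based only on the signatures added in this file. FailureCollectorUsage and runAll are made-up illustrative names; in the PR itself the callers that switch to this pattern are AsyncOperator, DriverRunner, and ExchangeSourceHandler.]
```
import org.elasticsearch.compute.operator.FailureCollector;

import java.util.List;

class FailureCollectorUsage {
    static void runAll(List<Runnable> tasks) {
        FailureCollector failures = new FailureCollector(); // keeps at most 10 exceptions per category by default
        for (Runnable task : tasks) {
            try {
                task.run();
            } catch (Exception e) {
                failures.unwrapAndCollect(e); // safe to call from several threads; TransportException wrappers are unwrapped
            }
        }
        if (failures.hasFailure()) {
            // getFailure() prefers non-cancellation exceptions and attaches the rest as suppressed.
            throw new RuntimeException(failures.getFailure());
        }
    }
}
```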
+ */ +public final class FailureCollector { + private final Queue cancelledExceptions = ConcurrentCollections.newQueue(); + private final AtomicInteger cancelledExceptionsCount = new AtomicInteger(); + + private final Queue nonCancelledExceptions = ConcurrentCollections.newQueue(); + private final AtomicInteger nonCancelledExceptionsCount = new AtomicInteger(); + + private final int maxExceptions; + private volatile boolean hasFailure = false; + private Exception finalFailure = null; + + public FailureCollector() { + this(10); + } + + public FailureCollector(int maxExceptions) { + if (maxExceptions <= 0) { + throw new IllegalArgumentException("maxExceptions must be at least one"); + } + this.maxExceptions = maxExceptions; + } + + public void unwrapAndCollect(Exception originEx) { + final Exception e = originEx instanceof TransportException + ? (originEx.getCause() instanceof Exception cause ? cause : new ElasticsearchException(originEx.getCause())) + : originEx; + if (ExceptionsHelper.unwrap(e, TaskCancelledException.class) != null) { + if (cancelledExceptionsCount.incrementAndGet() <= maxExceptions) { + cancelledExceptions.add(e); + } + } else { + if (nonCancelledExceptionsCount.incrementAndGet() <= maxExceptions) { + nonCancelledExceptions.add(e); + } + } + hasFailure = true; + } + + /** + * @return {@code true} if any failure has been collected, {@code false} otherwise + */ + public boolean hasFailure() { + return hasFailure; + } + + /** + * Returns the accumulated failure, preferring non-task-cancelled exceptions over task-cancelled ones. + * Once this method builds the failure, incoming failures are discarded. + * + * @return the accumulated failure, or {@code null} if no failure has been collected + */ + public Exception getFailure() { + if (hasFailure == false) { + return null; + } + synchronized (this) { + if (finalFailure == null) { + finalFailure = buildFailure(); + } + return finalFailure; + } + } + + private Exception buildFailure() { + assert hasFailure; + assert Thread.holdsLock(this); + int total = 0; + Exception first = null; + for (var exceptions : List.of(nonCancelledExceptions, cancelledExceptions)) { + for (Exception e : exceptions) { + if (first == null) { + first = e; + total++; + } else if (first != e) { + first.addSuppressed(e); + total++; + } + if (total >= maxExceptions) { + return first; + } + } + } + assert first != null; + return first; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java index f647f4fba0225..a365a655370a2 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java @@ -250,21 +250,20 @@ public boolean isForceExecution() { protected void doRun() { assert Transports.assertNotTransportThread("reaping inactive exchanges can be expensive"); assert ThreadPool.assertNotScheduleThread("reaping inactive exchanges can be expensive"); + logger.debug("start removing inactive sinks"); final long nowInMillis = threadPool.relativeTimeInMillis(); for (Map.Entry e : sinks.entrySet()) { ExchangeSinkHandler sink = e.getValue(); if (sink.hasData() && sink.hasListeners()) { continue; } - long elapsed = nowInMillis - sink.lastUpdatedTimeInMillis(); - if (elapsed > keepAlive.millis()) { + long elapsedInMillis = 
nowInMillis - sink.lastUpdatedTimeInMillis(); + if (elapsedInMillis > keepAlive.millis()) { + TimeValue elapsedTime = TimeValue.timeValueMillis(elapsedInMillis); + logger.debug("removed sink {} inactive for {}", e.getKey(), elapsedTime); finishSinkHandler( e.getKey(), - new ElasticsearchTimeoutException( - "Exchange sink {} has been inactive for {}", - e.getKey(), - TimeValue.timeValueMillis(elapsed) - ) + new ElasticsearchTimeoutException("Exchange sink {} has been inactive for {}", e.getKey(), elapsedTime) ); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java index adce8d8a88407..77b535949eb9d 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java @@ -7,21 +7,18 @@ package org.elasticsearch.compute.operator.exchange; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.FailureCollector; import org.elasticsearch.core.Releasable; -import org.elasticsearch.tasks.TaskCancelledException; -import org.elasticsearch.transport.TransportException; import java.util.List; import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; /** * An {@link ExchangeSourceHandler} asynchronously fetches pages and status from multiple {@link RemoteSink}s @@ -37,7 +34,7 @@ public final class ExchangeSourceHandler { private final PendingInstances outstandingSinks; private final PendingInstances outstandingSources; - private final AtomicReference failure = new AtomicReference<>(); + private final FailureCollector failure = new FailureCollector(); public ExchangeSourceHandler(int maxBufferSize, Executor fetchExecutor) { this.buffer = new ExchangeBuffer(maxBufferSize); @@ -54,7 +51,7 @@ private class ExchangeSourceImpl implements ExchangeSource { } private void checkFailure() { - Exception e = failure.get(); + Exception e = failure.getFailure(); if (e != null) { throw ExceptionsHelper.convertToElastic(e); } @@ -172,7 +169,7 @@ void fetchPage() { while (loopControl.isRunning()) { loopControl.exiting(); // finish other sinks if one of them failed or source no longer need pages. - boolean toFinishSinks = buffer.noMoreInputs() || failure.get() != null; + boolean toFinishSinks = buffer.noMoreInputs() || failure.hasFailure(); remoteSink.fetchPageAsync(toFinishSinks, ActionListener.wrap(resp -> { Page page = resp.takePage(); if (page != null) { @@ -199,26 +196,8 @@ void fetchPage() { loopControl.exited(); } - void onSinkFailed(Exception originEx) { - final Exception e = originEx instanceof TransportException - ? (originEx.getCause() instanceof Exception cause ? cause : new ElasticsearchException(originEx.getCause())) - : originEx; - failure.getAndUpdate(first -> { - if (first == null) { - return e; - } - // ignore subsequent TaskCancelledException exceptions as they don't provide useful info. 
- if (ExceptionsHelper.unwrap(e, TaskCancelledException.class) != null) { - return first; - } - if (ExceptionsHelper.unwrap(first, TaskCancelledException.class) != null) { - return e; - } - if (ExceptionsHelper.unwrapCause(first) != ExceptionsHelper.unwrapCause(e)) { - first.addSuppressed(e); - } - return first; - }); + void onSinkFailed(Exception e) { + failure.unwrapAndCollect(e); buffer.waitForReading().onResponse(null); // resume the Driver if it is being blocked on reading onSinkComplete(); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxBooleanAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxBooleanAggregatorFunctionTests.java new file mode 100644 index 0000000000000..11119aade12ff --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxBooleanAggregatorFunctionTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.operator.SequenceBooleanBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; + +import java.util.Comparator; +import java.util.List; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.equalTo; + +public class MaxBooleanAggregatorFunctionTests extends AggregatorFunctionTestCase { + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + return new SequenceBooleanBlockSourceOperator(blockFactory, IntStream.range(0, size).mapToObj(l -> randomBoolean()).toList()); + } + + @Override + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new MaxBooleanAggregatorFunctionSupplier(inputChannels); + } + + @Override + protected String expectedDescriptionOfAggregator() { + return "max of booleans"; + } + + @Override + public void assertSimpleOutput(List input, Block result) { + Boolean max = input.stream().flatMap(b -> allBooleans(b)).max(Comparator.naturalOrder()).get(); + assertThat(((BooleanBlock) result).getBoolean(0), equalTo(max)); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinBooleanAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinBooleanAggregatorFunctionTests.java new file mode 100644 index 0000000000000..74cdca31da34b --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinBooleanAggregatorFunctionTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.operator.SequenceBooleanBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; + +import java.util.Comparator; +import java.util.List; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.equalTo; + +public class MinBooleanAggregatorFunctionTests extends AggregatorFunctionTestCase { + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + return new SequenceBooleanBlockSourceOperator(blockFactory, IntStream.range(0, size).mapToObj(l -> randomBoolean()).toList()); + } + + @Override + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new MinBooleanAggregatorFunctionSupplier(inputChannels); + } + + @Override + protected String expectedDescriptionOfAggregator() { + return "min of booleans"; + } + + @Override + public void assertSimpleOutput(List input, Block result) { + Boolean min = input.stream().flatMap(b -> allBooleans(b)).min(Comparator.naturalOrder()).get(); + assertThat(((BooleanBlock) result).getBoolean(0), equalTo(min)); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListDoubleAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopDoubleAggregatorFunctionTests.java similarity index 87% rename from x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListDoubleAggregatorFunctionTests.java rename to x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopDoubleAggregatorFunctionTests.java index f708038776032..817df4ba47130 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListDoubleAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopDoubleAggregatorFunctionTests.java @@ -18,7 +18,7 @@ import static org.hamcrest.Matchers.contains; -public class TopListDoubleAggregatorFunctionTests extends AggregatorFunctionTestCase { +public class TopDoubleAggregatorFunctionTests extends AggregatorFunctionTestCase { private static final int LIMIT = 100; @Override @@ -28,12 +28,12 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { @Override protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { - return new TopListDoubleAggregatorFunctionSupplier(inputChannels, LIMIT, true); + return new TopDoubleAggregatorFunctionSupplier(inputChannels, LIMIT, true); } @Override protected String expectedDescriptionOfAggregator() { - return "top_list of doubles"; + return "top of doubles"; } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListFloatAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopFloatAggregatorFunctionTests.java similarity index 87% rename from x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListFloatAggregatorFunctionTests.java rename to x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopFloatAggregatorFunctionTests.java index 98a016783955e..c565a13fb73d4 100644 --- 
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListFloatAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopFloatAggregatorFunctionTests.java @@ -18,7 +18,7 @@ import static org.hamcrest.Matchers.contains; -public class TopListFloatAggregatorFunctionTests extends AggregatorFunctionTestCase { +public class TopFloatAggregatorFunctionTests extends AggregatorFunctionTestCase { private static final int LIMIT = 100; @Override @@ -28,12 +28,12 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { @Override protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { - return new TopListFloatAggregatorFunctionSupplier(inputChannels, LIMIT, true); + return new TopFloatAggregatorFunctionSupplier(inputChannels, LIMIT, true); } @Override protected String expectedDescriptionOfAggregator() { - return "top_list of floats"; + return "top of floats"; } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListIntAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopIntAggregatorFunctionTests.java similarity index 87% rename from x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListIntAggregatorFunctionTests.java rename to x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopIntAggregatorFunctionTests.java index 443604efd5c15..a0ac1a685413e 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListIntAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopIntAggregatorFunctionTests.java @@ -18,7 +18,7 @@ import static org.hamcrest.Matchers.contains; -public class TopListIntAggregatorFunctionTests extends AggregatorFunctionTestCase { +public class TopIntAggregatorFunctionTests extends AggregatorFunctionTestCase { private static final int LIMIT = 100; @Override @@ -28,12 +28,12 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { @Override protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { - return new TopListIntAggregatorFunctionSupplier(inputChannels, LIMIT, true); + return new TopIntAggregatorFunctionSupplier(inputChannels, LIMIT, true); } @Override protected String expectedDescriptionOfAggregator() { - return "top_list of ints"; + return "top of ints"; } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListLongAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopLongAggregatorFunctionTests.java similarity index 87% rename from x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListLongAggregatorFunctionTests.java rename to x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopLongAggregatorFunctionTests.java index 4a6f101e573b8..7ec1fb9c53053 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopListLongAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/TopLongAggregatorFunctionTests.java @@ -18,7 +18,7 @@ import static org.hamcrest.Matchers.contains; -public class TopListLongAggregatorFunctionTests extends AggregatorFunctionTestCase { +public class 
TopLongAggregatorFunctionTests extends AggregatorFunctionTestCase { private static final int LIMIT = 100; @Override @@ -28,12 +28,12 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { @Override protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { - return new TopListLongAggregatorFunctionSupplier(inputChannels, LIMIT, true); + return new TopLongAggregatorFunctionSupplier(inputChannels, LIMIT, true); } @Override protected String expectedDescriptionOfAggregator() { - return "top_list of longs"; + return "top of longs"; } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java index 81c32670289c2..311446f184ebf 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java @@ -184,6 +184,7 @@ static void assertSingleValueDenseBlock(Block initialBlock) { assertThat(block.mayHaveNulls(), is(false)); assertThat(block.areAllValuesNull(), is(false)); assertThat(block.mayHaveMultivaluedFields(), is(false)); + assertThat(block.doesHaveMultivaluedFields(), is(false)); initialBlock = block.asVector().asBlock(); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockMultiValuedTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockMultiValuedTests.java index 89e44a1763b0f..c5e130726844d 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockMultiValuedTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockMultiValuedTests.java @@ -66,6 +66,7 @@ public void testMultiValued() { } assertThat(b.block().mayHaveMultivaluedFields(), equalTo(b.values().stream().anyMatch(l -> l != null && l.size() > 1))); + assertThat(b.block().doesHaveMultivaluedFields(), equalTo(b.values().stream().anyMatch(l -> l != null && l.size() > 1))); } finally { b.block().close(); } @@ -151,6 +152,8 @@ private void assertFiltered(boolean all, boolean shuffled) { filtered.close(); } assertThat(b.block().mayHaveMultivaluedFields(), equalTo(b.values().stream().anyMatch(l -> l != null && l.size() > 1))); + assertThat(b.block().doesHaveMultivaluedFields(), equalTo(b.values().stream().anyMatch(l -> l != null && l.size() > 1))); + } finally { b.block().close(); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValueSourceReaderTypeConversionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValueSourceReaderTypeConversionTests.java index 66bcf2a57e393..e88f6141049be 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValueSourceReaderTypeConversionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValueSourceReaderTypeConversionTests.java @@ -62,6 +62,7 @@ import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.BlockLoader; import org.elasticsearch.index.mapper.FieldNamesFieldMapper; @@ -543,6 +544,11 @@ public String indexName() { return "test_index"; } + @Override + public IndexSettings indexSettings() { + throw 
new UnsupportedOperationException(); + } + @Override public MappedFieldType.FieldExtractPreference fieldExtractPreference() { return MappedFieldType.FieldExtractPreference.NONE; @@ -1687,12 +1693,13 @@ public StoredFieldsSpec rowStrideStoredFieldSpec() { @Override public boolean supportsOrdinals() { - return delegate.supportsOrdinals(); + // Fields with mismatching types cannot use ordinals for uniqueness determination, but must convert the values first + return false; } @Override - public SortedSetDocValues ordinals(LeafReaderContext context) throws IOException { - return delegate.ordinals(context); + public SortedSetDocValues ordinals(LeafReaderContext context) { + throw new IllegalArgumentException("Ordinals are not supported for type conversion"); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java index f4c545142508c..848415c4490fa 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java @@ -51,6 +51,7 @@ import org.elasticsearch.compute.operator.PageConsumerOperator; import org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.BlockLoader; import org.elasticsearch.index.mapper.FieldNamesFieldMapper; @@ -497,6 +498,11 @@ public String indexName() { return "test_index"; } + @Override + public IndexSettings indexSettings() { + throw new UnsupportedOperationException(); + } + @Override public MappedFieldType.FieldExtractPreference fieldExtractPreference() { return MappedFieldType.FieldExtractPreference.NONE; diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FailureCollectorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FailureCollectorTests.java new file mode 100644 index 0000000000000..d5fa0a1eaecc9 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FailureCollectorTests.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.tasks.TaskCancelledException; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.RemoteTransportException; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.util.List; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.lessThan; + +public class FailureCollectorTests extends ESTestCase { + + public void testCollect() throws Exception { + int maxExceptions = between(1, 100); + FailureCollector collector = new FailureCollector(maxExceptions); + List cancelledExceptions = List.of( + new TaskCancelledException("user request"), + new TaskCancelledException("cross "), + new TaskCancelledException("on failure") + ); + List nonCancelledExceptions = List.of( + new IOException("i/o simulated"), + new IOException("disk broken"), + new CircuitBreakingException("low memory", CircuitBreaker.Durability.TRANSIENT), + new CircuitBreakingException("over limit", CircuitBreaker.Durability.TRANSIENT) + ); + List failures = Stream.concat( + IntStream.range(0, between(1, 500)).mapToObj(n -> randomFrom(cancelledExceptions)), + IntStream.range(0, between(1, 500)).mapToObj(n -> randomFrom(nonCancelledExceptions)) + ).collect(Collectors.toList()); + Randomness.shuffle(failures); + Queue queue = new ConcurrentLinkedQueue<>(failures); + Thread[] threads = new Thread[between(1, 4)]; + CyclicBarrier carrier = new CyclicBarrier(threads.length); + for (int i = 0; i < threads.length; i++) { + threads[i] = new Thread(() -> { + try { + carrier.await(10, TimeUnit.SECONDS); + } catch (Exception e) { + throw new AssertionError(e); + } + Exception ex; + while ((ex = queue.poll()) != null) { + if (randomBoolean()) { + collector.unwrapAndCollect(ex); + } else { + collector.unwrapAndCollect(new RemoteTransportException("disconnect", ex)); + } + if (randomBoolean()) { + assertTrue(collector.hasFailure()); + } + } + }); + threads[i].start(); + } + for (Thread thread : threads) { + thread.join(); + } + assertTrue(collector.hasFailure()); + Exception failure = collector.getFailure(); + assertNotNull(failure); + assertThat(failure, Matchers.in(nonCancelledExceptions)); + assertThat(failure.getSuppressed().length, lessThan(maxExceptions)); + } + + public void testEmpty() { + FailureCollector collector = new FailureCollector(5); + assertFalse(collector.hasFailure()); + assertNull(collector.getFailure()); + } +} diff --git a/x-pack/plugin/esql/qa/action/src/internalClusterTest/java/org/elasticsearch/test/esql/qa/action/CoreEsqlActionIT.java b/x-pack/plugin/esql/qa/action/src/internalClusterTest/java/org/elasticsearch/test/esql/qa/action/CoreEsqlActionIT.java index 8728b605134ac..46fff385b5398 100644 --- a/x-pack/plugin/esql/qa/action/src/internalClusterTest/java/org/elasticsearch/test/esql/qa/action/CoreEsqlActionIT.java +++ b/x-pack/plugin/esql/qa/action/src/internalClusterTest/java/org/elasticsearch/test/esql/qa/action/CoreEsqlActionIT.java @@ -18,6 +18,9 @@ import org.elasticsearch.xpack.core.esql.action.EsqlQueryRequest; import 
org.elasticsearch.xpack.core.esql.action.EsqlQueryRequestBuilder; import org.elasticsearch.xpack.core.esql.action.EsqlQueryResponse; +import org.elasticsearch.xpack.core.esql.action.EsqlResponse; +import org.elasticsearch.xpack.esql.action.ColumnInfoImpl; +import org.elasticsearch.xpack.esql.core.type.DataType; import org.junit.Before; import java.util.ArrayList; @@ -52,11 +55,14 @@ protected Collection> nodePlugins() { public void testRowTypesAndValues() { var query = "row a = 1, b = \"x\", c = 1000000000000, d = 1.1"; var request = EsqlQueryRequestBuilder.newRequestBuilder(client()).query(query); - try (var queryResp = run(request)) { + try (EsqlQueryResponse queryResp = run(request)) { logger.info("response=" + queryResp); - var resp = queryResp.response(); + EsqlResponse resp = queryResp.response(); assertThat(resp.columns().stream().map(ColumnInfo::name).toList(), contains("a", "b", "c", "d")); - assertThat(resp.columns().stream().map(ColumnInfo::type).toList(), contains("integer", "keyword", "long", "double")); + assertThat( + resp.columns().stream().map(c -> ((ColumnInfoImpl) c).type()).toList(), + contains(DataType.INTEGER, DataType.KEYWORD, DataType.LONG, DataType.DOUBLE) + ); assertThat(getValuesList(resp.rows()), contains(List.of(1, "x", 1000000000000L, 1.1d))); } } @@ -68,7 +74,7 @@ public void testRowStatsProjectGroupByInt() { logger.info("response=" + queryResp); var resp = queryResp.response(); assertThat(resp.columns().stream().map(ColumnInfo::name).toList(), contains("a")); - assertThat(resp.columns().stream().map(ColumnInfo::type).toList(), contains("integer")); + assertThat(resp.columns().stream().map(c -> ((ColumnInfoImpl) c).type()).toList(), contains(DataType.INTEGER)); assertThat(getValuesList(resp.rows()), contains(List.of(1))); } } @@ -80,7 +86,10 @@ public void testFrom() { var resp = queryResp.response(); logger.info("response=" + queryResp); assertThat(resp.columns().stream().map(ColumnInfo::name).toList(), contains("item", "cost", "color", "sale")); - assertThat(resp.columns().stream().map(ColumnInfo::type).toList(), contains("long", "double", "keyword", "date")); + assertThat( + resp.columns().stream().map(c -> ((ColumnInfoImpl) c).type()).toList(), + contains(DataType.LONG, DataType.DOUBLE, DataType.KEYWORD, DataType.DATETIME) + ); // columnar values assertThat(columnValues(resp.column(0)), contains(1L, 2L, 3L, 4L)); assertThat(columnValues(resp.column(1)), contains(1.1d, 2.1d, 3.1d, 4.1d)); diff --git a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlAsyncSecurityIT.java b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlAsyncSecurityIT.java index 544eb82fb5ace..443813442165a 100644 --- a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlAsyncSecurityIT.java +++ b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlAsyncSecurityIT.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql; +import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; @@ -36,7 +37,8 @@ protected Response runESQLCommand(String user, String command) throws IOExceptio var respMap = entityAsMap(response.getEntity()); String id = (String) respMap.get("id"); assertThat((boolean) respMap.get("is_running"), either(is(true)).or(is(false))); - var getResponse = runAsyncGet(user, id); + int tries = 0; + Response getResponse = 
runAsyncGet(user, id); assertOK(getResponse); var deleteResponse = runAsyncDelete(user, id); assertOK(deleteResponse); @@ -98,6 +100,7 @@ private Response runAsync(String user, String command) throws IOException { Request request = new Request("POST", "_query/async"); request.setJsonEntity(Strings.toString(json)); request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("es-security-runas-user", user)); + request.addParameter("error_trace", "true"); logRequest(request); Response response = client().performRequest(request); logResponse(response); @@ -105,19 +108,45 @@ private Response runAsync(String user, String command) throws IOException { } private Response runAsyncGet(String user, String id) throws IOException { - Request getRequest = new Request("GET", "_query/async/" + id + "?wait_for_completion_timeout=60s"); - getRequest.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("es-security-runas-user", user)); - logRequest(getRequest); - var response = client().performRequest(getRequest); - logResponse(response); - return response; + int tries = 0; + while (tries < 10) { + // Sometimes we get 404s fetching the task status. + try { + Request getRequest = new Request("GET", "_query/async/" + id + "?wait_for_completion_timeout=60s"); + getRequest.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("es-security-runas-user", user)); + getRequest.addParameter("error_trace", "true"); + logRequest(getRequest); + var response = client().performRequest(getRequest); + logResponse(response); + return response; + } catch (ResponseException e) { + if (e.getResponse().getStatusLine().getStatusCode() == 404 + && EntityUtils.toString(e.getResponse().getEntity()).contains("no such index [.async-search]")) { + /* + * Work around https://github.com/elastic/elasticsearch/issues/110304 - the .async-search + * index may not exist when we try the fetch, but it should exist on next attempt. 
+ */ + logger.warn("async-search index does not exist", e); + try { + Thread.sleep(1000); + } catch (InterruptedException ex) { + throw new RuntimeException(ex); + } + } else { + throw e; + } + tries++; + } + } + throw new IllegalStateException("couldn't find task status"); } private Response runAsyncDelete(String user, String id) throws IOException { - Request getRequest = new Request("DELETE", "_query/async/" + id); - getRequest.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("es-security-runas-user", user)); - logRequest(getRequest); - var response = client().performRequest(getRequest); + Request deleteRequest = new Request("DELETE", "_query/async/" + id); + deleteRequest.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("es-security-runas-user", user)); + deleteRequest.addParameter("error_trace", "true"); + logRequest(deleteRequest); + var response = client().performRequest(deleteRequest); logResponse(response); return response; } diff --git a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java index d7e146cd6d7c1..faa2eb9bd82b0 100644 --- a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java +++ b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java @@ -142,6 +142,7 @@ public void testInsufficientPrivilege() { Exception.class, () -> runESQLCommand("metadata1_read2", "FROM index-user1,index-user2 | STATS sum=sum(value)") ); + logger.info("error", error); assertThat( error.getMessage(), containsString( diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java index cbfa043b9dc5d..5676a8bce3ede 100644 --- a/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java @@ -56,14 +56,22 @@ public static void cleanUp() { oldClusterTestFeatureService = null; } - public MixedClusterEsqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase, Mode mode) { - super(fileName, groupName, testName, lineNumber, testCase, mode); + public MixedClusterEsqlSpecIT( + String fileName, + String groupName, + String testName, + Integer lineNumber, + CsvTestCase testCase, + String instructions, + Mode mode + ) { + super(fileName, groupName, testName, lineNumber, testCase, instructions, mode); } @Override protected void shouldSkipTest(String testName) throws IOException { super.shouldSkipTest(testName); - assumeTrue("Test " + testName + " is skipped on " + bwcVersion, isEnabled(testName, bwcVersion)); + assumeTrue("Test " + testName + " is skipped on " + bwcVersion, isEnabled(testName, instructions, bwcVersion)); if (mode == ASYNC) { assumeTrue("Async is not supported on " + bwcVersion, supportsAsync()); } @@ -73,4 +81,9 @@ protected void shouldSkipTest(String testName) throws IOException { protected boolean supportsAsync() { return oldClusterHasFeature(ASYNC_QUERY_FEATURE_ID); } + + @Override + protected boolean enableRoundingDoubleValuesOnAsserting() { + return true; + } } diff --git 
a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java index 807d6cff1966c..fed9f49f1e046 100644 --- a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java @@ -88,8 +88,16 @@ public static List readScriptSpec() throws Exception { return testcases; } - public MultiClusterSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase, Mode mode) { - super(fileName, groupName, testName, lineNumber, convertToRemoteIndices(testCase), mode); + public MultiClusterSpecIT( + String fileName, + String groupName, + String testName, + Integer lineNumber, + CsvTestCase testCase, + String instructions, + Mode mode + ) { + super(fileName, groupName, testName, lineNumber, convertToRemoteIndices(testCase), instructions, mode); } @Override @@ -97,7 +105,10 @@ protected void shouldSkipTest(String testName) throws IOException { super.shouldSkipTest(testName); checkCapabilities(remoteClusterClient(), remoteFeaturesService(), testName, testCase); assumeFalse("can't test with _index metadata", hasIndexMetadata(testCase.query)); - assumeTrue("Test " + testName + " is skipped on " + Clusters.oldVersion(), isEnabled(testName, Clusters.oldVersion())); + assumeTrue( + "Test " + testName + " is skipped on " + Clusters.oldVersion(), + isEnabled(testName, instructions, Clusters.oldVersion()) + ); } private TestFeatureService remoteFeaturesService() throws IOException { @@ -242,4 +253,9 @@ static boolean hasIndexMetadata(String query) { } return false; } + + @Override + protected boolean enableRoundingDoubleValuesOnAsserting() { + return true; + } } diff --git a/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlSpecIT.java b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlSpecIT.java index aeb8fa96d0db3..93385ec9efd89 100644 --- a/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlSpecIT.java +++ b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlSpecIT.java @@ -21,7 +21,20 @@ protected String getTestRestCluster() { return cluster.getHttpAddresses(); } - public EsqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase, Mode mode) { - super(fileName, groupName, testName, lineNumber, testCase, mode); + public EsqlSpecIT( + String fileName, + String groupName, + String testName, + Integer lineNumber, + CsvTestCase testCase, + String instructions, + Mode mode + ) { + super(fileName, groupName, testName, lineNumber, testCase, instructions, mode); + } + + @Override + protected boolean enableRoundingDoubleValuesOnAsserting() { + return true; } } diff --git a/x-pack/plugin/esql/qa/server/single-node/build.gradle b/x-pack/plugin/esql/qa/server/single-node/build.gradle index 10366a500a532..865d7cf5f5e6c 100644 --- a/x-pack/plugin/esql/qa/server/single-node/build.gradle +++ b/x-pack/plugin/esql/qa/server/single-node/build.gradle @@ -7,6 +7,19 @@ dependencies { javaRestTestImplementation project(xpackModule('esql:qa:testFixtures')) 
javaRestTestImplementation project(xpackModule('esql:qa:server')) yamlRestTestImplementation project(xpackModule('esql:qa:server')) + + javaRestTestImplementation('org.apache.arrow:arrow-vector:16.1.0') + javaRestTestImplementation('org.apache.arrow:arrow-format:16.1.0') + javaRestTestImplementation('org.apache.arrow:arrow-memory-core:16.1.0') + javaRestTestImplementation('org.checkerframework:checker-qual:3.42.0') + javaRestTestImplementation('com.google.flatbuffers:flatbuffers-java:23.5.26') + javaRestTestImplementation("com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}") + javaRestTestImplementation("com.fasterxml.jackson.core:jackson-core:${versions.jackson}") + javaRestTestImplementation("com.fasterxml.jackson.core:jackson-databind:${versions.jackson}") + javaRestTestImplementation("org.slf4j:slf4j-api:${versions.slf4j}") + javaRestTestImplementation("org.slf4j:slf4j-nop:${versions.slf4j}") + javaRestTestImplementation('org.apache.arrow:arrow-memory-unsafe:16.1.0') + dependencies { clusterPlugins project(':plugins:mapper-size') clusterPlugins project(':plugins:mapper-murmur3') @@ -25,6 +38,7 @@ restResources { tasks.named('javaRestTest') { usesDefaultDistribution() maxParallelForks = 1 + jvmArgs('--add-opens=java.base/java.nio=ALL-UNNAMED') } tasks.named('yamlRestTest') { diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/ArrowFormatIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/ArrowFormatIT.java new file mode 100644 index 0000000000000..20d04977d21f3 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/ArrowFormatIT.java @@ -0,0 +1,242 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.qa.single_node; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.apache.arrow.memory.RootAllocator; +import org.apache.arrow.vector.IntVector; +import org.apache.arrow.vector.VarBinaryVector; +import org.apache.arrow.vector.VarCharVector; +import org.apache.arrow.vector.VectorSchemaRoot; +import org.apache.arrow.vector.ipc.ArrowStreamReader; +import org.apache.arrow.vector.types.pojo.Field; +import org.apache.arrow.vector.util.VectorSchemaRootAppender; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.ClassRule; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; + +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class ArrowFormatIT extends ESRestTestCase { + + private static final RootAllocator ALLOCATOR = new RootAllocator(); + + @AfterClass + public static void afterClass() { + ALLOCATOR.close(); + } + + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + @Before + @After + public void assertRequestBreakerEmpty() throws Exception { + EsqlSpecTestCase.assertRequestBreakerEmpty(); + } + + @Before + public void initIndex() throws IOException { + Request request = new Request("PUT", "/arrow-test"); + request.setJsonEntity(""" + { + "mappings": { + "properties": { + "value": { + "type": "integer" + }, + "description": { + "type": "keyword" + }, + "ip": { + "type": "ip" + }, + "v": { + "type": "version" + } + } + } + } + """); + assertEquals(200, client().performRequest(request).getStatusLine().getStatusCode()); + + request = new Request("POST", "/_bulk?index=arrow-test&refresh=true"); + // 4 documents with a null in the middle, leading to 3 ESQL pages and 3 Arrow batches + request.setJsonEntity(""" + {"index": {"_id": "1"}} + {"value": 1, "ip": "192.168.0.1", "v": "1.0.1", "description": "number one"} + {"index": {"_id": "2"}} + {"value": 2, "ip": "192.168.0.2", "v": "1.0.2", "description": "number two"} + {"index": {"_id": "3"}} + {"value": 3, "ip": "2001:db8::1:0:0:1"} + {"index": {"_id": "4"}} + {"value": 4, "ip": "::afff:4567:890a", "v": "1.0.4", "description": "number four"} + """); + assertEquals(200, client().performRequest(request).getStatusLine().getStatusCode()); + } + + private VectorSchemaRoot esql(String query) throws IOException { + Request request = new Request("POST", "/_query?format=arrow"); + request.setJsonEntity(query); + Response response = client().performRequest(request); + + assertEquals("application/vnd.apache.arrow.stream", response.getEntity().getContentType().getValue()); + return readArrow(response.getEntity().getContent()); + } + + public void testInteger() throws Exception { + try (VectorSchemaRoot root = esql(""" + { + "query": "FROM arrow-test | SORT value | LIMIT 100 | KEEP value" + }""")) { + List fields = root.getSchema().getFields(); + assertEquals(1, fields.size()); + + assertValues(root); + } + } + + public void testString() throws Exception { + try (VectorSchemaRoot root = esql(""" + { + "query": "FROM arrow-test | SORT value | 
LIMIT 100 | KEEP description" + }""")) { + List fields = root.getSchema().getFields(); + assertEquals(1, fields.size()); + + assertDescription(root); + } + } + + public void testIp() throws Exception { + try (VectorSchemaRoot root = esql(""" + { + "query": "FROM arrow-test | SORT value | LIMIT 100 | KEEP ip" + }""")) { + List fields = root.getSchema().getFields(); + assertEquals(1, fields.size()); + + assertIp(root); + } + } + + public void testVersion() throws Exception { + try (VectorSchemaRoot root = esql(""" + { + "query": "FROM arrow-test | SORT value | LIMIT 100 | KEEP v" + }""")) { + List fields = root.getSchema().getFields(); + assertEquals(1, fields.size()); + + assertVersion(root); + } + } + + public void testEverything() throws Exception { + try (VectorSchemaRoot root = esql(""" + { + "query": "FROM arrow-test | SORT value | LIMIT 100" + }""")) { + List fields = root.getSchema().getFields(); + assertEquals(4, fields.size()); + + assertDescription(root); + assertValues(root); + assertIp(root); + assertVersion(root); + } + } + + private VectorSchemaRoot readArrow(InputStream input) throws IOException { + try ( + ArrowStreamReader reader = new ArrowStreamReader(input, ALLOCATOR); + VectorSchemaRoot readerRoot = reader.getVectorSchemaRoot(); + ) { + VectorSchemaRoot root = VectorSchemaRoot.create(readerRoot.getSchema(), ALLOCATOR); + root.allocateNew(); + + while (reader.loadNextBatch()) { + VectorSchemaRootAppender.append(root, readerRoot); + } + + return root; + } + } + + private void assertValues(VectorSchemaRoot root) { + var valueVector = (IntVector) root.getVector("value"); + assertEquals(1, valueVector.get(0)); + assertEquals(2, valueVector.get(1)); + assertEquals(3, valueVector.get(2)); + assertEquals(4, valueVector.get(3)); + } + + private void assertDescription(VectorSchemaRoot root) { + var descVector = (VarCharVector) root.getVector("description"); + assertEquals("number one", descVector.getObject(0).toString()); + assertEquals("number two", descVector.getObject(1).toString()); + assertTrue(descVector.isNull(2)); + assertEquals("number four", descVector.getObject(3).toString()); + } + + private void assertIp(VectorSchemaRoot root) { + // Test data that has been transformed during output (ipV4 truncated to 32bits) + var ipVector = (VarBinaryVector) root.getVector("ip"); + assertArrayEquals(new byte[] { (byte) 192, (byte) 168, 0, 1 }, ipVector.getObject(0)); + assertArrayEquals(new byte[] { (byte) 192, (byte) 168, 0, 2 }, ipVector.getObject(1)); + assertArrayEquals( + new byte[] { 0x20, 0x01, 0x0d, (byte) 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }, + ipVector.getObject(2) + ); + assertArrayEquals( + new byte[] { + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + (byte) 0xaf, + (byte) 0xff, + 0x45, + 0x67, + (byte) 0x89, + 0x0A }, + ipVector.getObject(3) + ); + } + + private void assertVersion(VectorSchemaRoot root) { + // Version is binary-encoded in ESQL vectors, turned into a string in arrow output + var versionVector = (VarCharVector) root.getVector("v"); + assertEquals("1.0.1", versionVector.getObject(0).toString()); + assertEquals("1.0.2", versionVector.getObject(1).toString()); + assertTrue(versionVector.isNull(2)); + assertEquals("1.0.4", versionVector.getObject(3).toString()); + } +} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlSpecIT.java 
b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlSpecIT.java index a3af3cbc8458b..15d55e0258110 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlSpecIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlSpecIT.java @@ -25,7 +25,21 @@ protected String getTestRestCluster() { return cluster.getHttpAddresses(); } - public EsqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase, Mode mode) { - super(fileName, groupName, testName, lineNumber, testCase, mode); + public EsqlSpecIT( + String fileName, + String groupName, + String testName, + Integer lineNumber, + CsvTestCase testCase, + String instructions, + Mode mode + ) { + super(fileName, groupName, testName, lineNumber, testCase, instructions, mode); + } + + @Override + protected boolean enableRoundingDoubleValuesOnAsserting() { + // This suite runs with more than one node and three shards in serverless + return cluster.getNumNodes() > 1; } } diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java index b231de66f29a6..bc445deb10f0b 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java @@ -37,6 +37,9 @@ import org.junit.Before; import java.io.IOException; +import java.math.BigDecimal; +import java.math.MathContext; +import java.math.RoundingMode; import java.net.URL; import java.util.ArrayList; import java.util.Arrays; @@ -79,6 +82,7 @@ public abstract class EsqlSpecTestCase extends ESRestTestCase { private final String testName; private final Integer lineNumber; protected final CsvTestCase testCase; + protected final String instructions; protected final Mode mode; public enum Mode { @@ -105,12 +109,21 @@ public static List readScriptSpec() throws Exception { return testcases; } - protected EsqlSpecTestCase(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase, Mode mode) { + protected EsqlSpecTestCase( + String fileName, + String groupName, + String testName, + Integer lineNumber, + CsvTestCase testCase, + String instructions, + Mode mode + ) { this.fileName = fileName; this.groupName = groupName; this.testName = testName; this.lineNumber = lineNumber; this.testCase = testCase; + this.instructions = instructions; this.mode = mode; } @@ -152,7 +165,7 @@ public final void test() throws Throwable { protected void shouldSkipTest(String testName) throws IOException { checkCapabilities(adminClient(), testFeatureService, testName, testCase); - assumeTrue("Test " + testName + " is not enabled", isEnabled(testName, Version.CURRENT)); + assumeTrue("Test " + testName + " is not enabled", isEnabled(testName, instructions, Version.CURRENT)); } protected static void checkCapabilities(RestClient client, TestFeatureService testFeatureService, String testName, CsvTestCase testCase) @@ -201,11 +214,7 @@ protected final void doTest() throws Throwable { builder.tables(tables()); } - Map answer = runEsql( - builder.query(testCase.query), - testCase.expectedWarnings(false), - testCase.expectedWarningsRegex() - ); + Map answer = 
runEsql(builder.query(testCase.query), testCase.expectedWarnings(), testCase.expectedWarningsRegex()); var expectedColumnsWithValues = loadCsvSpecValues(testCase.expectedResults); @@ -244,10 +253,10 @@ protected void assertResults( Logger logger ) { assertMetadata(expected, actualColumns, logger); - assertData(expected, actualValues, testCase.ignoreOrder, logger, EsqlSpecTestCase::valueMapper); + assertData(expected, actualValues, testCase.ignoreOrder, logger, this::valueMapper); } - private static Object valueMapper(CsvTestUtils.Type type, Object value) { + private Object valueMapper(CsvTestUtils.Type type, Object value) { if (value == null) { return "null"; } @@ -262,9 +271,30 @@ private static Object valueMapper(CsvTestUtils.Type type, Object value) { } catch (Throwable ignored) {} } } + if (type == CsvTestUtils.Type.DOUBLE && enableRoundingDoubleValuesOnAsserting()) { + if (value instanceof List vs) { + List values = new ArrayList<>(); + for (Object v : vs) { + values.add(valueMapper(type, v)); + } + return values; + } else if (value instanceof Double d) { + return new BigDecimal(d).round(new MathContext(10, RoundingMode.DOWN)).doubleValue(); + } else if (value instanceof String s) { + return new BigDecimal(s).round(new MathContext(10, RoundingMode.DOWN)).doubleValue(); + } + } return value.toString(); } + /** + * Rounds double values when asserting double values returned in queries. + * By default, no rounding is performed. + */ + protected boolean enableRoundingDoubleValuesOnAsserting() { + return false; + } + private static String normalizedPoint(CsvTestUtils.Type type, double x, double y) { if (type == CsvTestUtils.Type.GEO_POINT) { return normalizedGeoPoint(x, y); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java index ad7c3fba1683e..bfa9c11cbcd53 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java @@ -10,7 +10,6 @@ import org.apache.lucene.sandbox.document.HalfFloatPoint; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.time.DateFormatters; @@ -28,6 +27,7 @@ import org.elasticsearch.logging.Logger; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.esql.action.ResponseValueUtils; +import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.StringUtils; import org.supercsv.io.CsvListReader; import org.supercsv.prefs.CsvPreference; @@ -69,21 +69,21 @@ public final class CsvTestUtils { private CsvTestUtils() {} - public static boolean isEnabled(String testName, Version version) { + public static boolean isEnabled(String testName, String instructions, Version version) { if (testName.endsWith("-Ignore")) { return false; } - Tuple skipRange = skipVersionRange(testName); + Tuple skipRange = skipVersionRange(testName, instructions); if (skipRange != null && version.onOrAfter(skipRange.v1()) && version.onOrBefore(skipRange.v2())) { return false; } return true; } - private static final Pattern INSTRUCTION_PATTERN = Pattern.compile("#\\[(.*?)]"); + private static final Pattern INSTRUCTION_PATTERN = 
Pattern.compile("\\[(.*?)]"); - public static Map extractInstructions(String testName) { - Matcher matcher = INSTRUCTION_PATTERN.matcher(testName); + public static Map parseInstructions(String instructions) { + Matcher matcher = INSTRUCTION_PATTERN.matcher(instructions); Map pairs = new HashMap<>(); if (matcher.find()) { String[] groups = matcher.group(1).split(","); @@ -98,8 +98,8 @@ public static Map extractInstructions(String testName) { return pairs; } - public static Tuple skipVersionRange(String testName) { - Map pairs = extractInstructions(testName); + public static Tuple skipVersionRange(String testName, String instructions) { + Map pairs = parseInstructions(instructions); String versionRange = pairs.get("skip"); if (versionRange != null) { String[] skipVersions = versionRange.split("-", Integer.MAX_VALUE); @@ -331,15 +331,15 @@ public static ExpectedResults loadCsvSpecValues(String csv) { columnTypes = new ArrayList<>(header.length); for (String c : header) { - String[] nameWithType = Strings.split(c, ":"); - if (nameWithType == null || nameWithType.length != 2) { + String[] nameWithType = escapeTypecast(c).split(":"); + if (nameWithType.length != 2) { throw new IllegalArgumentException("Invalid CSV header " + c); } - String typeName = nameWithType[1].trim(); - if (typeName.length() == 0) { - throw new IllegalArgumentException("A type is always expected in the csv file; found " + nameWithType); + String typeName = unescapeTypecast(nameWithType[1]).trim(); + if (typeName.isEmpty()) { + throw new IllegalArgumentException("A type is always expected in the csv file; found " + Arrays.toString(nameWithType)); } - String name = nameWithType[0].trim(); + String name = unescapeTypecast(nameWithType[0]).trim(); columnNames.add(name); Type type = Type.asType(typeName); if (type == null) { @@ -397,6 +397,16 @@ public static ExpectedResults loadCsvSpecValues(String csv) { } } + private static final String TYPECAST_SPACER = "__TYPECAST__"; + + private static String escapeTypecast(String typecast) { + return typecast.replace("::", TYPECAST_SPACER); + } + + private static String unescapeTypecast(String typecast) { + return typecast.replace(TYPECAST_SPACER, "::"); + } + public enum Type { INTEGER(Integer::parseInt, Integer.class), LONG(Long::parseLong, Long.class), @@ -537,7 +547,7 @@ public Comparator comparator() { record ActualResults( List columnNames, List columnTypes, - List dataTypes, + List dataTypes, List pages, Map> responseHeaders ) { diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java index ec5770e8ce70b..530b2bc01b3d6 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java @@ -95,6 +95,7 @@ public class CsvTestsDataLoader { "mapping-cartesian_multipolygons.json", "cartesian_multipolygons.csv" ); + private static final TestsDataset DISTANCES = new TestsDataset("distances", "mapping-distances.json", "distances.csv"); private static final TestsDataset K8S = new TestsDataset("k8s", "k8s-mappings.json", "k8s.csv", "k8s-settings.json", true); @@ -119,7 +120,8 @@ public class CsvTestsDataLoader { Map.entry(COUNTRIES_BBOX_WEB.indexName, COUNTRIES_BBOX_WEB), Map.entry(AIRPORT_CITY_BOUNDARIES.indexName, AIRPORT_CITY_BOUNDARIES), 
Map.entry(CARTESIAN_MULTIPOLYGONS.indexName, CARTESIAN_MULTIPOLYGONS), - Map.entry(K8S.indexName, K8S) + Map.entry(K8S.indexName, K8S), + Map.entry(DISTANCES.indexName, DISTANCES) ); private static final EnrichConfig LANGUAGES_ENRICH = new EnrichConfig("languages_policy", "enrich-policy-languages.json"); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java index d7e067658267f..2bf3baf845010 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java @@ -33,7 +33,6 @@ import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.expression.predicate.Range; import org.elasticsearch.xpack.esql.core.index.EsIndex; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.session.Configuration; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; @@ -49,6 +48,7 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec index c0572e7bbcd49..adbf24cee10b0 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec @@ -67,10 +67,9 @@ required_capability: mv_warn from employees | keep emp_no, is_rehired, still_hired | where is_rehired in (still_hired, true) | where is_rehired != still_hired; ignoreOrder:true -warning:Line 1:63: evaluation of [is_rehired in (still_hired, true)] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:63: java.lang.IllegalArgumentException: single-value function encountered multi-value -warning:Line 1:105: evaluation of [is_rehired != still_hired] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:105: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[is_rehired in \(still_hired, true\)\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[is_rehired != still_hired\] failed, treating result as null. Only first 20 failures recorded. 
emp_no:integer |is_rehired:boolean |still_hired:boolean 10021 |true |false 10029 |true |false diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec index f41bf3f020eb5..7e2afb9267e5b 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec @@ -570,3 +570,123 @@ ROW long = TO_LONG(100), double = 99., int = 100 b1:double| b2:double| b3:double 99.0 |0.0 |99.0 ; + + +zeroBucketsRow#[skip:-8.13.99, reason:BUCKET renamed in 8.14] +ROW a = 1 +| STATS max = max(a) BY b = BUCKET(a, 0, 0, 0) +; +warningRegex:evaluation of \[BUCKET\(a, 0, 0, 0\)\] failed, treating result as null. Only first 20 failures recorded +warningRegex:java.lang.ArithmeticException: / by zero + +max:integer | b:double +1 | null +; + + +zeroBuckets#[skip:-8.13.99, reason:BUCKET renamed in 8.14] +FROM employees +| STATS max = max(salary) BY b = BUCKET(salary, 0, 0, 0) +; +warningRegex:evaluation of \[BUCKET\(salary, 0, 0, 0\)\] failed, treating result as null. Only first 20 failures recorded +warningRegex:java.lang.ArithmeticException: / by zero + +max:integer | b:double +74999 | null +; + + +zeroBucketsDouble#[skip:-8.13.99, reason:BUCKET renamed in 8.14] +FROM employees +| STATS max = max(salary) BY b = BUCKET(salary, 0.) +; +warningRegex:evaluation of \[BUCKET\(salary, 0.\)\] failed, treating result as null. Only first 20 failures recorded +warningRegex:java.lang.ArithmeticException: / by zero + +max:integer | b:double +74999 | null +; + +minusOneBucketsRow#[skip:-8.13.99, reason:BUCKET renamed in 8.14] +ROW a = 1 +| STATS max = max(a) BY b = BUCKET(a, -1, 0, 0) +; +warningRegex:evaluation of \[BUCKET\(a, -1, 0, 0\)\] failed, treating result as null. Only first 20 failures recorded +warningRegex:java.lang.ArithmeticException: / by zero + +max:integer | b:double +1 | null +; + + +minusOneBuckets#[skip:-8.13.99, reason:BUCKET renamed in 8.14] +FROM employees +| STATS max = max(salary) BY b = BUCKET(salary, -1, 0, 0) +; +warningRegex:evaluation of \[BUCKET\(salary, -1, 0, 0\)\] failed, treating result as null. Only first 20 failures recorded +warningRegex:java.lang.ArithmeticException: / by zero + +max:integer | b:double +74999 | null +; + + +tooManyBucketsRow#[skip:-8.13.99, reason:BUCKET renamed in 8.14] +ROW a = 1 +| STATS max = max(a) BY b = BUCKET(a, 100000000000, 0, 0) +; +warningRegex:evaluation of \[BUCKET\(a, 100000000000, 0, 0\)\] failed, treating result as null. Only first 20 failures recorded +warningRegex:java.lang.ArithmeticException: / by zero + +max:integer | b:double +1 | null +; + + +tooManyBuckets#[skip:-8.13.99, reason:BUCKET renamed in 8.14] +FROM employees +| STATS max = max(salary) BY b = BUCKET(salary, 100000000000, 0, 0) +; +warningRegex:evaluation of \[BUCKET\(salary, 100000000000, 0, 0\)\] failed, treating result as null. 
Only first 20 failures recorded +warningRegex:java.lang.ArithmeticException: / by zero + +max:integer | b:double +74999 | null +; + + +foldableBuckets +required_capability: casting_operator +FROM employees +| WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z" +| EVAL c = concat("2", "0")::int +| STATS hires_per_month = COUNT(*) BY month = BUCKET(hire_date, c, "1985-01-01T00:00:00Z", "1986-01-01T00:00:00Z") +| SORT month +; + + hires_per_month:long | month:date +2 |1985-02-01T00:00:00.000Z +1 |1985-05-01T00:00:00.000Z +1 |1985-07-01T00:00:00.000Z +1 |1985-09-01T00:00:00.000Z +2 |1985-10-01T00:00:00.000Z +4 |1985-11-01T00:00:00.000Z +; + + +foldableBucketsInline +required_capability: casting_operator +FROM employees +| WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z" +| STATS hires_per_month = COUNT(*) BY month = BUCKET(hire_date, concat("2", "0")::int, "1985-01-01T00:00:00Z", "1986-01-01T00:00:00Z") +| SORT month +; + + hires_per_month:long | month:date +2 |1985-02-01T00:00:00.000Z +1 |1985-05-01T00:00:00.000Z +1 |1985-07-01T00:00:00.000Z +1 |1985-09-01T00:00:00.000Z +2 |1985-10-01T00:00:00.000Z +4 |1985-11-01T00:00:00.000Z +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec index 225ea37688689..812198c324217 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/dissect.csv-spec @@ -14,6 +14,43 @@ foo bar | null | null ; +shadowing +FROM employees +| KEEP first_name, last_name +| WHERE last_name == "Facello" +| EVAL left = "left", full_name = concat(first_name, " ", last_name) , last_name = "last_name", right = "right" +| DISSECT full_name "%{?} %{last_name}" +; + +first_name:keyword | left:keyword | full_name:keyword | right:keyword | last_name:keyword +Georgi | left | Georgi Facello | right | Facello +; + +shadowingSelf +FROM employees +| KEEP first_name, last_name +| WHERE last_name == "Facello" +| EVAL left = "left", name = concat(first_name, "1 ", last_name), right = "right" +| DISSECT name "%{name} %{?}" +; + +first_name:keyword | last_name:keyword | left:keyword | right:keyword | name:keyword +Georgi | Facello | left | right | Georgi1 +; + +shadowingMulti +FROM employees +| KEEP first_name, last_name +| WHERE last_name == "Facello" +| EVAL left = "left", foo = concat(first_name, "1 ", first_name, "2 ", last_name) , middle = "middle", bar = "bar", right = "right" +| DISSECT foo "%{bar} %{first_name} %{last_name_again}" +; + +last_name:keyword | left:keyword | foo:keyword | middle:keyword | right:keyword | bar:keyword | first_name:keyword | last_name_again:keyword +Facello | left | Georgi1 Georgi2 Facello | middle | right | Georgi1 | Georgi2 | Facello +; + + complexPattern ROW a = "1953-01-23T12:15:00Z - some text - 127.0.0.1;" | DISSECT a "%{Y}-%{M}-%{D}T%{h}:%{m}:%{s}Z - %{msg} - %{ip};" @@ -138,6 +175,17 @@ Parto Bamford | Parto | Bamford ; +// different from shadowingSelf because in this case we dissect an indexed field +// see https://github.com/elastic/elasticsearch/issues/110184 +overwriteInputName +required_capability: grok_dissect_masking +from employees | sort emp_no asc | dissect first_name "%{first_name}o%{rest}" | keep emp_no, first_name, rest | limit 1; + +emp_no:integer | first_name:keyword | rest:keyword +10001 | Ge | rgi +; + + overwriteNameWhere from employees | sort emp_no asc | eval full_name = 
concat(first_name, " ", last_name) | dissect full_name "%{emp_no} %{b}" | where emp_no == "Bezalel" | keep full_name, emp_no, b | limit 3; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/distances.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/distances.csv new file mode 100644 index 0000000000000..3c21d41d7452b --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/distances.csv @@ -0,0 +1,118 @@ +distance:double,location:geo_point +0.2848062860101461,POINT (5.867332220077515E-7 2.514570951461792E-6) +0.2848062860101461,POINT (5.867332220077515E-7 2.514570951461792E-6) +0.2848062860101461,POINT (5.867332220077515E-7 2.514570951461792E-6) +0.30021218524180354,POINT (6.705522537231445E-7 2.514570951461792E-6) +0.30021218524180354,POINT (6.705522537231445E-7 2.514570951461792E-6) +0.30021218524180354,POINT (6.705522537231445E-7 2.514570951461792E-6) +0.2848062860101461,POINT (5.867332220077515E-7 2.514570951461792E-6) +0.2848062860101461,POINT (5.867332220077515E-7 2.514570951461792E-6) +0.2848062860101461,POINT (5.867332220077515E-7 2.514570951461792E-6) +0.30021218524180354,POINT (6.705522537231445E-7 2.514570951461792E-6) +0.30021218524180354,POINT (6.705522537231445E-7 2.514570951461792E-6) +0.30021218524180354,POINT (6.705522537231445E-7 2.514570951461792E-6) +0.26851794154977293,POINT (5.867332220077515E-7 2.4726614356040955E-6) +0.26851794154977293,POINT (5.867332220077515E-7 2.4726614356040955E-6) +0.26851794154977293,POINT (5.867332220077515E-7 2.4726614356040955E-6) +0.2848062860101461,POINT (6.705522537231445E-7 2.4726614356040955E-6) +0.2848062860101461,POINT (6.705522537231445E-7 2.4726614356040955E-6) +0.2848062860101461,POINT (6.705522537231445E-7 2.4726614356040955E-6) +0.2848062860101461,POINT (1.0058283805847168E-6 2.346932888031006E-6) +0.2848062860101461,POINT (1.0058283805847168E-6 2.346932888031006E-6) +0.2848062860101461,POINT (1.0058283805847168E-6 2.346932888031006E-6) +0.30021218524180354,POINT (1.0896474123001099E-6 2.346932888031006E-6) +0.30021218524180354,POINT (1.0896474123001099E-6 2.346932888031006E-6) +0.30021218524180354,POINT (1.0896474123001099E-6 2.346932888031006E-6) +0.2848062860101461,POINT (1.0058283805847168E-6 2.346932888031006E-6) +0.2848062860101461,POINT (1.0058283805847168E-6 2.346932888031006E-6) +0.2848062860101461,POINT (1.0058283805847168E-6 2.346932888031006E-6) +0.30021218524180354,POINT (1.0896474123001099E-6 2.346932888031006E-6) +0.30021218524180354,POINT (1.0896474123001099E-6 2.346932888031006E-6) +0.30021218524180354,POINT (1.0896474123001099E-6 2.346932888031006E-6) +0.2685179415497728,POINT (1.0058283805847168E-6 2.3050233721733093E-6) +0.2685179415497728,POINT (1.0058283805847168E-6 2.3050233721733093E-6) +0.2685179415497728,POINT (1.0058283805847168E-6 2.3050233721733093E-6) +0.2848062860101459,POINT (1.0896474123001099E-6 2.3050233721733093E-6) +0.2848062860101459,POINT (1.0896474123001099E-6 2.3050233721733093E-6) +0.2848062860101459,POINT (1.0896474123001099E-6 2.3050233721733093E-6) +0.2848062860101459,POINT (1.341104507446289E-6 2.1792948246002197E-6) +0.2848062860101459,POINT (1.341104507446289E-6 2.1792948246002197E-6) +0.2848062860101459,POINT (1.341104507446289E-6 2.1792948246002197E-6) +0.30021218524180354,POINT (1.4249235391616821E-6 2.1792948246002197E-6) +0.30021218524180354,POINT (1.4249235391616821E-6 2.1792948246002197E-6) +0.30021218524180354,POINT (1.4249235391616821E-6 2.1792948246002197E-6) +0.2848062860101459,POINT (1.341104507446289E-6 
2.1792948246002197E-6) +0.2848062860101459,POINT (1.341104507446289E-6 2.1792948246002197E-6) +0.2848062860101459,POINT (1.341104507446289E-6 2.1792948246002197E-6) +0.30021218524180354,POINT (1.4249235391616821E-6 2.1792948246002197E-6) +0.30021218524180354,POINT (1.4249235391616821E-6 2.1792948246002197E-6) +0.30021218524180354,POINT (1.4249235391616821E-6 2.1792948246002197E-6) +0.2685179415497728,POINT (1.341104507446289E-6 2.137385308742523E-6) +0.2685179415497728,POINT (1.341104507446289E-6 2.137385308742523E-6) +0.2685179415497728,POINT (1.341104507446289E-6 2.137385308742523E-6) +0.2848062860101459,POINT (1.4249235391616821E-6 2.137385308742523E-6) +0.2848062860101459,POINT (1.4249235391616821E-6 2.137385308742523E-6) +0.2848062860101459,POINT (1.4249235391616821E-6 2.137385308742523E-6) +0.2848062860101459,POINT (1.5925616025924683E-6 2.0116567611694336E-6) +0.2848062860101459,POINT (1.5925616025924683E-6 2.0116567611694336E-6) +0.2848062860101459,POINT (1.5925616025924683E-6 2.0116567611694336E-6) +0.30021218524180354,POINT (1.6763806343078613E-6 2.0116567611694336E-6) +0.30021218524180354,POINT (1.6763806343078613E-6 2.0116567611694336E-6) +0.30021218524180354,POINT (1.6763806343078613E-6 2.0116567611694336E-6) +0.2848062860101459,POINT (1.5925616025924683E-6 2.0116567611694336E-6) +0.2848062860101459,POINT (1.5925616025924683E-6 2.0116567611694336E-6) +0.2848062860101459,POINT (1.5925616025924683E-6 2.0116567611694336E-6) +0.30021218524180354,POINT (1.6763806343078613E-6 2.0116567611694336E-6) +0.30021218524180354,POINT (1.6763806343078613E-6 2.0116567611694336E-6) +0.30021218524180354,POINT (1.6763806343078613E-6 2.0116567611694336E-6) +0.2685179415497728,POINT (1.5925616025924683E-6 1.969747245311737E-6) +0.2685179415497728,POINT (1.5925616025924683E-6 1.969747245311737E-6) +0.2685179415497728,POINT (1.5925616025924683E-6 1.969747245311737E-6) +0.2848062860101459,POINT (1.6763806343078613E-6 1.969747245311737E-6) +0.2848062860101459,POINT (1.6763806343078613E-6 1.969747245311737E-6) +0.2848062860101459,POINT (1.6763806343078613E-6 1.969747245311737E-6) +0.2848062860101459,POINT (1.7601996660232544E-6 1.8440186977386475E-6) +0.2848062860101459,POINT (1.7601996660232544E-6 1.8440186977386475E-6) +0.2848062860101459,POINT (1.7601996660232544E-6 1.8440186977386475E-6) +0.30021218524180354,POINT (1.8440186977386475E-6 1.8440186977386475E-6) +0.30021218524180354,POINT (1.8440186977386475E-6 1.8440186977386475E-6) +0.30021218524180354,POINT (1.8440186977386475E-6 1.8440186977386475E-6) +0.2848062860101459,POINT (1.7601996660232544E-6 1.8440186977386475E-6) +0.2848062860101459,POINT (1.7601996660232544E-6 1.8440186977386475E-6) +0.2848062860101459,POINT (1.7601996660232544E-6 1.8440186977386475E-6) +0.30021218524180354,POINT (1.8440186977386475E-6 1.8440186977386475E-6) +0.30021218524180354,POINT (1.8440186977386475E-6 1.8440186977386475E-6) +0.30021218524180354,POINT (1.8440186977386475E-6 1.8440186977386475E-6) +0.2685179415497728,POINT (1.7601996660232544E-6 1.802109181880951E-6) +0.2685179415497728,POINT (1.7601996660232544E-6 1.802109181880951E-6) +0.2685179415497728,POINT (1.7601996660232544E-6 1.802109181880951E-6) +0.2848062860101459,POINT (1.8440186977386475E-6 1.802109181880951E-6) +0.2848062860101459,POINT (1.8440186977386475E-6 1.802109181880951E-6) +0.2848062860101459,POINT (1.8440186977386475E-6 1.802109181880951E-6) +3.2597569375901188,POINT (2.3720785975456238E-5 -1.7224811017513275E-5) +3.2597569375901188,POINT (2.3720785975456238E-5 -1.7224811017513275E-5) 
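The distances.csv rows here repeat near-identical doubles such as 0.2848062860101461 and 0.2848062860101459, which differ only past the tenth significant digit; that is exactly the variation the new enableRoundingDoubleValuesOnAsserting() hook in EsqlSpecTestCase smooths over. A tiny standalone illustration of the same BigDecimal truncation used by the DOUBLE branch added to valueMapper (the class and main harness are just for the demo):

import java.math.BigDecimal;
import java.math.MathContext;
import java.math.RoundingMode;

public class RoundingSketch {
    // Truncate to 10 significant digits, as the new DOUBLE branch in valueMapper does.
    static double round10(double d) {
        return new BigDecimal(d).round(new MathContext(10, RoundingMode.DOWN)).doubleValue();
    }

    public static void main(String[] args) {
        System.out.println(round10(0.2848062860101461)); // 0.284806286
        System.out.println(round10(0.2848062860101459)); // 0.284806286, so the two compare equal after rounding
    }
}
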
+3.2597569375901188,POINT (2.3720785975456238E-5 -1.7224811017513275E-5) +3.2625206150153967,POINT (2.3720785975456238E-5 -1.726672053337097E-5) +3.2625206150153967,POINT (2.3720785975456238E-5 -1.726672053337097E-5) +3.2625206150153967,POINT (2.3720785975456238E-5 -1.726672053337097E-5) +3.2625206150153967,POINT (2.3720785975456238E-5 -1.726672053337097E-5) +3.2625206150153967,POINT (2.3720785975456238E-5 -1.726672053337097E-5) +3.2625206150153967,POINT (2.3720785975456238E-5 -1.726672053337097E-5) +3.2597569375901188,POINT (2.346932888031006E-5 -1.7560087144374847E-5) +3.2597569375901188,POINT (2.346932888031006E-5 -1.7560087144374847E-5) +3.2597569375901188,POINT (2.346932888031006E-5 -1.7560087144374847E-5) +3.2625206150153967,POINT (2.346932888031006E-5 -1.7601996660232544E-5) +3.2625206150153967,POINT (2.346932888031006E-5 -1.7601996660232544E-5) +3.2625206150153967,POINT (2.346932888031006E-5 -1.7601996660232544E-5) +3.2625206150153967,POINT (2.346932888031006E-5 -1.7601996660232544E-5) +3.2625206150153967,POINT (2.346932888031006E-5 -1.7601996660232544E-5) +3.2625206150153967,POINT (2.346932888031006E-5 -1.7601996660232544E-5) +3.258374219844941,POINT (2.2547319531440735E-5 -1.873355358839035E-5) +3.258374219844941,POINT (2.2547319531440735E-5 -1.873355358839035E-5) +3.258374219844941,POINT (2.2547319531440735E-5 -1.873355358839035E-5) +3.262520615015394,POINT (2.2547319531440735E-5 -1.8775463104248047E-5) +3.262520615015394,POINT (2.2547319531440735E-5 -1.8775463104248047E-5) +3.262520615015394,POINT (2.2547319531440735E-5 -1.8775463104248047E-5) +3.262520615015394,POINT (2.2547319531440735E-5 -1.8775463104248047E-5) +3.262520615015394,POINT (2.2547319531440735E-5 -1.8775463104248047E-5) +3.262520615015394,POINT (2.2547319531440735E-5 -1.8775463104248047E-5) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec index bd384886f0dd7..fc8c48afdf8cc 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec @@ -31,6 +31,82 @@ FROM sample_data median_duration:double | env:keyword ; +shadowing +required_capability: enrich_load +ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" +| ENRICH clientip_policy ON client_ip +; + +left:keyword | client_ip:keyword | right:keyword | env:keyword +left | 172.21.0.5 | right | Development +; + +shadowingLimit0 +ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" +| ENRICH clientip_policy ON client_ip +| LIMIT 0 +; + +left:keyword | client_ip:keyword | right:keyword | env:keyword +; + +shadowingWithAlias +required_capability: enrich_load +ROW left = "left", foo = "foo", client_ip = "172.21.0.5", env = "env", right = "right" +| ENRICH clientip_policy ON client_ip WITH foo = env +; + +left:keyword | client_ip:keyword | env:keyword | right:keyword | foo:keyword +left | 172.21.0.5 | env | right | Development +; + +shadowingWithAliasLimit0 +ROW left = "left", foo = "foo", client_ip = "172.21.0.5", env = "env", right = "right" +| ENRICH clientip_policy ON client_ip WITH foo = env +| LIMIT 0 +; + +left:keyword | client_ip:keyword | env:keyword | right:keyword | foo:keyword +; + +shadowingSelf +required_capability: enrich_load +ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" +| ENRICH clientip_policy ON client_ip WITH client_ip = env +; + +left:keyword | env:keyword | right:keyword | 
client_ip:keyword +left | env | right | Development +; + +shadowingSelfLimit0 +ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" +| ENRICH clientip_policy ON client_ip WITH client_ip = env +| LIMIT 0 +; + +left:keyword | env:keyword | right:keyword | client_ip:keyword +; + +shadowingMulti#[skip:-8.13.99, reason:ENRICH extended in 8.14.0] +required_capability: enrich_load +ROW left = "left", airport = "Zurich Airport ZRH", city = "Zürich", middle = "middle", region = "North-East Switzerland", right = "right" +| ENRICH city_names ON city WITH airport, region, city_boundary +; + +left:keyword | city:keyword | middle:keyword | right:keyword | airport:text | region:text | city_boundary:geo_shape +left | Zürich | middle | right | Zurich Int'l | Bezirk Zürich | "POLYGON((8.448 47.3802,8.4977 47.3452,8.5032 47.3202,8.6254 47.3547,8.5832 47.3883,8.5973 47.4063,8.5431 47.4329,8.4858 47.431,8.4691 47.4169,8.473 47.3951,8.448 47.3802))" +; + +shadowingMultiLimit0#[skip:-8.13.99, reason:ENRICH extended in 8.14.0] +ROW left = "left", airport = "Zurich Airport ZRH", city = "Zürich", middle = "middle", region = "North-East Switzerland", right = "right" +| ENRICH city_names ON city WITH airport, region, city_boundary +| LIMIT 0 +; + +left:keyword | city:keyword | middle:keyword | right:keyword | airport:text | region:text | city_boundary:geo_shape +; + simple required_capability: enrich_load @@ -428,8 +504,8 @@ FROM airports | EVAL boundary_wkt_length = LENGTH(TO_STRING(city_boundary)) | STATS city_centroid = ST_CENTROID_AGG(city_location), count = COUNT(city_location), min_wkt = MIN(boundary_wkt_length), max_wkt = MAX(boundary_wkt_length) ; -warning:Line 3:30: evaluation of [LENGTH(TO_STRING(city_boundary))] failed, treating result as null. Only first 20 failures recorded. -warning:Line 3:30: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[LENGTH\(TO_STRING\(city_boundary\)\)\] failed, treating result as null. Only first 20 failures recorded. 
+warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value city_centroid:geo_point | count:long | min_wkt:integer | max_wkt:integer POINT(1.396561 24.127649) | 872 | 88 | 1044 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec index 571d7835451c3..3df3b85e5e3af 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec @@ -5,6 +5,34 @@ a:integer | b:integer 1 | 2 ; + +shadowing +ROW left = "left", x = 10000 , right = "right" +| EVAL x = 1 +; + +left:keyword | right:keyword | x:integer +left | right | 1 +; + +shadowingSelf +ROW left = "left", x = 10000 , right = "right" +| EVAL x = x + 1 +; + +left:keyword | right:keyword | x:integer +left | right | 10001 +; + +shadowingMulti +ROW left = "left", x = 0, middle = "middle", y = -1, right = "right" +| EVAL x = 9, y = 10 +; + +left:keyword | middle:keyword | right:keyword | x:integer | y:integer +left | middle | right | 9 | 10 +; + withMath row a = 1 | eval b = 2 + 3; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec index 66f4e9a33ceff..537b69547c6be 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec @@ -1,4 +1,11 @@ // Floating point types-specific tests +parseLargeMagnitudeValues +required_capability: fix_parsing_large_negative_numbers +row a = 92233720368547758090, b = -9223372036854775809; + +a:double | b:double +9.223372036854776E+19 | -9.223372036854776E+18 +; inDouble from employees | keep emp_no, height, height.float, height.half_float, height.scaled_float | where height in (2.03, 2.0299999713897705, 2.029296875, 2.0300000000000002) | sort emp_no; @@ -95,8 +102,8 @@ lessThanMultivalue required_capability: mv_warn from employees | where salary_change < 1 | keep emp_no, salary_change | sort emp_no | limit 5; -warning:Line 1:24: evaluation of [salary_change < 1] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[salary_change < 1\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value // Note that multivalued salaries aren't less than 1 - they are null - so they aren't included emp_no:integer |salary_change:double @@ -111,8 +118,8 @@ greaterThanMultivalue required_capability: mv_warn from employees | where salary_change > 1 | keep emp_no, salary_change | sort emp_no | limit 5; -warning:Line 1:24: evaluation of [salary_change > 1] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[salary_change > 1\] failed, treating result as null. Only first 20 failures recorded. 
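The parseLargeMagnitudeValues case added at the top of floats.csv-spec above pins down integer literals that overflow a signed 64-bit long (92233720368547758090 and -9223372036854775809): both columns now come back typed as double. A small plain-Java illustration of the boundary involved (it only demonstrates the long/double limits, not how the ESQL parser handles the literals):

public class LargeLiteralSketch {
    public static void main(String[] args) {
        // Long.MIN_VALUE is -9223372036854775808; the literal in the test is one below it,
        // so it cannot be represented as a long and ends up as a double.
        System.out.println(Long.MIN_VALUE);           // -9223372036854775808
        System.out.println(-9223372036854775809.0);   // -9.223372036854776E18, the value expected for column b
        System.out.println(92233720368547758090.0);   // 9.223372036854776E19, the value expected for column a
    }
}
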
+warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value // Note that multivalued salaries aren't greater than 1 - they are null - so they aren't included emp_no:integer |salary_change:double @@ -165,8 +172,8 @@ notLessThanMultivalue required_capability: mv_warn from employees | where not(salary_change < 1) | keep emp_no, salary_change | sort emp_no | limit 5; -warning:Line 1:24: evaluation of [not(salary_change < 1)] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [salary_change < 1] failed, treating result as null. Only first 20 failures recorded.] -warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value#[Emulated:Line 1:28: java.lang.IllegalArgumentException: single-value function encountered multi-value] +warningRegex:evaluation of \[.*salary_change < 1.*\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value // Note that multivalued salaries aren't less than 1 - they are null - so they aren't included emp_no:integer |salary_change:double @@ -181,8 +188,8 @@ notGreaterThanMultivalue required_capability: mv_warn from employees | where not(salary_change > 1) | keep emp_no, salary_change | sort emp_no | limit 5; -warning:Line 1:24: evaluation of [not(salary_change > 1)] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [salary_change > 1] failed, treating result as null. Only first 20 failures recorded.] -warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value#[Emulated:Line 1:28: java.lang.IllegalArgumentException: single-value function encountered multi-value] +warningRegex:evaluation of \[.*salary_change > 1.*\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value // Note that multivalued salaries aren't less than 1 - they are null - so they aren't included emp_no:integer |salary_change:double @@ -197,8 +204,8 @@ notEqualToMultivalue required_capability: mv_warn from employees | where not(salary_change == 1.19) | keep emp_no, salary_change | sort emp_no | limit 5; -warning:Line 1:24: evaluation of [not(salary_change == 1.19)] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [salary_change == 1.19] failed, treating result as null. Only first 20 failures recorded.] -warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value#[Emulated:Line 1:28: java.lang.IllegalArgumentException: single-value function encountered multi-value] +warningRegex:evaluation of \[.*salary_change == 1.19.*\] failed, treating result as null. Only first 20 failures recorded. 
+warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value // Note that multivalued salaries aren't greater than 1 - they are null - so they aren't included emp_no:integer |salary_change:double diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/grok.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/grok.csv-spec index fbe31deeb0f97..9d574eed7be6b 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/grok.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/grok.csv-spec @@ -14,6 +14,43 @@ foo bar | null ; +shadowing +FROM employees +| KEEP first_name, last_name +| WHERE last_name == "Facello" +| EVAL left = "left", full_name = concat(first_name, " ", last_name) , last_name = "last_name", right = "right" +| GROK full_name "%{WORD} %{WORD:last_name}" +; + +first_name:keyword | left:keyword | full_name:keyword | right:keyword | last_name:keyword +Georgi | left | Georgi Facello | right | Facello +; + +shadowingSelf +FROM employees +| KEEP first_name, last_name +| WHERE last_name == "Facello" +| EVAL left = "left", name = concat(first_name, "1 ", last_name), right = "right" +| GROK name "%{WORD:name} %{WORD}" +; + +first_name:keyword | last_name:keyword | left:keyword | right:keyword | name:keyword +Georgi | Facello | left | right | Georgi1 +; + +shadowingMulti +FROM employees +| KEEP first_name, last_name +| WHERE last_name == "Facello" +| EVAL left = "left", foo = concat(first_name, "1 ", first_name, "2 ", last_name) , middle = "middle", bar = "bar", right = "right" +| GROK foo "%{WORD:bar} %{WORD:first_name} %{WORD:last_name_again}" +; + +last_name:keyword | left:keyword | foo:keyword | middle:keyword | right:keyword | bar:keyword | first_name:keyword | last_name_again:keyword +Facello | left | Georgi1 Georgi2 Facello | middle | right | Georgi1 | Georgi2 | Facello +; + + complexPattern ROW a = "1953-01-23T12:15:00Z 127.0.0.1 some.email@foo.com 42" | GROK a "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}" @@ -192,3 +229,11 @@ emp_no:integer | a:keyword | b:keyword 10004 | [Head, Reporting, Support, Tech] | [Human, Analyst, Engineer, Lead] | Resources | [Head Human Resources, Reporting Analyst, Support Engineer, Tech Lead] 10005 | null | null | null | null ; + +overwriteInputName +required_capability: grok_dissect_masking +row text = "123 abc", int = 5 | sort int asc | grok text "%{NUMBER:text:int} %{WORD:description}" | keep text, int, description; + +text:integer | int:integer | description:keyword +123 | 5 | abc +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec index 2e45febe0de1d..c8cb6cf88a4f0 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec @@ -4,8 +4,8 @@ inLongAndInt required_capability: mv_warn from employees | where avg_worked_seconds in (372957040, salary_change.long, 236703986) | where emp_no in (10017, emp_no - 1) | keep emp_no, avg_worked_seconds; -warning:Line 1:24: evaluation of [avg_worked_seconds in (372957040, salary_change.long, 236703986)] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[avg_worked_seconds in \(372957040, salary_change.long, 236703986\)\] failed, treating result as null. 
Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value emp_no:integer |avg_worked_seconds:long 10017 |236703986 @@ -268,8 +268,8 @@ lessThanMultivalue required_capability: mv_warn from employees | where salary_change.int < 1 | keep emp_no, salary_change.int | sort emp_no | limit 5; -warning:Line 1:24: evaluation of [salary_change.int < 1] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[salary_change.int < 1\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value // Note that multivalued salaries aren't less than 1 - they are null - so they aren't included emp_no:integer |salary_change.int:integer @@ -284,8 +284,8 @@ greaterThanMultivalue required_capability: mv_warn from employees | where salary_change.int > 1 | keep emp_no, salary_change.int | sort emp_no | limit 5; -warning:Line 1:24: evaluation of [salary_change.int > 1] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[salary_change.int > 1\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value // Note that multivalued salaries aren't greater than 1 - they are null - so they aren't included emp_no:integer |salary_change.int:integer @@ -300,8 +300,8 @@ equalToMultivalue required_capability: mv_warn from employees | where salary_change.int == 0 | keep emp_no, salary_change.int | sort emp_no; -warning:Line 1:24: evaluation of [salary_change.int == 0] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[salary_change.int == 0\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value // Note that multivalued salaries aren't greater than 1 - they are null - so they aren't included emp_no:integer |salary_change.int:integer @@ -315,8 +315,8 @@ equalToOrEqualToMultivalue required_capability: mv_warn from employees | where salary_change.int == 1 or salary_change.int == 8 | keep emp_no, salary_change.int | sort emp_no; -warning:Line 1:24: evaluation of [salary_change.int] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[salary_change.int\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value // Note that multivalued salaries are filtered out emp_no:integer |salary_change.int:integer @@ -328,8 +328,8 @@ inMultivalue required_capability: mv_warn from employees | where salary_change.int in (1, 7) | keep emp_no, salary_change.int | sort emp_no; -warning:Line 1:24: evaluation of [salary_change.int in (1, 7)] failed, treating result as null. Only first 20 failures recorded. 
-warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[salary_change.int in \(1, 7\)\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value // Note that multivalued salaries are filtered out emp_no:integer |salary_change.int:integer @@ -341,8 +341,8 @@ notLessThanMultivalue required_capability: mv_warn from employees | where not(salary_change.int < 1) | keep emp_no, salary_change.int | sort emp_no | limit 5; -warning:Line 1:24: evaluation of [not(salary_change.int < 1)] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [salary_change.int < 1] failed, treating result as null. Only first 20 failures recorded.] -warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value#[emulated:Line 1:28: java.lang.IllegalArgumentException: single-value function encountered multi-value] +warningRegex:evaluation of \[.*salary_change.int < 1.*\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value // Note that multivalued salaries aren't less than 1 - they are null - so they aren't included emp_no:integer |salary_change.int:integer @@ -357,8 +357,8 @@ notGreaterThanMultivalue required_capability: mv_warn from employees | where not(salary_change.int > 1) | keep emp_no, salary_change.int | sort emp_no | limit 5; -warning:Line 1:24: evaluation of [not(salary_change.int > 1)] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [salary_change.int > 1] failed, treating result as null. Only first 20 failures recorded.] -warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value#[Emulated:Line 1:28: java.lang.IllegalArgumentException: single-value function encountered multi-value] +warningRegex:evaluation of \[.*salary_change.int > 1.*\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value // Note that multivalued salaries aren't less than 1 - they are null - so they aren't included emp_no:integer |salary_change.int:integer @@ -373,8 +373,8 @@ notEqualToMultivalue required_capability: mv_warn from employees | where not(salary_change.int == 1) | keep emp_no, salary_change.int | sort emp_no | limit 5; -warning:Line 1:24: evaluation of [not(salary_change.int == 1)] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [salary_change.int == 1] failed, treating result as null. Only first 20 failures recorded.] -warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value#[Emulated:Line 1:28: java.lang.IllegalArgumentException: single-value function encountered multi-value] +warningRegex:evaluation of \[.*salary_change.int == 1.*\] failed, treating result as null. 
Only first 20 failures recorded +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value // Note that multivalued salaries aren't greater than 1 - they are null - so they aren't included emp_no:integer |salary_change.int:integer diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec index 61f529d60bf90..697b1c899d65e 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec @@ -19,8 +19,8 @@ equals required_capability: mv_warn from hosts | sort host, card | where ip0 == ip1 | keep card, host, ip0, ip1; -warning:Line 1:38: evaluation of [ip0 == ip1] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:38: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[ip0 == ip1\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value card:keyword |host:keyword |ip0:ip |ip1:ip eth0 |alpha |127.0.0.1 |127.0.0.1 @@ -63,8 +63,8 @@ lessThan required_capability: mv_warn from hosts | sort host, card, ip1 | where ip0 < ip1 | keep card, host, ip0, ip1; -warning:Line 1:43: evaluation of [ip0 < ip1] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:43: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[ip0 < ip1\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value card:keyword |host:keyword |ip0:ip |ip1:ip eth1 |beta |127.0.0.1 |127.0.0.2 @@ -76,8 +76,8 @@ notEquals required_capability: mv_warn from hosts | sort host, card, ip1 | where ip0 != ip1 | keep card, host, ip0, ip1; -warning:Line 1:43: evaluation of [ip0 != ip1] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:43: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[ip0 != ip1\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value card:keyword |host:keyword |ip0:ip |ip1:ip eth0 |beta |127.0.0.1 |::1 @@ -150,10 +150,10 @@ required_capability: mv_warn from hosts | eval eq=case(ip0==ip1, ip0, ip1) | where eq in (ip0, ip1) | keep card, host, ip0, ip1, eq; ignoreOrder:true -warning:Line 1:27: evaluation of [ip0==ip1] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:27: java.lang.IllegalArgumentException: single-value function encountered multi-value -warning:Line 1:55: evaluation of [eq in (ip0, ip1)] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:55: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[ip0==ip1\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[eq in \(ip0, ip1\)\] failed, treating result as null. Only first 20 failures recorded. 
+warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value card:keyword |host:keyword |ip0:ip |ip1:ip |eq:ip eth0 |alpha |127.0.0.1 |127.0.0.1 |127.0.0.1 @@ -191,8 +191,8 @@ cidrMatchSimple required_capability: mv_warn from hosts | where cidr_match(ip1, "127.0.0.2/32") | keep card, host, ip0, ip1; -warning:Line 1:20: evaluation of [cidr_match(ip1, \"127.0.0.2/32\")] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:20: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[cidr_match\(ip1, \\\"127.0.0.2/32\\\"\)\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value card:keyword |host:keyword |ip0:ip |ip1:ip eth1 |beta |127.0.0.1 |127.0.0.2 @@ -203,8 +203,8 @@ required_capability: mv_warn from hosts | where cidr_match(ip0, "127.0.0.2/32") is null | keep card, host, ip0, ip1; ignoreOrder:true -warning:Line 1:20: evaluation of [cidr_match(ip0, \"127.0.0.2/32\")] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:20: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[cidr_match\(ip0, \\\"127.0.0.2/32\\\"\)\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value card:keyword |host:keyword |ip0:ip |ip1:ip eth0 |epsilon |[fe80::cae2:65ff:fece:feb9, fe80::cae2:65ff:fece:fec0, fe80::cae2:65ff:fece:fec1]|fe80::cae2:65ff:fece:fec1 @@ -285,8 +285,8 @@ str1:keyword |str2:keyword |ip1:ip |ip2:ip pushDownIP from hosts | where ip1 == to_ip("::1") | keep card, host, ip0, ip1; ignoreOrder:true -warning:#[Emulated:Line 1:20: evaluation of [ip1 == to_ip(\"::1\")] failed, treating result as null. Only first 20 failures recorded.] -warning:#[Emulated:Line 1:20: java.lang.IllegalArgumentException: single-value function encountered multi-value] +warningRegex:evaluation of \[ip1 == to_ip\(\\\"::1\\\"\)\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value card:keyword |host:keyword |ip0:ip |ip1:ip eth1 |alpha |::1 |::1 @@ -312,8 +312,8 @@ required_capability: mv_warn from hosts | where ip1 > to_ip("127.0.0.1") | keep card, ip1; ignoreOrder:true -warning:Line 1:20: evaluation of [ip1 > to_ip(\"127.0.0.1\")] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:20: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[ip1 > to_ip\(\\\"127.0.0.1\\\"\)\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value card:keyword |ip1:ip eth1 |127.0.0.2 @@ -553,8 +553,8 @@ required_capability: fn_ip_prefix from hosts | stats count(*) by ip_prefix(ip1, 24, 120) | sort `ip_prefix(ip1, 24, 120)`; -warning:Line 2:21: evaluation of [ip_prefix(ip1, 24, 120)] failed, treating result as null. Only first 20 failures recorded. -warning:Line 2:21: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[ip_prefix\(ip1, 24, 120\)\] failed, treating result as null. Only first 20 failures recorded. 
+warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value count(*):long | ip_prefix(ip1, 24, 120):ip 2 | ::0 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/k8s-metrics.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/k8s-metrics.csv-spec index 91084726bfb25..7e4fd2d3f41f6 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/k8s-metrics.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/k8s-metrics.csv-spec @@ -10,12 +10,120 @@ METRICS k8s | sort @timestamp DESC, cluster, pod | keep @timestamp,cluster,pod,n 2024-05-10T00:22:49.000Z | staging | two | 3 | 1.75 ; -metricsWithAggs +metricsWithAggsAndSourceQuoting required_capability: metrics_syntax -METRICS k8s max_bytes=max(to_long(network.total_bytes_in)) BY cluster | SORT max_bytes DESC; +required_capability: double_quotes_source_enclosing +METRICS "k8s" max_bytes=max(to_long(network.total_bytes_in)) BY cluster | SORT max_bytes DESC; max_bytes:long | cluster: keyword 10797 | qa 10277 | prod 7403 | staging ; + +maxRateAndSourceTripleQuoting +required_capability: metrics_syntax +required_capability: double_quotes_source_enclosing +METRICS """k8s""" max(rate(network.total_bytes_in, 1minute)); + +max(rate(network.total_bytes_in, 1minute)): double +790.4235090751945 +; + +maxCost +required_capability: metrics_syntax +METRICS k8s max_cost=max(rate(network.total_cost)); + +max_cost: double +0.16151685393258428 +; + +maxRateAndBytes +required_capability: metrics_syntax +METRICS k8s max(rate(network.total_bytes_in, 1minute)), max(network.bytes_in); + +max(rate(network.total_bytes_in, 1minute)): double | max(network.bytes_in): long +790.4235090751945 | 1021 +; + +`maxRateAndMarkupBytes` +required_capability: metrics_syntax +METRICS k8s max(rate(network.total_bytes_in, 1minute)), max(network.bytes_in * 1.05); + +max(rate(network.total_bytes_in, 1minute)): double | max(network.bytes_in * 1.05): double +790.4235090751945 | 1072.05 +; + +maxRateAndBytesAndCost +required_capability: metrics_syntax +METRICS k8s max(rate(network.total_bytes_in, 1minute)), max(network.bytes_in), max(rate(network.total_cost)); + +max(rate(network.total_bytes_in, 1minute)): double| max(network.bytes_in): long| max(rate(network.total_cost)): double +790.4235090751945 | 1021 | 0.16151685393258428 +; + +sumRate +required_capability: metrics_syntax +METRICS k8s bytes=sum(rate(network.total_bytes_in)), sum(rate(network.total_cost)) BY cluster | SORT cluster; + +bytes: double | sum(rate(network.total_cost)): double | cluster: keyword +24.49149357711476 | 0.3018995503437827 | prod +33.308519044441084 | 0.4474920369252062 | qa +18.610708062016123 | 0.24387090901805775 | staging +; + +oneRateWithBucket +required_capability: metrics_syntax +METRICS k8s max(rate(network.total_bytes_in)) BY time_bucket = bucket(@timestamp,5minute) | SORT time_bucket DESC | LIMIT 2; + +max(rate(network.total_bytes_in)): double | time_bucket:date +10.594594594594595 | 2024-05-10T00:20:00.000Z +23.702205882352942 | 2024-05-10T00:15:00.000Z +; + +twoRatesWithBucket +required_capability: metrics_syntax +METRICS k8s max(rate(network.total_bytes_in)), sum(rate(network.total_bytes_in)) BY time_bucket = bucket(@timestamp,5minute) | SORT time_bucket DESC | LIMIT 3; + +max(rate(network.total_bytes_in)): double | sum(rate(network.total_bytes_in)): double | time_bucket:date +10.594594594594595 | 42.70864495221802 | 2024-05-10T00:20:00.000Z +23.702205882352942 | 112.36715680313907 | 2024-05-10T00:15:00.000Z 
+17.90625 | 85.18387414067914 | 2024-05-10T00:10:00.000Z +; + + +oneRateWithBucketAndCluster +required_capability: metrics_syntax +METRICS k8s max(rate(network.total_bytes_in)) BY time_bucket = bucket(@timestamp,5minute), cluster | SORT time_bucket DESC, cluster | LIMIT 6; + +max(rate(network.total_bytes_in)): double | time_bucket:date | cluster: keyword +10.594594594594595 | 2024-05-10T00:20:00.000Z | prod +5.586206896551724 | 2024-05-10T00:20:00.000Z | qa +5.37037037037037 | 2024-05-10T00:20:00.000Z | staging +15.913978494623656 | 2024-05-10T00:15:00.000Z | prod +23.702205882352942 | 2024-05-10T00:15:00.000Z | qa +9.823232323232324 | 2024-05-10T00:15:00.000Z | staging +; + +BytesAndCostByBucketAndCluster +required_capability: metrics_syntax +METRICS k8s max(rate(network.total_bytes_in)), max(network.cost) BY time_bucket = bucket(@timestamp,5minute), cluster | SORT time_bucket DESC, cluster | LIMIT 6; + +max(rate(network.total_bytes_in)): double | max(network.cost): double | time_bucket:date | cluster: keyword +10.594594594594595 | 10.75 | 2024-05-10T00:20:00.000Z | prod +5.586206896551724 | 11.875 | 2024-05-10T00:20:00.000Z | qa +5.37037037037037 | 9.5 | 2024-05-10T00:20:00.000Z | staging +15.913978494623656 | 12.375 | 2024-05-10T00:15:00.000Z | prod +23.702205882352942 | 12.125 | 2024-05-10T00:15:00.000Z | qa +9.823232323232324 | 11.5 | 2024-05-10T00:15:00.000Z | staging +; + +oneRateWithBucketAndClusterThenFilter +required_capability: metrics_syntax +METRICS k8s max(rate(network.total_bytes_in)) BY time_bucket = bucket(@timestamp,5minute), cluster | WHERE cluster=="prod" | SORT time_bucket DESC | LIMIT 3; + +max(rate(network.total_bytes_in)): double | time_bucket:date | cluster: keyword +10.594594594594595 | 2024-05-10T00:20:00.000Z | prod +15.913978494623656 | 2024-05-10T00:15:00.000Z | prod +11.562737642585551 | 2024-05-10T00:10:00.000Z | prod +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup.csv-spec index 35e1101becbf9..71f74cbb113ef 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup.csv-spec @@ -1,5 +1,5 @@ keywordByInt -required_capability: tables_types +required_capability: lookup_v4 FROM employees | SORT emp_no | LIMIT 4 @@ -16,20 +16,20 @@ emp_no:integer | languages:integer | lang_name:keyword 10004 | 5 | five ; -keywordByMvInt -required_capability: tables_types +keywordByMvIntAndQuotedSource +required_capability: lookup_v4 ROW int=[1, 2, 3] -| LOOKUP int_number_names ON int +| LOOKUP "int_number_names" ON int ; int:integer | name:keyword [1, 2, 3] | [one, two, three] ; -keywordByDupeInt -required_capability: tables_types +keywordByDupeIntAndTripleQuotedSource +required_capability: lookup_v4 ROW int=[1, 1, 1] -| LOOKUP int_number_names ON int +| LOOKUP """int_number_names""" ON int ; int:integer | name:keyword @@ -37,7 +37,7 @@ int:integer | name:keyword ; intByKeyword -required_capability: tables_types +required_capability: lookup_v4 ROW name="two" | LOOKUP int_number_names ON name ; @@ -48,7 +48,7 @@ name:keyword | int:integer keywordByLong -required_capability: tables_types +required_capability: lookup_v4 FROM employees | SORT emp_no | LIMIT 4 @@ -66,7 +66,7 @@ emp_no:integer | languages:long | lang_name:keyword ; longByKeyword -required_capability: tables_types +required_capability: lookup_v4 ROW name="two" | LOOKUP long_number_names ON name ; @@ -76,7 +76,7 @@ name:keyword | 
long:long ; keywordByFloat -required_capability: tables_types +required_capability: lookup_v4 FROM employees | SORT emp_no | LIMIT 4 @@ -94,7 +94,7 @@ emp_no:integer | height:double | height_name:keyword ; floatByKeyword -required_capability: tables_types +required_capability: lookup_v4 ROW name="two point zero eight" | LOOKUP double_number_names ON name ; @@ -104,7 +104,7 @@ two point zero eight | 2.08 ; floatByNullMissing -required_capability: tables_types +required_capability: lookup_v4 ROW name=null | LOOKUP double_number_names ON name ; @@ -114,7 +114,7 @@ name:null | double:double ; floatByNullMatching -required_capability: tables_types +required_capability: lookup_v4 ROW name=null | LOOKUP double_number_names_with_null ON name ; @@ -124,7 +124,7 @@ name:null | double:double ; intIntByKeywordKeyword -required_capability: tables_types +required_capability: lookup_v4 ROW aa="foo", ab="zoo" | LOOKUP big ON aa, ab ; @@ -134,7 +134,7 @@ foo | zoo | 1 | -1 ; intIntByKeywordKeywordMissing -required_capability: tables_types +required_capability: lookup_v4 ROW aa="foo", ab="zoi" | LOOKUP big ON aa, ab ; @@ -144,7 +144,7 @@ foo | zoi | null | null ; intIntByKeywordKeywordSameValues -required_capability: tables_types +required_capability: lookup_v4 ROW aa="foo", ab="foo" | LOOKUP big ON aa, ab ; @@ -154,7 +154,7 @@ foo | foo | 2 | -2 ; intIntByKeywordKeywordSameValuesMissing -required_capability: tables_types +required_capability: lookup_v4 ROW aa="bar", ab="bar" | LOOKUP big ON aa, ab ; @@ -164,7 +164,7 @@ bar | bar | null | null ; lookupBeforeStats -required_capability: tables_types +required_capability: lookup_v4 FROM employees | RENAME languages AS int | LOOKUP int_number_names ON int @@ -182,7 +182,7 @@ height:double | languages:keyword ; lookupAfterStats -required_capability: tables_types +required_capability: lookup_v4 FROM employees | STATS int=TO_INT(AVG(height)) | LOOKUP int_number_names ON int @@ -194,7 +194,7 @@ two // Makes sure the LOOKUP squashes previous names doesNotDuplicateNames -required_capability: tables_types +required_capability: lookup_v4 FROM employees | SORT emp_no | LIMIT 4 @@ -213,7 +213,7 @@ emp_no:integer | languages:long | name:keyword ; lookupBeforeSort -required_capability: tables_types +required_capability: lookup_v4 FROM employees | WHERE emp_no < 10005 | RENAME languages AS int @@ -231,7 +231,7 @@ languages:keyword | emp_no:integer ; lookupAfterSort -required_capability: tables_types +required_capability: lookup_v4 FROM employees | WHERE emp_no < 10005 | SORT languages ASC, emp_no ASC @@ -248,12 +248,38 @@ languages:keyword | emp_no:integer five | 10004 ; +shadowing +required_capability: lookup_v4 +FROM employees +| KEEP emp_no +| WHERE emp_no == 10001 +| EVAL left = "left", int = emp_no - 10000, name = "name", right = "right" +| LOOKUP int_number_names ON int +; + +emp_no:integer | left:keyword | int:integer | right:keyword | name:keyword + 10001 | left | 1 | right | one +; + +shadowingMulti +required_capability: lookup_v4 +FROM employees +| KEEP emp_no +| WHERE emp_no == 10001 +| EVAL left = "left", nb = -10011+emp_no, na = "na", middle = "middle", ab = "ab", aa = "bar", right = "right" +| LOOKUP big ON aa, nb +; + +emp_no:integer | left:keyword | nb:integer | middle:keyword | aa:keyword | right:keyword | ab:keyword | na:integer + 10001 | left | -10 | middle | bar | right | zop | 10 +; + // // Make sure that the new LOOKUP syntax doesn't clash with any existing things // named "lookup" // rowNamedLookup -required_capability: tables_types 
+required_capability: lookup_v4 ROW lookup = "a" ; @@ -262,7 +288,7 @@ lookup:keyword ; rowNamedLOOKUP -required_capability: tables_types +required_capability: lookup_v4 ROW LOOKUP = "a" ; @@ -271,7 +297,7 @@ LOOKUP:keyword ; evalNamedLookup -required_capability: tables_types +required_capability: lookup_v4 ROW a = "a" | EVAL lookup = CONCAT(a, "1") ; @@ -280,7 +306,7 @@ a:keyword | lookup:keyword ; dissectNamedLookup -required_capability: tables_types +required_capability: lookup_v4 row a = "foo bar" | dissect a "foo %{lookup}"; a:keyword | lookup:keyword @@ -288,7 +314,7 @@ a:keyword | lookup:keyword ; renameIntoLookup -required_capability: tables_types +required_capability: lookup_v4 row a = "foo bar" | RENAME a AS lookup; lookup:keyword @@ -296,7 +322,7 @@ lookup:keyword ; sortOnLookup -required_capability: tables_types +required_capability: lookup_v4 ROW lookup = "a" | SORT lookup ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-distances.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-distances.json new file mode 100644 index 0000000000000..8e29f9e71f129 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-distances.json @@ -0,0 +1,10 @@ +{ + "properties": { + "distance": { + "type": "double" + }, + "location": { + "type": "geo_point" + } + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec index be6cd058d24e9..8337af42df5ea 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec @@ -443,8 +443,8 @@ from employees | keep emp_no, languages, salary, avg_worked_seconds, l1, l2, l3 | limit 5; -warning:Line 2:13: evaluation of [LOG(languages, salary)] failed, treating result as null. Only first 20 failures recorded. -warning:Line 2:13: java.lang.ArithmeticException: Log of base 1 +warningRegex:evaluation of \[LOG\(languages, salary\)\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.ArithmeticException: Log of base 1 emp_no:integer | languages:integer | salary:integer | avg_worked_seconds:long | l1:double | l2:double | l3:double 10001 | 2 | 57305 | 268728049 | 15.806373402659007 | 19.409210455930772 | 35.21558385858978 @@ -481,8 +481,8 @@ from employees | keep l1, l2, emp_no, languages, salary, avg_worked_seconds, l3 | limit 5; -warning:Line 2:13: evaluation of [LOG(languages, salary)] failed, treating result as null. Only first 20 failures recorded. -warning:Line 2:13: java.lang.ArithmeticException: Log of base 1 +warningRegex:evaluation of \[LOG\(languages, salary\)\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.ArithmeticException: Log of base 1 l1:double | l2:double | emp_no:integer | languages:integer | salary:integer | avg_worked_seconds:long | l3:double 6.300030441266983 | 19.782340222815456 | 10015 | 5 | 25324 | 390266432 | 26.08237066408244 @@ -502,8 +502,8 @@ from employees | keep l1, l2, emp_no, base1, salary, avg_worked_seconds, l3 | limit 5; -warning:Line 3:13: evaluation of [LOG(base1, salary)] failed, treating result as null. Only first 20 failures recorded. -warning:Line 3:13: java.lang.ArithmeticException: Log of base 1 +warningRegex:evaluation of \[LOG\(base1, salary\)\] failed, treating result as null. Only first 20 failures recorded. 
Only first 20 failures recorded.
+warningRegex:java.lang.ArithmeticException: Log of base 1 l1:double | l2:double | emp_no:integer | base1:integer | salary:integer | avg_worked_seconds:long | l3:double null | 19.774989878141827 | 10044 | 1 | 39728 | 387408356 | null diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec index 0fb35b4253d6d..f44f1041a6800 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec @@ -1,4 +1,4 @@ -metaFunctionsSynopsis#[skip:-8.14.99] +metaFunctionsSynopsis#[skip:-8.15.99] meta functions | keep synopsis; synopsis:keyword @@ -38,10 +38,10 @@ double e() "double log(?base:integer|unsigned_long|long|double, number:integer|unsigned_long|long|double)" "double log10(number:double|integer|long|unsigned_long)" "keyword|text ltrim(string:keyword|text)" -"double|integer|long|date max(number:double|integer|long|date)" +"boolean|double|integer|long|date max(field:boolean|double|integer|long|date)" "double|integer|long median(number:double|integer|long)" "double|integer|long median_absolute_deviation(number:double|integer|long)" -"double|integer|long|date min(number:double|integer|long|date)" +"boolean|double|integer|long|date min(field:boolean|double|integer|long|date)" "boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version mv_append(field1:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version, field2:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version)" "double mv_avg(number:double|integer|long|unsigned_long)" "keyword mv_concat(string:text|keyword, delim:text|keyword)" @@ -73,6 +73,7 @@ double pi() "geo_point|cartesian_point st_centroid_agg(field:geo_point|cartesian_point)" "boolean st_contains(geomA:geo_point|cartesian_point|geo_shape|cartesian_shape, geomB:geo_point|cartesian_point|geo_shape|cartesian_shape)" "boolean st_disjoint(geomA:geo_point|cartesian_point|geo_shape|cartesian_shape, geomB:geo_point|cartesian_point|geo_shape|cartesian_shape)" +"double st_distance(geomA:geo_point|cartesian_point, geomB:geo_point|cartesian_point)" "boolean st_intersects(geomA:geo_point|cartesian_point|geo_shape|cartesian_shape, geomB:geo_point|cartesian_point|geo_shape|cartesian_shape)" "boolean st_within(geomA:geo_point|cartesian_point|geo_shape|cartesian_shape, geomB:geo_point|cartesian_point|geo_shape|cartesian_shape)" "double st_x(point:geo_point|cartesian_point)" @@ -109,12 +110,13 @@ double tau() "keyword|text to_upper(str:keyword|text)" "version to_ver(field:keyword|text|version)" "version to_version(field:keyword|text|version)" -"double|integer|long|date top_list(field:double|integer|long|date, limit:integer, order:keyword)" +"double|integer|long|date top(field:double|integer|long|date, limit:integer, order:keyword)" "keyword|text trim(string:keyword|text)" "boolean|date|double|integer|ip|keyword|long|text|version values(field:boolean|date|double|integer|ip|keyword|long|text|version)" +"double weighted_avg(number:double|integer|long, weight:double|integer|long)" ; -metaFunctionsArgs#[skip:-8.14.99] +metaFunctionsArgs#[skip:-8.15.99] META functions | EVAL name = SUBSTRING(name, 0, 14) | KEEP name, argNames, argTypes, argDescriptions; @@ -156,10 +158,10 @@ locate |[string, substring, start] |["keyword|text", "keyword|te log |[base, number] 
|["integer|unsigned_long|long|double", "integer|unsigned_long|long|double"] |["Base of logarithm. If `null`\, the function returns `null`. If not provided\, this function returns the natural logarithm (base e) of a value.", "Numeric expression. If `null`\, the function returns `null`."] log10 |number |"double|integer|long|unsigned_long" |Numeric expression. If `null`, the function returns `null`. ltrim |string |"keyword|text" |String expression. If `null`, the function returns `null`. -max |number |"double|integer|long|date" |[""] +max |field |"boolean|double|integer|long|date" |[""] median |number |"double|integer|long" |[""] median_absolut|number |"double|integer|long" |[""] -min |number |"double|integer|long|date" |[""] +min |field |"boolean|double|integer|long|date" |[""] mv_append |[field1, field2] |["boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version", "boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version"] | ["", ""] mv_avg |number |"double|integer|long|unsigned_long" |Multivalue expression. mv_concat |[string, delim] |["text|keyword", "text|keyword"] |[Multivalue expression., Delimiter.] @@ -191,6 +193,7 @@ sqrt |number |"double|integer|long|unsigne st_centroid_ag|field |"geo_point|cartesian_point" |[""] st_contains |[geomA, geomB] |["geo_point|cartesian_point|geo_shape|cartesian_shape", "geo_point|cartesian_point|geo_shape|cartesian_shape"] |[Expression of type `geo_point`\, `cartesian_point`\, `geo_shape` or `cartesian_shape`. If `null`\, the function returns `null`., Expression of type `geo_point`\, `cartesian_point`\, `geo_shape` or `cartesian_shape`. If `null`\, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters.] st_disjoint |[geomA, geomB] |["geo_point|cartesian_point|geo_shape|cartesian_shape", "geo_point|cartesian_point|geo_shape|cartesian_shape"] |[Expression of type `geo_point`\, `cartesian_point`\, `geo_shape` or `cartesian_shape`. If `null`\, the function returns `null`., Expression of type `geo_point`\, `cartesian_point`\, `geo_shape` or `cartesian_shape`. If `null`\, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters.] +st_distance |[geomA, geomB] |["geo_point|cartesian_point", "geo_point|cartesian_point"] |[Expression of type `geo_point` or `cartesian_point`. If `null`\, the function returns `null`., Expression of type `geo_point` or `cartesian_point`. If `null`\, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_point` and `cartesian_point` parameters.] st_intersects |[geomA, geomB] |["geo_point|cartesian_point|geo_shape|cartesian_shape", "geo_point|cartesian_point|geo_shape|cartesian_shape"] |[Expression of type `geo_point`\, `cartesian_point`\, `geo_shape` or `cartesian_shape`. If `null`\, the function returns `null`., Expression of type `geo_point`\, `cartesian_point`\, `geo_shape` or `cartesian_shape`. If `null`\, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters.] 
st_within |[geomA, geomB] |["geo_point|cartesian_point|geo_shape|cartesian_shape", "geo_point|cartesian_point|geo_shape|cartesian_shape"] |[Expression of type `geo_point`\, `cartesian_point`\, `geo_shape` or `cartesian_shape`. If `null`\, the function returns `null`., Expression of type `geo_point`\, `cartesian_point`\, `geo_shape` or `cartesian_shape`. If `null`\, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters.] st_x |point |"geo_point|cartesian_point" |Expression of type `geo_point` or `cartesian_point`. If `null`, the function returns `null`. @@ -227,12 +230,13 @@ to_unsigned_lo|field |"boolean|date|keyword|text|d to_upper |str |"keyword|text" |String expression. If `null`, the function returns `null`. to_ver |field |"keyword|text|version" |Input value. The input can be a single- or multi-valued column or an expression. to_version |field |"keyword|text|version" |Input value. The input can be a single- or multi-valued column or an expression. -top_list |[field, limit, order] |["double|integer|long|date", integer, keyword] |[The field to collect the top values for.,The maximum number of values to collect.,The order to calculate the top values. Either `asc` or `desc`.] +top |[field, limit, order] |["double|integer|long|date", integer, keyword] |[The field to collect the top values for.,The maximum number of values to collect.,The order to calculate the top values. Either `asc` or `desc`.] trim |string |"keyword|text" |String expression. If `null`, the function returns `null`. values |field |"boolean|date|double|integer|ip|keyword|long|text|version" |[""] +weighted_avg |[number, weight] |["double|integer|long", "double|integer|long"] |[A numeric value., A numeric weight.] ; -metaFunctionsDescription#[skip:-8.14.99] +metaFunctionsDescription#[skip:-8.15.99] META functions | EVAL name = SUBSTRING(name, 0, 14) | KEEP name, description @@ -271,14 +275,14 @@ ip_prefix |Truncates an IP to a given prefix length. least |Returns the minimum value from multiple columns. This is similar to <> except it is intended to run on multiple columns at once. left |Returns the substring that extracts 'length' chars from 'string' starting from the left. length |Returns the character length of a string. -locate |Returns an integer that indicates the position of a keyword substring within another string +locate |Returns an integer that indicates the position of a keyword substring within another string. log |Returns the logarithm of a value to a base. The input can be any numeric value, the return value is always a double. Logs of zero, negative numbers, and base of one return `null` as well as a warning. log10 |Returns the logarithm of a value to base 10. The input can be any numeric value, the return value is always a double. Logs of 0 and negative numbers return `null` as well as a warning. ltrim |Removes leading whitespaces from a string. -max |The maximum value of a numeric field. +max |The maximum value of a field. median |The value that is greater than half of all values and less than half of all values. median_absolut|The median absolute deviation, a measure of variability. -min |The minimum value of a numeric field. +min |The minimum value of a field. mv_append |Concatenates values of two multi-value fields. mv_avg |Converts a multivalued field into a single valued field containing the average of all of the values. 
mv_concat |Converts a multivalued string expression into a single valued column containing the concatenation of all values separated by a delimiter. @@ -310,12 +314,13 @@ sqrt |Returns the square root of a number. The input can be any numeric st_centroid_ag|The centroid of a spatial field. st_contains |Returns whether the first geometry contains the second geometry. This is the inverse of the <> function. st_disjoint |Returns whether the two geometries or geometry columns are disjoint. This is the inverse of the <> function. In mathematical terms: ST_Disjoint(A, B) ⇔ A ⋂ B = ∅ +st_distance |Computes the distance between two points. For cartesian geometries, this is the pythagorean distance in the same units as the original coordinates. For geographic geometries, this is the circular distance along the great circle in meters. st_intersects |Returns true if two geometries intersect. They intersect if they have any point in common, including their interior points (points along lines or within polygons). This is the inverse of the <> function. In mathematical terms: ST_Intersects(A, B) ⇔ A ⋂ B ≠ ∅ st_within |Returns whether the first geometry is within the second geometry. This is the inverse of the <> function. st_x |Extracts the `x` coordinate from the supplied point. If the points is of type `geo_point` this is equivalent to extracting the `longitude` value. st_y |Extracts the `y` coordinate from the supplied point. If the points is of type `geo_point` this is equivalent to extracting the `latitude` value. starts_with |Returns a boolean that indicates whether a keyword string starts with another string. -substring |Returns a substring of a string, specified by a start position and an optional length +substring |Returns a substring of a string, specified by a start position and an optional length. sum |The sum of a numeric field. tan |Returns the {wikipedia}/Sine_and_cosine[Tangent] trigonometric function of an angle. tanh |Returns the {wikipedia}/Hyperbolic_functions[Tangent] hyperbolic function of an angle. @@ -346,12 +351,13 @@ to_unsigned_lo|Converts an input value to an unsigned long value. If the input p to_upper |Returns a new string representing the input string converted to upper case. to_ver |Converts an input string to a version value. to_version |Converts an input string to a version value. -top_list |Collects the top values for a field. Includes repeated values. +top |Collects the top values for a field. Includes repeated values. trim |Removes leading and trailing whitespaces from a string. values |Collect values for a field. +weighted_avg |The weighted average of a numeric field. 
; -metaFunctionsRemaining#[skip:-8.14.99] +metaFunctionsRemaining#[skip:-8.15.99] META functions | EVAL name = SUBSTRING(name, 0, 14) | KEEP name, * @@ -395,10 +401,10 @@ locate |integer log |double |[true, false] |false |false log10 |double |false |false |false ltrim |"keyword|text" |false |false |false -max |"double|integer|long|date" |false |false |true +max |"boolean|double|integer|long|date" |false |false |true median |"double|integer|long" |false |false |true median_absolut|"double|integer|long" |false |false |true -min |"double|integer|long|date" |false |false |true +min |"boolean|double|integer|long|date" |false |false |true mv_append |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|version" |[false, false] |false |false mv_avg |double |false |false |false mv_concat |keyword |[false, false] |false |false @@ -430,6 +436,7 @@ sqrt |double st_centroid_ag|"geo_point|cartesian_point" |false |false |true st_contains |boolean |[false, false] |false |false st_disjoint |boolean |[false, false] |false |false +st_distance |double |[false, false] |false |false st_intersects |boolean |[false, false] |false |false st_within |boolean |[false, false] |false |false st_x |double |false |false |false @@ -466,12 +473,13 @@ to_unsigned_lo|unsigned_long to_upper |"keyword|text" |false |false |false to_ver |version |false |false |false to_version |version |false |false |false -top_list |"double|integer|long|date" |[false, false, false] |false |true +top |"double|integer|long|date" |[false, false, false] |false |true trim |"keyword|text" |false |false |false values |"boolean|date|double|integer|ip|keyword|long|text|version" |false |false |true +weighted_avg |"double" |[false, false] |false |true ; -metaFunctionsFiltered#[skip:-8.14.99] +metaFunctionsFiltered#[skip:-8.15.99] META FUNCTIONS | WHERE STARTS_WITH(name, "sin") ; @@ -481,11 +489,9 @@ sin |"double sin(angle:double|integer|long|unsigned_long)" |angle sinh |"double sinh(angle:double|integer|long|unsigned_long)" |angle |"double|integer|long|unsigned_long" | "An angle, in radians. If `null`, the function returns `null`." | double | "Returns the {wikipedia}/Hyperbolic_functions[hyperbolic sine] of an angle." 
| false | false | false ; - -// see https://github.com/elastic/elasticsearch/issues/102120 -countFunctions#[skip:-8.14.99, reason:BIN added] +countFunctions#[skip:-8.15.99] meta functions | stats a = count(*), b = count(*), c = count(*) | mv_expand c; a:long | b:long | c:long -110 | 110 | 110 +112 | 112 | 112 ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec index bc24c2d23adc4..02067e9dbe490 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec @@ -146,7 +146,7 @@ c:long | x:double | y:double ; ############################################### -# Tests for ST_CENTROID on GEO_POINT type +# Tests for ST_CENTROID_AGG on GEO_POINT type centroidFromAirports required_capability: st_centroid_agg @@ -841,10 +841,10 @@ FROM airports // end::st_within-airports[] ; -// tag::st_within-airports-results[] +// tag::st_within-airports-result[] abbrev:keyword | city:keyword | city_location:geo_point | country:keyword | location:geo_point | name:text | scalerank:i | type:k HOD | Al Ḩudaydah | POINT(42.9511 14.8022) | Yemen | POINT(42.97109630194 14.7552534413725) | Hodeidah Int'l | 9 | mid -// end::st_within-airports-results[] +// end::st_within-airports-result[] ; airportCityLocationPointWithin @@ -870,6 +870,309 @@ location:geo_point | city_location:geo_point | count:long POINT (0 0) | POINT (0 0) | 1 ; +############################################### +# Tests for ST_DISTANCE with GEO_POINT + +literalGeoPointDistanceLiteralPoint +required_capability: st_distance + +ROW wkt = ["POINT(1 1)", "POINT(-1 -1)", "POINT(-1 1)", "POINT(1 -1)"] +| MV_EXPAND wkt +| EVAL pt = TO_GEOPOINT(wkt) +| EVAL distance = ST_DISTANCE(pt, TO_GEOPOINT("POINT(0 0)")) +; + +wkt:keyword | pt:geo_point | distance:double +"POINT(1 1)" | POINT(1 1) | 157249.5916907891 +"POINT(-1 -1)" | POINT(-1 -1) | 157249.6015756357 +"POINT(-1 1)" | POINT(-1 1) | 157249.5982806869 +"POINT(1 -1)" | POINT(1 -1) | 157249.59498573805 +; + +airportCityLocationPointDistance +required_capability: st_distance + +FROM airports +| EVAL distance = ST_DISTANCE(location, city_location) +| STATS distance=AVG(distance), count=COUNT() +; + +distance:double | count:long +15869.9876282387 | 891 +; + +airportDistanceToCityCopenhagen +required_capability: st_distance + +// tag::st_distance-airports[] +FROM airports +| WHERE abbrev == "CPH" +| EVAL distance = ST_DISTANCE(location, city_location) +| KEEP abbrev, name, location, city_location, distance +// end::st_distance-airports[] +; + +// tag::st_distance-airports-result[] +abbrev:k | name:text | location:geo_point | city_location:geo_point | distance:d +CPH | Copenhagen | POINT(12.6493508684508 55.6285017221528) | POINT(12.5683 55.6761) | 7339.573896618216 +// end::st_distance-airports-result[] +; + +airportsWithinPolygonDistanceFromCopenhagenTrainStation +required_capability: st_distance + +FROM airports +| WHERE ST_WITHIN(location, TO_GEOSHAPE("POLYGON((12 40, 14 40, 14 60, 12 60, 12 40))")) +| EVAL distance = ROUND(ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)"))/1000,2) +| EVAL city_distance = ROUND(ST_DISTANCE(city_location, TO_GEOPOINT("POINT(12.565 55.673)"))/1000,2) +| KEEP abbrev, name, location, country, city, city_location, distance, city_distance +| SORT distance ASC +; + +abbrev:k | name:text | location:geo_point | country:k | city:k | city_location:geo_point | distance:d | 
city_distance:d +CPH | Copenhagen | POINT(12.6493508684508 55.6285017221528) | Denmark | Copenhagen | POINT(12.5683 55.6761) | 7.24 | 0.4 +GOT | Gothenburg | POINT(12.2938269092573 57.6857493534879) | Sweden | Gothenburg | POINT(11.9675 57.7075) | 224.42 | 229.15 +TXL | Berlin-Tegel Int'l | POINT(13.2903090925074 52.5544287044101) | Germany | Hohen Neuendorf | POINT(13.2833 52.6667) | 349.97 | 337.53 +DRS | Dresden | POINT(13.7649671440047 51.1250912428871) | Germany | Dresden | POINT(13.74 51.05) | 511.9 | 519.91 +VCE | Venice Marco Polo | POINT(12.3410673004369 45.5048477588455) | Italy | Mestre | POINT(12.2381 45.4906) | 1130.76 | 1132.46 +FCO | Leonardo da Vinci Int'l | POINT(12.2501008973638 41.7950786307394) | Italy | Fiumicino | POINT(12.2333 41.7667) | 1543.33 | 1546.5 +; + +airportsWithinDistanceFromCopenhagenTrainStation +required_capability: st_distance + +FROM airports +| WHERE ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) < 600000 +| EVAL distance = ROUND(ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)"))/1000,2) +| EVAL city_distance = ROUND(ST_DISTANCE(city_location, TO_GEOPOINT("POINT(12.565 55.673)"))/1000,2) +| KEEP abbrev, name, location, country, city, city_location, distance, city_distance +| SORT distance ASC +; + +abbrev:k | name:text | location:geo_point | country:k | city:k | city_location:geo_point | distance:d | city_distance:d +CPH | Copenhagen | POINT(12.6493508684508 55.6285017221528) | Denmark | Copenhagen | POINT(12.5683 55.6761) | 7.24 | 0.4 +GOT | Gothenburg | POINT(12.2938269092573 57.6857493534879) | Sweden | Gothenburg | POINT(11.9675 57.7075) | 224.42 | 229.15 +HAM | Hamburg | POINT(10.005647830925 53.6320011640866) | Germany | Norderstedt | POINT(10.0103 53.7064) | 280.34 | 273.42 +TXL | Berlin-Tegel Int'l | POINT(13.2903090925074 52.5544287044101) | Germany | Hohen Neuendorf | POINT(13.2833 52.6667) | 349.97 | 337.53 +BRE | Bremen | POINT(8.7858617703132 53.052287104156) | Germany | Bremen | POINT(8.8 53.0833) | 380.5 | 377.22 +NRK | Norrköping Airport | POINT(16.2339407695814 58.5833805017541) | Sweden | Norrköping | POINT(16.2 58.6) | 392.0 | 392.35 +GDN | Gdansk Lech Walesa | POINT(18.4684422165911 54.3807025352925) | Poland | Gdańsk | POINT(18.6453 54.3475) | 402.61 | 414.59 +NYO | Stockholm-Skavsta | POINT(16.9216055584254 58.7851041303448) | Sweden | Nyköping | POINT(17.0086 58.7531) | 433.99 | 434.43 +OSL | Oslo Gardermoen | POINT(11.0991032762581 60.1935783171386) | Norway | Oslo | POINT(10.7389 59.9133) | 510.03 | 483.71 +DRS | Dresden | POINT(13.7649671440047 51.1250912428871) | Germany | Dresden | POINT(13.74 51.05) | 511.9 | 519.91 +BMA | Bromma | POINT(17.9456175406145 59.3555902065112) | Sweden | Stockholm | POINT(18.0686 59.3294) | 520.18 | 522.54 +PLQ | Palanga Int'l | POINT(21.0974463986251 55.9713426235358) | Lithuania | Klaipėda | POINT(21.1667 55.75) | 533.67 | 538.56 +ARN | Arlanda | POINT(17.9307299016916 59.6511203397372) | Sweden | Stockholm | POINT(18.0686 59.3294) | 545.09 | 522.54 +SVG | Stavanger Sola | POINT (5.6298103297218 58.8821564842185) | Norway | Sandnes | POINT (5.7361 58.8517) | 548.26 | 541.35 +; + +airportsWithinDistanceBandFromCopenhagenTrainStation +required_capability: st_distance + +FROM airports +| WHERE ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) < 600000 + AND ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) > 400000 +| EVAL distance = ROUND(ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)"))/1000,2) +| EVAL city_distance = 
ROUND(ST_DISTANCE(city_location, TO_GEOPOINT("POINT(12.565 55.673)"))/1000,2) +| KEEP abbrev, name, location, country, city, city_location, distance, city_distance +| SORT distance ASC +; + +abbrev:k | name:text | location:geo_point | country:k | city:k | city_location:geo_point | distance:d | city_distance:d +GDN | Gdansk Lech Walesa | POINT(18.4684422165911 54.3807025352925) | Poland | Gdańsk | POINT(18.6453 54.3475) | 402.61 | 414.59 +NYO | Stockholm-Skavsta | POINT(16.9216055584254 58.7851041303448) | Sweden | Nyköping | POINT(17.0086 58.7531) | 433.99 | 434.43 +OSL | Oslo Gardermoen | POINT(11.0991032762581 60.1935783171386) | Norway | Oslo | POINT(10.7389 59.9133) | 510.03 | 483.71 +DRS | Dresden | POINT(13.7649671440047 51.1250912428871) | Germany | Dresden | POINT(13.74 51.05) | 511.9 | 519.91 +BMA | Bromma | POINT(17.9456175406145 59.3555902065112) | Sweden | Stockholm | POINT(18.0686 59.3294) | 520.18 | 522.54 +PLQ | Palanga Int'l | POINT(21.0974463986251 55.9713426235358) | Lithuania | Klaipėda | POINT(21.1667 55.75) | 533.67 | 538.56 +ARN | Arlanda | POINT(17.9307299016916 59.6511203397372) | Sweden | Stockholm | POINT(18.0686 59.3294) | 545.09 | 522.54 +SVG | Stavanger Sola | POINT (5.6298103297218 58.8821564842185) | Norway | Sandnes | POINT (5.7361 58.8517) | 548.26 | 541.35 +; + +airportsWithComplexDistancePredicateFromCopenhagenTrainStation +required_capability: st_distance + +FROM airports +| WHERE (ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) <= 600000 + AND ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) >= 400000) + OR + (ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) < 300000 + AND ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) > 200000) +| EVAL distance = ROUND(ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)"))/1000,2) +| EVAL city_distance = ROUND(ST_DISTANCE(city_location, TO_GEOPOINT("POINT(12.565 55.673)"))/1000,2) +| KEEP abbrev, name, location, country, city, city_location, distance, city_distance +| SORT distance ASC +; + +abbrev:k | name:text | location:geo_point | country:k | city:k | city_location:geo_point | distance:d | city_distance:d +GOT | Gothenburg | POINT(12.2938269092573 57.6857493534879) | Sweden | Gothenburg | POINT(11.9675 57.7075) | 224.42 | 229.15 +HAM | Hamburg | POINT(10.005647830925 53.6320011640866) | Germany | Norderstedt | POINT(10.0103 53.7064) | 280.34 | 273.42 +GDN | Gdansk Lech Walesa | POINT(18.4684422165911 54.3807025352925) | Poland | Gdańsk | POINT(18.6453 54.3475) | 402.61 | 414.59 +NYO | Stockholm-Skavsta | POINT(16.9216055584254 58.7851041303448) | Sweden | Nyköping | POINT(17.0086 58.7531) | 433.99 | 434.43 +OSL | Oslo Gardermoen | POINT(11.0991032762581 60.1935783171386) | Norway | Oslo | POINT(10.7389 59.9133) | 510.03 | 483.71 +DRS | Dresden | POINT(13.7649671440047 51.1250912428871) | Germany | Dresden | POINT(13.74 51.05) | 511.9 | 519.91 +BMA | Bromma | POINT(17.9456175406145 59.3555902065112) | Sweden | Stockholm | POINT(18.0686 59.3294) | 520.18 | 522.54 +PLQ | Palanga Int'l | POINT(21.0974463986251 55.9713426235358) | Lithuania | Klaipėda | POINT(21.1667 55.75) | 533.67 | 538.56 +ARN | Arlanda | POINT(17.9307299016916 59.6511203397372) | Sweden | Stockholm | POINT(18.0686 59.3294) | 545.09 | 522.54 +SVG | Stavanger Sola | POINT (5.6298103297218 58.8821564842185) | Norway | Sandnes | POINT (5.7361 58.8517) | 548.26 | 541.35 +; + +airportsWithVeryComplexDistancePredicateFromCopenhagenTrainStation +required_capability: st_distance + +FROM airports +| WHERE 
((ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) <= 600000 + AND ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) >= 400000 + AND NOT (ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) < 500000 + AND ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) > 430000)) + OR + (ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) < 300000 + AND ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) > 200000)) + AND NOT abbrev == "PLQ" + AND scalerank < 6 +| EVAL distance = ROUND(ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)"))/1000,2) +| EVAL city_distance = ROUND(ST_DISTANCE(city_location, TO_GEOPOINT("POINT(12.565 55.673)"))/1000,2) +| KEEP abbrev, scalerank, name, location, country, city, city_location, distance, city_distance +| SORT distance ASC +; + +abbrev:k | scalerank:i | name:text | location:geo_point | country:k | city:k | city_location:geo_point | distance:d | city_distance:d +HAM | 3 | Hamburg | POINT(10.005647830925 53.6320011640866) | Germany | Norderstedt | POINT(10.0103 53.7064) | 280.34 | 273.42 +OSL | 2 | Oslo Gardermoen | POINT(11.0991032762581 60.1935783171386) | Norway | Oslo | POINT(10.7389 59.9133) | 510.03 | 483.71 +BMA | 5 | Bromma | POINT(17.9456175406145 59.3555902065112) | Sweden | Stockholm | POINT(18.0686 59.3294) | 520.18 | 522.54 +ARN | 2 | Arlanda | POINT(17.9307299016916 59.6511203397372) | Sweden | Stockholm | POINT(18.0686 59.3294) | 545.09 | 522.54 +; + +airportsWithinDistanceCopenhagenTrainStationCount +required_capability: st_distance + +FROM airports +| WHERE ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) < 500000 +| STATS count=COUNT() BY country +| SORT count DESC, country ASC +; + +count:long | country:k +3 | Germany +3 | Sweden +1 | Denmark +1 | Poland +; + +airportsWithinDistanceBandCopenhagenTrainStationCount +required_capability: st_distance + +FROM airports +| WHERE ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) < 600000 + AND ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) > 400000 +| STATS count=COUNT() BY country +| SORT count DESC, country ASC +; + +count:long | country:k +3 | Sweden +2 | Norway +1 | Germany +1 | Lithuania +1 | Poland +; + +airportsSortDistanceFromCopenhagenTrainStation +required_capability: st_distance + +FROM airports +| EVAL distance = ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) +| SORT distance ASC +| LIMIT 5 +| EVAL distance = ROUND(distance/1000,2) +| EVAL city_distance = ROUND(ST_DISTANCE(city_location, TO_GEOPOINT("POINT(12.565 55.673)"))/1000,2) +| KEEP abbrev, name, location, country, city, city_location, distance, city_distance +; + +abbrev:k | name:text | location:geo_point | country:k | city:k | city_location:geo_point | distance:d | city_distance:d +CPH | Copenhagen | POINT(12.6493508684508 55.6285017221528) | Denmark | Copenhagen | POINT(12.5683 55.6761) | 7.24 | 0.4 +GOT | Gothenburg | POINT(12.2938269092573 57.6857493534879) | Sweden | Gothenburg | POINT(11.9675 57.7075) | 224.42 | 229.15 +HAM | Hamburg | POINT(10.005647830925 53.6320011640866) | Germany | Norderstedt | POINT(10.0103 53.7064) | 280.34 | 273.42 +TXL | Berlin-Tegel Int'l | POINT(13.2903090925074 52.5544287044101) | Germany | Hohen Neuendorf | POINT(13.2833 52.6667) | 349.97 | 337.53 +BRE | Bremen | POINT(8.7858617703132 53.052287104156) | Germany | Bremen | POINT(8.8 53.0833) | 380.5 | 377.22 +; + +airportsSortDistanceFromAirportToCity +required_capability: st_distance + +FROM airports +| EVAL distance = ST_DISTANCE(location, city_location) 
+| SORT distance ASC +| LIMIT 5 +| KEEP abbrev, name, location, country, city, city_location, distance +; + +abbrev:k | name:text | location:geo_point | country:k | city:k | city_location:geo_point | distance:d +TRD | Trondheim Vaernes | POINT(10.9168095241445 63.472029381717) | Norway | Stjørdalshalsen | POINT(10.9189 63.4712) | 138.86985803478004 +DHA | King Abdulaziz AB | POINT(50.1477245727844 26.2703680854768) | Saudi Arabia | Dhahran | POINT(50.15 26.2667) | 466.7321285739462 +NDB | Nouadhibou Int'l | POINT(-17.0334398691538 20.9290523064387) | Mauritania | Nouadhibou | POINT(-17.0333 20.9333) | 472.54642026512636 +ESE | Ensenada | POINT(-116.595724400418 31.7977139760569) | Mexico | Rodolfo Sánchez Taboada | POINT(-116.5911 31.7958) | 486.1022373716486 +INU | Nauru Int'l | POINT(166.91613965882 -0.545037226856384) | Nauru | Yaren | POINT(166.9209 -0.5477) | 606.4899254580574 +; + +distancesNearQuantizationBoundary +required_capability: st_distance + +FROM distances +| EVAL d = ST_DISTANCE(location, TO_GEOPOINT("POINT(0 0)")) +| EVAL delta = ABS(distance - d) +| WHERE delta > 0 +| KEEP distance, d, delta, location +; + +distance:double | d:double | delta:double | location:geo_point +; + +distancesNearQuantizationBoundaryStats +required_capability: st_distance + +FROM distances +| EVAL d = ST_DISTANCE(location, TO_GEOPOINT("POINT(0 0)")) +| STATS count=COUNT(*) BY d +| SORT d ASC +; + +count:long | d:double +12 | 0.2685179415497728 +3 | 0.26851794154977293 +30 | 0.2848062860101459 +15 | 0.2848062860101461 +30 | 0.30021218524180354 +3 | 3.258374219844941 +6 | 3.2597569375901188 +6 | 3.262520615015394 +12 | 3.2625206150153967 +; + +distancesNearQuantizationBoundaryFilterStatsA +required_capability: st_distance + +FROM distances +| WHERE ST_DISTANCE(location, TO_GEOPOINT("POINT(0 0)")) == 0.2848062860101461 +| STATS count=COUNT(*) +; + +count:long +15 +; + +distancesNearQuantizationBoundaryFilterStatsB +required_capability: st_distance + +FROM distances +| WHERE ST_DISTANCE(location, TO_GEOPOINT("POINT(0 0)")) == 3.2625206150153967 +| STATS count=COUNT(*) +; + +count:long +12 +; + ############################################### # Tests for Equality and casting with GEO_POINT @@ -1036,7 +1339,7 @@ ZAH | POINT (6779435.866395892 3436280.545331025) | Zahedan Int'l ; ############################################### -# Tests for ST_CENTROID on CARTESIAN_POINT type +# Tests for ST_CENTROID_AGG on CARTESIAN_POINT type cartesianCentroidFromAirports required_capability: st_centroid_agg @@ -1675,7 +1978,134 @@ POINT (4783520.5 1661010.0) | 1 ; ############################################### -# Tests for Equality and casting with GEO_POINT +# Tests for ST_DISTANCE with CARTESIAN_POINT + +literalCartesianPointDistanceLiteralPoint +required_capability: st_distance + +ROW wkt = ["POINT(1 1)", "POINT(-1 -1)", "POINT(-1 1)", "POINT(1 -1)"] +| MV_EXPAND wkt +| EVAL pt = TO_CARTESIANPOINT(wkt) +| EVAL distance = ST_DISTANCE(pt, TO_CARTESIANPOINT("POINT(0 0)")) +; + +wkt:keyword | pt:cartesian_point | distance:double +"POINT(1 1)" | POINT(1 1) | 1.4142135623730951 +"POINT(-1 -1)" | POINT(-1 -1) | 1.4142135623730951 +"POINT(-1 1)" | POINT(-1 1) | 1.4142135623730951 +"POINT(1 -1)" | POINT(1 -1) | 1.4142135623730951 +; + +airportCartesianCityLocationPointDistance +required_capability: st_distance + +FROM airports_web +| EVAL distance = ST_DISTANCE(location, TO_CARTESIANPOINT("POINT(1402900 7490000)")) +| WHERE distance < 1000000 +| STATS distance=AVG(distance), min=min(distance), max=max(distance), 
count=COUNT() +; + +distance:double | min:double | max:double | count:long +676858.3463435044 | 7358.02077507206 | 971112.9731194031 | 12 +; + +airportCartesianDistanceToCityCopenhagen +required_capability: st_distance + +// tag::st_distance-airports_web[] +FROM airports_web +| WHERE abbrev == "CPH" +| EVAL distance = ST_DISTANCE(location, TO_CARTESIANPOINT("POINT(1402900 7490000)")) +| KEEP abbrev, name, location, distance +// end::st_distance-airports_web[] +; + +// tag::st_distance-airports_web-result[] +abbrev:k | name:text | location:cartesian_point | distance:d +CPH | Copenhagen | POINT(1408119.2975413958 7484813.53657096) | 7358.02077507206 +// end::st_distance-airports_web-result[] +; + +airportsWithinPolygonCartesianDistanceFromCopenhagenTrainStation +required_capability: st_distance + +FROM airports_web +| WHERE ST_WITHIN(location, TO_CARTESIANPOINT("POLYGON((1300000 5000000, 1550000 5000000, 1550000 8000000, 1300000 8000000, 1300000 5000000))")) +| EVAL distance = ROUND(ST_DISTANCE(location, TO_CARTESIANPOINT("POINT(1402900 7490000)"))/1000,2) +| KEEP abbrev, name, location, distance +| SORT distance ASC +; + +abbrev:k | name:text | location:cartesian_point | distance:d +CPH | Copenhagen | POINT(1408119.2975413958 7484813.53657096) | 7.36 +GOT | Gothenburg | POINT(1368542.5514391668 7901590.990691348) | 413.02 +TXL | Berlin-Tegel Int'l | POINT(1479470.4406631375 6901000.847325224) | 593.96 +DRS | Dresden | POINT(1532309.1332567444 6643450.815136505) | 856.38 +MUC | Franz-Josef-Strauss | POINT(1312241.1393453805 6165923.947863139) | 1327.18 +MUCf | Munich Freight Terminal | POINT(1310172.8388047176 6165247.042460089) | 1327.99 +VCE | Venice Marco Polo | POINT(1373801.3277301549 5701352.694091503) | 1788.88 +FCO | Leonardo da Vinci Int'l | POINT(1363674.994060762 5130332.471894705) | 2359.99 +; + +airportsWithinDistanceFromCopenhagenTrainStationCartesian +required_capability: st_distance + +FROM airports_web +| WHERE ST_DISTANCE(location, TO_CARTESIANPOINT("POINT(1402900 7490000)")) < 1000000 +| EVAL distance = ROUND(ST_DISTANCE(location, TO_CARTESIANPOINT("POINT(1402900 7490000)"))/1000,2) +| KEEP abbrev, name, location, distance +| SORT distance ASC +; + +abbrev:k | name:text | location:cartesian_point | distance:d +CPH | Copenhagen | POINT(1408119.2975413958 7484813.53657096) | 7.36 +GOT | Gothenburg | POINT(1368542.5514391668 7901590.990691348) | 413.02 +HAM | Hamburg | POINT(1113823.6215953932 7100767.499507231) | 484.84 +TXL | Berlin-Tegel Int'l | POINT(1479470.4406631375 6901000.847325224) | 593.96 +BRE | Bremen | POINT(978037.6584513546 6992675.481947904) | 654.09 +GDN | Gdansk Lech Walesa | POINT(2055897.583295918 7242589.051225524) | 698.3 +NRK | Norrköping Airport | POINT(1807154.0200379654 8090879.147781534) | 724.21 +NYO | Stockholm-Skavsta | POINT(1883704.5141685435 8134083.898609498) | 803.75 +DRS | Dresden | POINT(1532309.1332567444 6643450.815136505) | 856.38 +PLQ | Palanga Int'l | POINT(2348556.9901333293 7552712.896849933) | 947.73 +OSL | Oslo Gardermoen | POINT(1235546.524975006 8442962.64811417) | 967.55 +BMA | Bromma | POINT(1997697.0065920444 8257643.750388128) | 971.11 +; + +airportsWithinDistanceCopenhagenTrainStationCountCartesian +required_capability: st_distance + +FROM airports_web +| WHERE ST_DISTANCE(location, TO_CARTESIANPOINT("POINT(1402900 7490000)")) < 1000000 +| STATS count=COUNT() +| SORT count DESC +; + +count:long +12 +; + +airportsSortDistanceFromCopenhagenTrainStationCartesian +required_capability: st_distance + +FROM airports_web +| EVAL 
distance = ST_DISTANCE(location, TO_CARTESIANPOINT("POINT(1402900 7490000)")) +| SORT distance ASC +| LIMIT 5 +| EVAL distance = ROUND(distance/1000,2) +| KEEP abbrev, name, location, distance +; + +abbrev:k | name:text | location:cartesian_point | distance:d +CPH | Copenhagen | POINT(1408119.2975413958 7484813.53657096) | 7.36 +GOT | Gothenburg | POINT(1368542.5514391668 7901590.990691348) | 413.02 +HAM | Hamburg | POINT(1113823.6215953932 7100767.499507231) | 484.84 +TXL | Berlin-Tegel Int'l | POINT(1479470.4406631375 6901000.847325224) | 593.96 +BRE | Bremen | POINT(978037.6584513546 6992675.481947904) | 654.09 +; + +############################################### +# Tests for Equality and casting with CARTESIAN_POINT cartesianPointEquals required_capability: spatial_points_from_source diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index bbc98ece3890a..b567657c2d7e9 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -31,6 +31,44 @@ MIN(languages):integer // end::min-result[] ; +maxOfBoolean +required_capability: agg_max_min_boolean_support +from employees | stats s = max(still_hired); + +s:boolean +true +; + +maxOfBooleanExpression +required_capability: agg_max_min_boolean_support +from employees +| eval x = salary is not null +| where emp_no > 10050 +| stats a = max(salary is not null), b = max(x), c = max(case(salary is null, true, false)), d = max(is_rehired); + +a:boolean | b:boolean | c:boolean | d:boolean +true | true | false | true +; + +minOfBooleanExpression +required_capability: agg_max_min_boolean_support +from employees +| eval x = salary is not null +| where emp_no > 10050 +| stats a = min(salary is not null), b = min(x), c = min(case(salary is null, true, false)), d = min(is_rehired); + +a:boolean | b:boolean | c:boolean | d:boolean +true | true | false | false +; + +minOfBoolean +required_capability: agg_max_min_boolean_support +from employees | stats s = min(still_hired); + +s:boolean +false +; + maxOfShort // short becomes int until https://github.com/elastic/elasticsearch-internal/issues/724 from employees | stats l = max(languages.short); @@ -1697,3 +1735,153 @@ FROM employees | STATS min = min(salary) by languages | SORT min + CASE(language 29175 |2 28336 |null ; + + +weightedAvg +required_capability: agg_weighted_avg +from employees +| stats w_avg_1 = weighted_avg(salary, 1), avg = avg(salary), w_avg_2 = weighted_avg(salary, height) +| EVAL w_avg_1 = ROUND(w_avg_1), avg = ROUND(avg), w_avg_2 = ROUND(w_avg_2) +; + +w_avg_1:double | avg:double | w_avg_2:double +48249.0 | 48249.0 | 48474.0 +; + +weightedAvgGrouping +required_capability: agg_weighted_avg +// tag::weighted-avg[] +FROM employees +| STATS w_avg = WEIGHTED_AVG(salary, height) by languages +| EVAL w_avg = ROUND(w_avg) +| KEEP w_avg, languages +| SORT languages +// end::weighted-avg[] +; + +// tag::weighted-avg-result[] +w_avg:double | languages:integer +51464.0 | 1 +48477.0 | 2 +52379.0 | 3 +47990.0 | 4 +42119.0 | 5 +52142.0 | null +// end::weighted-avg-result[] +; + +weightedAvgConstant +required_capability: agg_weighted_avg +row v = [1, 2, 3] +| stats w_avg_1 = weighted_avg(v, 1), w_avg_2 = weighted_avg([1, 2, 3], 1), avg = avg(v) +| EVAL w_avg_1 = ROUND(w_avg_1), w_avg_2 = ROUND(w_avg_2), avg = ROUND(avg) +; + +w_avg_1:double |w_avg_2:double |avg:double +2.0 | 2.0 | 2.0 +; + 
+weightedAvgBothConstantsMvWarning +required_capability: agg_weighted_avg +row v = [1, 2, 3], w = [1, 2, 3] +| stats w_avg = weighted_avg(v, w) +; +warning:Line 2:17: evaluation of [weighted_avg(v, w)] failed, treating result as null. Only first 20 failures recorded. +warning:Line 2:17: java.lang.IllegalArgumentException: single-value function encountered multi-value + +w_avg:double +null +; + +weightedAvgWeightConstantMvWarning +required_capability: agg_weighted_avg +from employees +| eval w = [1, 2, 3] +| stats w_avg = weighted_avg(salary, w) +; +warning:Line 3:17: evaluation of [weighted_avg(salary, w)] failed, treating result as null. Only first 20 failures recorded. +warning:Line 3:17: java.lang.IllegalArgumentException: single-value function encountered multi-value + +w_avg:double +null +; + +weightedAvgWeightMvWarning +required_capability: agg_weighted_avg +from employees +| where emp_no == 10002 or emp_no == 10003 +| stats w_avg = weighted_avg(salary, salary_change.int) +; +warning:Line 3:17: evaluation of [weighted_avg(salary, salary_change.int)] failed, treating result as null. Only first 20 failures recorded. +warning:Line 3:17: java.lang.IllegalArgumentException: single-value function encountered multi-value + +w_avg:double +null +; + +weightedAvgFieldMvWarning +required_capability: agg_weighted_avg +from employees +| where emp_no == 10002 or emp_no == 10003 +| stats w_avg = weighted_avg(salary_change.int, height) +; +warning:Line 3:17: evaluation of [weighted_avg(salary_change.int, height)] failed, treating result as null. Only first 20 failures recorded. +warning:Line 3:17: java.lang.IllegalArgumentException: single-value function encountered multi-value + +w_avg:double +null +; + +weightedAvgWeightZero +required_capability: agg_weighted_avg +from employees +| eval w = 0 +| stats w_avg = weighted_avg(salary, w) +; +warning:Line 3:17: evaluation of [weighted_avg(salary, w)] failed, treating result as null. Only first 20 failures recorded. +warning:Line 3:17: java.lang.ArithmeticException: / by zero + +w_avg:double +null +; + +weightedAvgWeightZeroExp +required_capability: agg_weighted_avg +from employees +| eval w = 0 + 0 +| stats w_avg = weighted_avg(salary, w) +; +warning:Line 3:17: evaluation of [weighted_avg(salary, w)] failed, treating result as null. Only first 20 failures recorded. 
+warning:Line 3:17: java.lang.ArithmeticException: / by zero + +w_avg:double +null +; + +docsStatsMvGroup +// tag::mv-group[] +ROW i=1, a=["a", "b"] | STATS MIN(i) BY a | SORT a ASC +// end::mv-group[] +; + +// tag::mv-group-result[] +MIN(i):integer | a:keyword + 1 | a + 1 | b +// end::mv-group-result[] +; + +docsStatsMultiMvGroup +// tag::multi-mv-group[] +ROW i=1, a=["a", "b"], b=[2, 3] | STATS MIN(i) BY a, b | SORT a ASC, b ASC +// end::multi-mv-group[] +; + +// tag::multi-mv-group-result[] +MIN(i):integer | a:keyword | b:integer + 1 | a | 2 + 1 | a | 3 + 1 | b | 2 + 1 | b | 3 +// end::multi-mv-group-result[] +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_top.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_top.csv-spec new file mode 100644 index 0000000000000..d03bdb3c3dfd7 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_top.csv-spec @@ -0,0 +1,156 @@ +top +required_capability: agg_top +// tag::top[] +FROM employees +| STATS top_salaries = TOP(salary, 3, "desc"), top_salary = MAX(salary) +// end::top[] +; + +// tag::top-result[] +top_salaries:integer | top_salary:integer +[74999, 74970, 74572] | 74999 +// end::top-result[] +; + +topAllTypesAsc +required_capability: agg_top +FROM employees +| STATS + date = TOP(hire_date, 2, "asc"), + double = TOP(salary_change, 2, "asc"), + integer = TOP(salary, 2, "asc"), + long = TOP(salary_change.long, 2, "asc") +; + +date:date | double:double | integer:integer | long:long +[1985-02-18T00:00:00.000Z,1985-02-24T00:00:00.000Z] | [-9.81,-9.28] | [25324,25945] | [-9,-9] +; + +topAllTypesDesc +required_capability: agg_top +FROM employees +| STATS + date = TOP(hire_date, 2, "desc"), + double = TOP(salary_change, 2, "desc"), + integer = TOP(salary, 2, "desc"), + long = TOP(salary_change.long, 2, "desc") +; + +date:date | double:double | integer:integer | long:long +[1999-04-30T00:00:00.000Z,1997-05-19T00:00:00.000Z] | [14.74,14.68] | [74999,74970] | [14,14] +; + +topAllTypesRow +required_capability: agg_top +ROW + constant_date=TO_DATETIME("1985-02-18T00:00:00.000Z"), + constant_double=-9.81, + constant_integer=25324, + constant_long=TO_LONG(-9) +| STATS + date = TOP(constant_date, 2, "asc"), + double = TOP(constant_double, 2, "asc"), + integer = TOP(constant_integer, 2, "asc"), + long = TOP(constant_long, 2, "asc") +| keep date, double, integer, long +; + +date:date | double:double | integer:integer | long:long +1985-02-18T00:00:00.000Z | -9.81 | 25324 | -9 +; + +topSomeBuckets +required_capability: agg_top +FROM employees +| STATS top_salary = TOP(salary, 2, "desc") by still_hired +| sort still_hired asc +; + +top_salary:integer | still_hired:boolean +[74999,74970] | false +[74572,73578] | true +; + +topManyBuckets +required_capability: agg_top +FROM employees +| STATS top_salary = TOP(salary, 2, "desc") by x=emp_no, y=emp_no+1 +| sort x asc +| limit 3 +; + +top_salary:integer | x:integer | y:integer +57305 | 10001 | 10002 +56371 | 10002 | 10003 +61805 | 10003 | 10004 +; + +topMultipleStats +required_capability: agg_top +FROM employees +| STATS top_salary = TOP(salary, 1, "desc") by emp_no +| STATS top_salary = TOP(top_salary, 3, "asc") +; + +top_salary:integer +[25324,25945,25976] +; + +topAllTypesMin +required_capability: agg_top +FROM employees +| STATS + date = TOP(hire_date, 1, "asc"), + double = TOP(salary_change, 1, "asc"), + integer = TOP(salary, 1, "asc"), + long = TOP(salary_change.long, 1, "asc") +; + +date:date | double:double | integer:integer | long:long 
+1985-02-18T00:00:00.000Z | -9.81 | 25324 | -9 +; + +topAllTypesMax +required_capability: agg_top +FROM employees +| STATS + date = TOP(hire_date, 1, "desc"), + double = TOP(salary_change, 1, "desc"), + integer = TOP(salary, 1, "desc"), + long = TOP(salary_change.long, 1, "desc") +; + +date:date | double:double | integer:integer | long:long +1999-04-30T00:00:00.000Z | 14.74 | 74999 | 14 +; + +topAscDesc +required_capability: agg_top +FROM employees +| STATS top_asc = TOP(salary, 3, "asc"), top_desc = TOP(salary, 3, "desc") +; + +top_asc:integer | top_desc:integer +[25324, 25945, 25976] | [74999, 74970, 74572] +; + +topEmpty +required_capability: agg_top +FROM employees +| WHERE salary < 0 +| STATS top = TOP(salary, 3, "asc") +; + +top:integer +null +; + +topDuplicates +required_capability: agg_top +FROM employees +| STATS integer = TOP(languages, 2, "desc") +; + +integer:integer +[5, 5] +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_top_list.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_top_list.csv-spec deleted file mode 100644 index c24f6a7e70954..0000000000000 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_top_list.csv-spec +++ /dev/null @@ -1,156 +0,0 @@ -topList -required_capability: agg_top_list -// tag::top-list[] -FROM employees -| STATS top_salaries = TOP_LIST(salary, 3, "desc"), top_salary = MAX(salary) -// end::top-list[] -; - -// tag::top-list-result[] -top_salaries:integer | top_salary:integer -[74999, 74970, 74572] | 74999 -// end::top-list-result[] -; - -topListAllTypesAsc -required_capability: agg_top_list -FROM employees -| STATS - date = TOP_LIST(hire_date, 2, "asc"), - double = TOP_LIST(salary_change, 2, "asc"), - integer = TOP_LIST(salary, 2, "asc"), - long = TOP_LIST(salary_change.long, 2, "asc") -; - -date:date | double:double | integer:integer | long:long -[1985-02-18T00:00:00.000Z,1985-02-24T00:00:00.000Z] | [-9.81,-9.28] | [25324,25945] | [-9,-9] -; - -topListAllTypesDesc -required_capability: agg_top_list -FROM employees -| STATS - date = TOP_LIST(hire_date, 2, "desc"), - double = TOP_LIST(salary_change, 2, "desc"), - integer = TOP_LIST(salary, 2, "desc"), - long = TOP_LIST(salary_change.long, 2, "desc") -; - -date:date | double:double | integer:integer | long:long -[1999-04-30T00:00:00.000Z,1997-05-19T00:00:00.000Z] | [14.74,14.68] | [74999,74970] | [14,14] -; - -topListAllTypesRow -required_capability: agg_top_list -ROW - constant_date=TO_DATETIME("1985-02-18T00:00:00.000Z"), - constant_double=-9.81, - constant_integer=25324, - constant_long=TO_LONG(-9) -| STATS - date = TOP_LIST(constant_date, 2, "asc"), - double = TOP_LIST(constant_double, 2, "asc"), - integer = TOP_LIST(constant_integer, 2, "asc"), - long = TOP_LIST(constant_long, 2, "asc") -| keep date, double, integer, long -; - -date:date | double:double | integer:integer | long:long -1985-02-18T00:00:00.000Z | -9.81 | 25324 | -9 -; - -topListSomeBuckets -required_capability: agg_top_list -FROM employees -| STATS top_salary = TOP_LIST(salary, 2, "desc") by still_hired -| sort still_hired asc -; - -top_salary:integer | still_hired:boolean -[74999,74970] | false -[74572,73578] | true -; - -topListManyBuckets -required_capability: agg_top_list -FROM employees -| STATS top_salary = TOP_LIST(salary, 2, "desc") by x=emp_no, y=emp_no+1 -| sort x asc -| limit 3 -; - -top_salary:integer | x:integer | y:integer -57305 | 10001 | 10002 -56371 | 10002 | 10003 -61805 | 10003 | 10004 -; - -topListMultipleStats -required_capability: agg_top_list 
-FROM employees -| STATS top_salary = TOP_LIST(salary, 1, "desc") by emp_no -| STATS top_salary = TOP_LIST(top_salary, 3, "asc") -; - -top_salary:integer -[25324,25945,25976] -; - -topListAllTypesMin -required_capability: agg_top_list -FROM employees -| STATS - date = TOP_LIST(hire_date, 1, "asc"), - double = TOP_LIST(salary_change, 1, "asc"), - integer = TOP_LIST(salary, 1, "asc"), - long = TOP_LIST(salary_change.long, 1, "asc") -; - -date:date | double:double | integer:integer | long:long -1985-02-18T00:00:00.000Z | -9.81 | 25324 | -9 -; - -topListAllTypesMax -required_capability: agg_top_list -FROM employees -| STATS - date = TOP_LIST(hire_date, 1, "desc"), - double = TOP_LIST(salary_change, 1, "desc"), - integer = TOP_LIST(salary, 1, "desc"), - long = TOP_LIST(salary_change.long, 1, "desc") -; - -date:date | double:double | integer:integer | long:long -1999-04-30T00:00:00.000Z | 14.74 | 74999 | 14 -; - -topListAscDesc -required_capability: agg_top_list -FROM employees -| STATS top_asc = TOP_LIST(salary, 3, "asc"), top_desc = TOP_LIST(salary, 3, "desc") -; - -top_asc:integer | top_desc:integer -[25324, 25945, 25976] | [74999, 74970, 74572] -; - -topListEmpty -required_capability: agg_top_list -FROM employees -| WHERE salary < 0 -| STATS top = TOP_LIST(salary, 3, "asc") -; - -top:integer -null -; - -topListDuplicates -required_capability: agg_top_list -FROM employees -| STATS integer = TOP_LIST(languages, 2, "desc") -; - -integer:integer -[5, 5] -; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec index 53d7d1fd0d352..f85a3bb01ad40 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec @@ -28,8 +28,9 @@ a:integer | b:integer ; -length -from employees | sort emp_no | limit 3 | eval l = length(first_name) | keep emp_no, l; +lengthAndSourceQuoting +required_capability: double_quotes_source_enclosing +from "employees" | sort emp_no | limit 3 | eval l = length(first_name) | keep emp_no, l; emp_no:integer | l:integer 10001 | 6 @@ -79,10 +80,11 @@ a:integer | ss:keyword | l:keyword | r:keyword 1 | bcd | ab | cd ; -stringCastEmp +stringCastEmpAndSourceTripleQuoting required_capability: string_literal_auto_casting +required_capability: double_quotes_source_enclosing -from employees +from """employees""" | eval ss = substring(first_name, "2") | sort emp_no | keep emp_no, first_name, ss @@ -342,8 +344,8 @@ required_capability: mv_warn from employees | where job_positions in ("Internship", first_name) | keep emp_no, job_positions; ignoreOrder:true -warning:Line 1:24: evaluation of [job_positions in (\"Internship\", first_name)] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[job_positions in \(\\\"Internship\\\", first_name\)\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value emp_no:integer |job_positions:keyword 10048 |Internship @@ -533,8 +535,8 @@ lessThanMultivalue required_capability: mv_warn from employees | where job_positions < "C" | keep emp_no, job_positions | sort emp_no; -warning:Line 1:24: evaluation of [job_positions < \"C\"] failed, treating result as null. Only first 20 failures recorded. 
-warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[job_positions < \\\"C\\\"\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value // Note that multivalued job_positions aren't included because they aren't less than or greater than C - that comparison is null emp_no:integer |job_positions:keyword @@ -546,8 +548,8 @@ greaterThanMultivalue required_capability: mv_warn from employees | where job_positions > "C" | keep emp_no, job_positions | sort emp_no | limit 6; -warning:Line 1:24: evaluation of [job_positions > \"C\"] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[job_positions > \\\"C\\\"\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value // Note that multivalued job_positions aren't included because they aren't less than or greater than C - that comparison is null emp_no:integer |job_positions:keyword @@ -563,8 +565,8 @@ equalToMultivalue required_capability: mv_warn from employees | where job_positions == "Accountant" | keep emp_no, job_positions | sort emp_no; -warning:Line 1:24: evaluation of [job_positions == \"Accountant\"] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[job_positions == \\\"Accountant\\\"\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value // Note that multivalued job_positions aren't included because they aren't less than or greater than C - that comparison is null emp_no:integer |job_positions:keyword @@ -575,8 +577,8 @@ equalToOrEqualToMultivalue required_capability: mv_warn from employees | where job_positions == "Accountant" or job_positions == "Tech Lead" | keep emp_no, job_positions | sort emp_no; -warning:Line 1:24: evaluation of [job_positions] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[job_positions\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value // Note that multivalued job_positions aren't included because they aren't less than or greater than C - that comparison is null emp_no:integer |job_positions:keyword @@ -588,8 +590,8 @@ inMultivalue required_capability: mv_warn from employees | where job_positions in ("Accountant", "Tech Lead") | keep emp_no, job_positions | sort emp_no; -warning:Line 1:24: evaluation of [job_positions in (\"Accountant\", \"Tech Lead\")] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[job_positions in \(\\\"Accountant\\\", \\"Tech Lead\\\"\)\] failed, treating result as null. Only first 20 failures recorded. 
+warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value // Note that multivalued job_positions aren't included because they aren't less than or greater than C - that comparison is null emp_no:integer |job_positions:keyword @@ -601,8 +603,8 @@ notLessThanMultivalue required_capability: mv_warn from employees | where not(job_positions < "C") | keep emp_no, job_positions | sort emp_no | limit 6; -warning:Line 1:24: evaluation of [not(job_positions < \"C\")] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [job_positions < \"C\"] failed, treating result as null. Only first 20 failures recorded.] -warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value#[Emulated:Line 1:28: java.lang.IllegalArgumentException: single-value function encountered multi-value] +warningRegex:evaluation of \[.*job_positions < \\\"C\\\".*\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value // Note that multivalued job_positions aren't included because they aren't less than or greater than C - that comparison is null emp_no:integer |job_positions:keyword @@ -618,8 +620,8 @@ notGreaterThanMultivalue required_capability: mv_warn from employees | where not(job_positions > "C") | keep emp_no, job_positions | sort emp_no | limit 6; -warning:Line 1:24: evaluation of [not(job_positions > \"C\")] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [job_positions > \"C\"] failed, treating result as null. Only first 20 failures recorded.] -warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value#[Emulated:Line 1:28: java.lang.IllegalArgumentException: single-value function encountered multi-value] +warningRegex:evaluation of \[.*job_positions > \\\"C\\\".*\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value // Note that multivalued job_positions aren't included because they aren't less than or greater than C - that comparison is null emp_no:integer |job_positions:keyword @@ -631,8 +633,8 @@ notEqualToMultivalue required_capability: mv_warn from employees | where not(job_positions == "Accountant") | keep emp_no, job_positions | sort emp_no | limit 6; -warning:Line 1:24: evaluation of [not(job_positions == \"Accountant\")] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [job_positions == \"Accountant\"] failed, treating result as null. Only first 20 failures recorded.] -warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value#[Emulated:Line 1:28: java.lang.IllegalArgumentException: single-value function encountered multi-value] +warningRegex:evaluation of \[.*job_positions == \\\"Accountant\\\".*\] failed, treating result as null. Only first 20 failures recorded. 
+warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value // Note that multivalued job_positions aren't included because they aren't less than or greater than C - that comparison is null emp_no:integer |job_positions:keyword diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec index ee8c4be385e0f..349f968666132 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec @@ -45,8 +45,10 @@ FROM sample_data_ts_long ; singleIndexIpStats +required_capability: casting_operator + FROM sample_data -| EVAL client_ip = TO_IP(client_ip) +| EVAL client_ip = client_ip::ip | STATS count=count(*) BY client_ip | SORT count DESC, client_ip ASC | KEEP count, client_ip @@ -60,8 +62,10 @@ count:long | client_ip:ip ; singleIndexIpStringStats +required_capability: casting_operator + FROM sample_data_str -| EVAL client_ip = TO_IP(client_ip) +| EVAL client_ip = client_ip::ip | STATS count=count(*) BY client_ip | SORT count DESC, client_ip ASC | KEEP count, client_ip @@ -74,12 +78,28 @@ count:long | client_ip:ip 1 | 172.21.2.162 ; +singleIndexIpStringStatsInline +required_capability: casting_operator + +FROM sample_data_str +| STATS count=count(*) BY client_ip::ip +| STATS mc=count(count) BY count +| SORT mc DESC, count ASC +| KEEP mc, count +; + +mc:l | count:l +3 | 1 +1 | 4 +; + multiIndexIpString required_capability: union_types required_capability: metadata_fields +required_capability: casting_operator FROM sample_data, sample_data_str METADATA _index -| EVAL client_ip = TO_IP(client_ip) +| EVAL client_ip = client_ip::ip | KEEP _index, @timestamp, client_ip, event_duration, message | SORT _index ASC, @timestamp DESC ; @@ -104,9 +124,10 @@ sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 multiIndexIpStringRename required_capability: union_types required_capability: metadata_fields +required_capability: casting_operator FROM sample_data, sample_data_str METADATA _index -| EVAL host_ip = TO_IP(client_ip) +| EVAL host_ip = client_ip::ip | KEEP _index, @timestamp, host_ip, event_duration, message | SORT _index ASC, @timestamp DESC ; @@ -191,9 +212,10 @@ sample_data_str | 2023-10-23T12:15:03.360Z | 3450233 | Connected multiIndexIpStringStats required_capability: union_types +required_capability: casting_operator FROM sample_data, sample_data_str -| EVAL client_ip = TO_IP(client_ip) +| EVAL client_ip = client_ip::ip | STATS count=count(*) BY client_ip | SORT count DESC, client_ip ASC | KEEP count, client_ip @@ -208,9 +230,10 @@ count:long | client_ip:ip multiIndexIpStringRenameStats required_capability: union_types +required_capability: casting_operator FROM sample_data, sample_data_str -| EVAL host_ip = TO_IP(client_ip) +| EVAL host_ip = client_ip::ip | STATS count=count(*) BY host_ip | SORT count DESC, host_ip ASC | KEEP count, host_ip @@ -240,6 +263,24 @@ count:long | host_ip:keyword 2 | 172.21.2.162 ; +multiIndexIpStringStatsDrop +required_capability: union_types +required_capability: union_types_agg_cast +required_capability: casting_operator + +FROM sample_data, sample_data_str +| STATS count=count(*) BY client_ip::ip +| KEEP count +| SORT count DESC +; + +count:long +8 +2 +2 +2 +; + multiIndexIpStringStatsInline required_capability: union_types required_capability: union_types_inline_fix @@ -257,6 +298,39 @@ count:long | client_ip:ip 2 
| 172.21.2.162 ; +multiIndexIpStringStatsInline2 +required_capability: union_types +required_capability: union_types_agg_cast +required_capability: casting_operator + +FROM sample_data, sample_data_str +| STATS count=count(*) BY client_ip::ip +| SORT count DESC, `client_ip::ip` ASC +; + +count:long | client_ip::ip:ip +8 | 172.21.3.15 +2 | 172.21.0.5 +2 | 172.21.2.113 +2 | 172.21.2.162 +; + +multiIndexIpStringStatsInline3 +required_capability: union_types +required_capability: union_types_agg_cast +required_capability: casting_operator + +FROM sample_data, sample_data_str +| STATS count=count(*) BY client_ip::ip +| STATS mc=count(count) BY count +| SORT mc DESC, count ASC +; + +mc:l | count:l +3 | 2 +1 | 8 +; + multiIndexWhereIpStringStats required_capability: union_types @@ -385,6 +459,76 @@ count:long | @timestamp:date 4 | 2023-10-23T12:00:00.000Z ; +multiIndexTsLongStatsDrop +required_capability: union_types +required_capability: union_types_agg_cast +required_capability: casting_operator + +FROM sample_data, sample_data_ts_long +| STATS count=count(*) BY @timestamp::datetime +| KEEP count +; + +count:long +2 +2 +2 +2 +2 +2 +2 +; + +multiIndexTsLongStatsInline2 +required_capability: union_types +required_capability: union_types_agg_cast +required_capability: casting_operator + +FROM sample_data, sample_data_ts_long +| STATS count=count(*) BY @timestamp::datetime +| SORT count DESC, `@timestamp::datetime` DESC +; + +count:long | @timestamp::datetime:datetime +2 | 2023-10-23T13:55:01.543Z +2 | 2023-10-23T13:53:55.832Z +2 | 2023-10-23T13:52:55.015Z +2 | 2023-10-23T13:51:54.732Z +2 | 2023-10-23T13:33:34.937Z +2 | 2023-10-23T12:27:28.948Z +2 | 2023-10-23T12:15:03.360Z +; + +multiIndexTsLongStatsInline3 +required_capability: union_types +required_capability: union_types_agg_cast +required_capability: casting_operator + +FROM sample_data, sample_data_ts_long +| STATS count=count(*) BY @timestamp::datetime +| STATS mc=count(count) BY count +| SORT mc DESC, count ASC +; + +mc:l | count:l +7 | 2 +; + +multiIndexTsLongStatsStats +required_capability: union_types +required_capability: union_types_agg_cast + +FROM sample_data, sample_data_ts_long +| EVAL ts = TO_STRING(@timestamp) +| STATS count = COUNT(*) BY ts +| STATS mc = COUNT(count) BY count +| SORT mc DESC, count ASC +; + +mc:l | count:l +14 | 1 +; + multiIndexTsLongRenameStats required_capability: union_types @@ -717,3 +861,37 @@ null | null | 8268153 | Connection error | samp null | null | 8268153 | Connection error | sample_data_str | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 null | null | 8268153 | Connection error | sample_data_ts_long | 2023-10-23T13:52:55.015Z | 1698069175015 | 1698069175015 | 172.21.3.15 | 172.21.3.15 ; + +multiIndexMultiColumnTypesRenameAndKeep +required_capability: union_types +required_capability: metadata_fields + +FROM sample_data* METADATA _index +| WHERE event_duration > 8000000 +| EVAL ts = TO_DATETIME(@timestamp), ts_str = TO_STRING(@timestamp), ts_l = TO_LONG(@timestamp), ip = TO_IP(client_ip), ip_str = TO_STRING(client_ip) +| KEEP _index, ts, ts_str, ts_l, ip, ip_str, event_duration +| SORT _index ASC, ts DESC +; + +_index:keyword | ts:date | ts_str:keyword | ts_l:long | ip:ip | ip_str:k | event_duration:long +sample_data | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 | 8268153 +sample_data_str | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 | 8268153 
+sample_data_ts_long | 2023-10-23T13:52:55.015Z | 1698069175015 | 1698069175015 | 172.21.3.15 | 172.21.3.15 | 8268153 +; + +multiIndexMultiColumnTypesRenameAndDrop +required_capability: union_types +required_capability: metadata_fields + +FROM sample_data* METADATA _index +| WHERE event_duration > 8000000 +| EVAL ts = TO_DATETIME(@timestamp), ts_str = TO_STRING(@timestamp), ts_l = TO_LONG(@timestamp), ip = TO_IP(client_ip), ip_str = TO_STRING(client_ip) +| DROP @timestamp, client_ip, message +| SORT _index ASC, ts DESC +; + +event_duration:long | _index:keyword | ts:date | ts_str:keyword | ts_l:long | ip:ip | ip_str:k +8268153 | sample_data | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 +8268153 | sample_data_str | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 +8268153 | sample_data_ts_long | 2023-10-23T13:52:55.015Z | 1698069175015 | 1698069175015 | 172.21.3.15 | 172.21.3.15 +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/unsigned_long.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/unsigned_long.csv-spec index 38f3d439e7504..03d0b71894d9b 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/unsigned_long.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/unsigned_long.csv-spec @@ -49,8 +49,8 @@ filterPushDownGT required_capability: mv_warn from ul_logs | where bytes_in >= to_ul(74330435873664882) | sort bytes_in | eval div = bytes_in / to_ul(pow(10., 15)) | keep bytes_in, div, id | limit 12; -warning:Line 1:22: evaluation of [bytes_in >= to_ul(74330435873664882)] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:22: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[bytes_in >= to_ul\(74330435873664882\)\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value bytes_in:ul | div:ul |id:i 74330435873664882 |74 |82 @@ -71,10 +71,8 @@ filterPushDownRange required_capability: mv_warn from ul_logs | where bytes_in >= to_ul(74330435873664882) | where bytes_in <= to_ul(316080452389500167) | sort bytes_in | eval div = bytes_in / to_ul(pow(10., 15)) | keep bytes_in, div, id | limit 12; -warning:Line 1:22: evaluation of [bytes_in >= to_ul(74330435873664882)] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:22: java.lang.IllegalArgumentException: single-value function encountered multi-value -warning:#[Emulated:Line 1:67: evaluation of [bytes_in <= to_ul(316080452389500167)] failed, treating result as null. Only first 20 failures recorded.] -warning:#[Emulated:Line 1:67: java.lang.IllegalArgumentException: single-value function encountered multi-value] +warningRegex:evaluation of \[bytes_in .* to_ul\(.*\)\] failed, treating result as null. Only first 20 failures recorded. 
+warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value bytes_in:ul | div:ul |id:i 74330435873664882 |74 |82 @@ -88,8 +86,8 @@ required_capability: mv_warn // TODO: testing framework doesn't perform implicit conversion to UL of given values, needs explicit conversion from ul_logs | where bytes_in in (to_ul(74330435873664882), to_ul(154551962150890564), to_ul(195161570976258241)) | sort bytes_in | keep bytes_in, id; -warning:Line 1:22: evaluation of [bytes_in in (to_ul(74330435873664882), to_ul(154551962150890564), to_ul(195161570976258241))] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:22: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[bytes_in in \(to_ul\(74330435873664882\), to_ul\(154551962150890564\), to_ul\(195161570976258241\)\)\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value bytes_in:ul |id:i 74330435873664882 |82 @@ -101,8 +99,8 @@ filterOnFieldsEquality required_capability: mv_warn from ul_logs | where bytes_in == bytes_out; -warning:Line 1:22: evaluation of [bytes_in == bytes_out] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:22: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[bytes_in == bytes_out\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value @timestamp:date | bytes_in:ul | bytes_out:ul | id:i | status:k 2017-11-10T21:12:17.000Z|16002960716282089759|16002960716282089759|34 |OK @@ -112,8 +110,8 @@ filterOnFieldsInequality required_capability: mv_warn from ul_logs | sort id | where bytes_in < bytes_out | eval b_in = bytes_in / to_ul(pow(10.,15)), b_out = bytes_out / to_ul(pow(10.,15)) | limit 5; -warning:Line 1:32: evaluation of [bytes_in < bytes_out] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:32: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[bytes_in < bytes_out\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value @timestamp:date | bytes_in:ul | bytes_out:ul | id:i | status:k | b_in:ul | b_out:ul 2017-11-10T21:15:54.000Z|4348801185987554667 |12749081495402663265|1 |OK |4348 |12749 @@ -143,8 +141,8 @@ case required_capability: mv_warn from ul_logs | where case(bytes_in == to_ul(154551962150890564), true, false); -warning:Line 1:27: evaluation of [bytes_in == to_ul(154551962150890564)] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:27: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[bytes_in == to_ul\(154551962150890564\)\] failed, treating result as null. Only first 20 failures recorded. 
+warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value @timestamp:date | bytes_in:ul | bytes_out:ul | id:i | status:k 2017-11-10T20:21:58.000Z|154551962150890564|9382204513185396493|63 |OK @@ -155,8 +153,8 @@ required_capability: mv_warn FROM ul_logs | WHERE bytes_in == bytes_out | EVAL deg = TO_DEGREES(bytes_in) | KEEP bytes_in, deg ; -warning:Line 1:22: evaluation of [bytes_in == bytes_out] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:22: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[bytes_in == bytes_out\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value bytes_in:ul | deg:double 16002960716282089759 | 9.169021087566165E20 @@ -167,8 +165,8 @@ required_capability: mv_warn FROM ul_logs | WHERE bytes_in == bytes_out | EVAL rad = TO_RADIANS(bytes_in) | KEEP bytes_in, rad ; -warning:Line 1:22: evaluation of [bytes_in == bytes_out] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:22: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[bytes_in == bytes_out\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value bytes_in:ul | rad:double 16002960716282089759 | 2.79304354566432608E17 @@ -197,8 +195,8 @@ keep s, bytes_in, bytes_out | sort bytes_out, s | limit 2; -warning:Line 2:7: evaluation of [signum(bytes_in)] failed, treating result as null. Only first 20 failures recorded. -warning:Line 2:7: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[signum\(bytes_in\)\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value s:double | bytes_in:ul | bytes_out:ul 1.0 | 1957665857956635540 | 352442273299370793 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/where-like.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/where-like.csv-spec index 160fc46dafcf2..2a62117be8169 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/where-like.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/where-like.csv-spec @@ -302,8 +302,8 @@ FROM sample_data multiValueLike#[skip:-8.12.99] from employees | where job_positions like "Account*" | keep emp_no, job_positions; -warning:Line 1:24: evaluation of [job_positions like \"Account*\"] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[job_positions like \\\"Account\*\\\"\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value emp_no:integer | job_positions:keyword 10025 | Accountant @@ -313,8 +313,8 @@ emp_no:integer | job_positions:keyword multiValueRLike#[skip:-8.12.99] from employees | where job_positions rlike "Account.*" | keep emp_no, job_positions; -warning:Line 1:24: evaluation of [job_positions rlike \"Account.*\"] failed, treating result as null. Only first 20 failures recorded. 
-warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value +warningRegex:evaluation of \[job_positions rlike \\\"Account.*\\\"\] failed, treating result as null. Only first 20 failures recorded. +warningRegex:java.lang.IllegalArgumentException: single-value function encountered multi-value emp_no:integer | job_positions:keyword 10025 | Accountant diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java index 22e3de8499bc1..84738f733f86b 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java @@ -11,6 +11,7 @@ import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction; import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -44,7 +45,11 @@ public void ensureExchangesAreReleased() throws Exception { for (String node : internalCluster().getNodeNames()) { TransportEsqlQueryAction esqlQueryAction = internalCluster().getInstance(TransportEsqlQueryAction.class, node); ExchangeService exchangeService = esqlQueryAction.exchangeService(); - assertBusy(() -> assertTrue("Leftover exchanges " + exchangeService + " on node " + node, exchangeService.isEmpty())); + assertBusy(() -> { + if (exchangeService.lifecycleState() == Lifecycle.State.STARTED) { + assertTrue("Leftover exchanges " + exchangeService + " on node " + node, exchangeService.isEmpty()); + } + }); } } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AsyncEsqlQueryActionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AsyncEsqlQueryActionIT.java index 54c5e8511426a..f85de51101af5 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AsyncEsqlQueryActionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AsyncEsqlQueryActionIT.java @@ -18,7 +18,6 @@ import org.elasticsearch.xpack.core.async.DeleteAsyncResultRequest; import org.elasticsearch.xpack.core.async.GetAsyncResultRequest; import org.elasticsearch.xpack.core.async.TransportDeleteAsyncResultAction; -import org.elasticsearch.xpack.core.esql.action.ColumnInfo; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; import org.hamcrest.core.IsEqual; @@ -55,7 +54,7 @@ protected Collection> nodePlugins() { @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { return Settings.builder() - .put(ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMillis(between(500, 2000))) + .put(ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMillis(between(3000, 4000))) .build(); } @@ -90,7 +89,7 @@ public void testBasicAsyncExecution() throws Exception { try (var finalResponse = future.get()) { assertThat(finalResponse, notNullValue()); assertThat(finalResponse.isRunning(), is(false)); - assertThat(finalResponse.columns(), equalTo(List.of(new 
ColumnInfo("sum(pause_me)", "long")))); + assertThat(finalResponse.columns(), equalTo(List.of(new ColumnInfoImpl("sum(pause_me)", "long")))); assertThat(getValuesList(finalResponse).size(), equalTo(1)); } @@ -99,7 +98,7 @@ public void testBasicAsyncExecution() throws Exception { try (var finalResponse = again.get()) { assertThat(finalResponse, notNullValue()); assertThat(finalResponse.isRunning(), is(false)); - assertThat(finalResponse.columns(), equalTo(List.of(new ColumnInfo("sum(pause_me)", "long")))); + assertThat(finalResponse.columns(), equalTo(List.of(new ColumnInfoImpl("sum(pause_me)", "long")))); assertThat(getValuesList(finalResponse).size(), equalTo(1)); } @@ -174,7 +173,7 @@ private void testFinishingBeforeTimeout(boolean keepOnCompletion) { try (var response = request.execute().actionGet(60, TimeUnit.SECONDS)) { assertThat(response.isRunning(), is(false)); - assertThat(response.columns(), equalTo(List.of(new ColumnInfo("sum(pause_me)", "long")))); + assertThat(response.columns(), equalTo(List.of(new ColumnInfoImpl("sum(pause_me)", "long")))); assertThat(getValuesList(response).size(), equalTo(1)); if (keepOnCompletion) { @@ -187,7 +186,7 @@ private void testFinishingBeforeTimeout(boolean keepOnCompletion) { try (var resp = future.actionGet(60, TimeUnit.SECONDS)) { assertThat(resp.asyncExecutionId().get(), equalTo(id)); assertThat(resp.isRunning(), is(false)); - assertThat(resp.columns(), equalTo(List.of(new ColumnInfo("sum(pause_me)", "long")))); + assertThat(resp.columns(), equalTo(List.of(new ColumnInfoImpl("sum(pause_me)", "long")))); assertThat(getValuesList(resp).size(), equalTo(1)); } } else { diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java index 800067fef8b1c..df6a1e00b0212 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java @@ -68,7 +68,7 @@ public List> getSettings() { return List.of( Setting.timeSetting( ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, - TimeValue.timeValueMillis(between(1000, 3000)), + TimeValue.timeValueMillis(between(3000, 4000)), Setting.Property.NodeScope ) ); diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java index 5806cb8ef0982..cdfa6eb2d03f3 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EnrichIT.java @@ -40,9 +40,9 @@ import org.elasticsearch.xpack.core.enrich.action.DeleteEnrichPolicyAction; import org.elasticsearch.xpack.core.enrich.action.ExecuteEnrichPolicyAction; import org.elasticsearch.xpack.core.enrich.action.PutEnrichPolicyAction; -import org.elasticsearch.xpack.core.esql.action.ColumnInfo; import org.elasticsearch.xpack.enrich.EnrichPlugin; import org.elasticsearch.xpack.esql.EsqlTestUtils; +import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.junit.After; @@ -111,7 +111,7 @@ protected Settings 
nodeSettings(int nodeOrdinal, Settings otherSettings) { HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.getDefault(Settings.EMPTY) ) - .put(ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMillis(between(500, 2000))) + .put(ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMillis(between(3000, 4000))) .put(BlockFactory.LOCAL_BREAKER_OVER_RESERVED_SIZE_SETTING, ByteSizeValue.ofBytes(between(0, 256))) .put(BlockFactory.LOCAL_BREAKER_OVER_RESERVED_MAX_SIZE_SETTING, ByteSizeValue.ofBytes(between(0, 1024))) // allow reading pages from network can trip the circuit breaker @@ -226,12 +226,12 @@ private static String enrichSongCommand() { public void testSumDurationByArtist() { Function> extractStats = resp -> { - List columns = resp.columns(); + List columns = resp.columns(); assertThat(columns, hasSize(2)); assertThat(columns.get(0).name(), equalTo("sum(duration)")); - assertThat(columns.get(0).type(), equalTo("double")); + assertThat(columns.get(0).type(), equalTo(DataType.DOUBLE)); assertThat(columns.get(1).name(), equalTo("artist")); - assertThat(columns.get(1).type(), equalTo("keyword")); + assertThat(columns.get(1).type(), equalTo(DataType.KEYWORD)); Iterator> rows = resp.values(); Map actualValues = new HashMap<>(); while (rows.hasNext()) { @@ -256,12 +256,12 @@ public void testSumDurationByArtist() { public void testAvgDurationByArtist() { Function> extractStats = resp -> { - List columns = resp.columns(); + List columns = resp.columns(); assertThat(columns, hasSize(2)); assertThat(columns.get(0).name(), equalTo("avg(duration)")); - assertThat(columns.get(0).type(), equalTo("double")); + assertThat(columns.get(0).type(), equalTo(DataType.DOUBLE)); assertThat(columns.get(1).name(), equalTo("artist")); - assertThat(columns.get(1).type(), equalTo("keyword")); + assertThat(columns.get(1).type(), equalTo(DataType.KEYWORD)); Iterator> rows = resp.values(); Map actualValues = new HashMap<>(); while (rows.hasNext()) { @@ -282,12 +282,12 @@ public void testAvgDurationByArtist() { public void testListeningRatio() { Function> extractStats = resp -> { - List columns = resp.columns(); + List columns = resp.columns(); assertThat(columns, hasSize(2)); assertThat(columns.get(0).name(), equalTo("ratio")); - assertThat(columns.get(0).type(), equalTo("double")); + assertThat(columns.get(0).type(), equalTo(DataType.DOUBLE)); assertThat(columns.get(1).name(), equalTo("artist")); - assertThat(columns.get(1).type(), equalTo("keyword")); + assertThat(columns.get(1).type(), equalTo(DataType.KEYWORD)); Iterator> rows = resp.values(); Map actualValues = new HashMap<>(); while (rows.hasNext()) { diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionBreakerIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionBreakerIT.java index 089cb4a9a5084..37833d8aed2d3 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionBreakerIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionBreakerIT.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.esql.action; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.DocWriteResponse; @@ -35,7 +34,6 @@ import static 
org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105543") @TestLogging(value = "org.elasticsearch.xpack.esql:TRACE", reason = "debug") public class EsqlActionBreakerIT extends EsqlActionIT { @@ -72,7 +70,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.getDefault(Settings.EMPTY) ) - .put(ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMillis(between(500, 2000))) + .put(ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMillis(between(3000, 4000))) .put(BlockFactory.LOCAL_BREAKER_OVER_RESERVED_SIZE_SETTING, ByteSizeValue.ofBytes(between(0, 256))) .put(BlockFactory.LOCAL_BREAKER_OVER_RESERVED_MAX_SIZE_SETTING, ByteSizeValue.ofBytes(between(0, 1024))) // allow reading pages from network can trip the circuit breaker diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java index 686fb831aa042..0ec2f0da2d2a6 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java @@ -33,6 +33,7 @@ import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.esql.action.ColumnInfo; import org.elasticsearch.xpack.esql.VerificationException; +import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.parser.ParsingException; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; @@ -100,7 +101,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { public void testProjectConstant() { try (EsqlQueryResponse results = run("from test | eval x = 1 | keep x")) { - assertThat(results.columns(), equalTo(List.of(new ColumnInfo("x", "integer")))); + assertThat(results.columns(), equalTo(List.of(new ColumnInfoImpl("x", "integer")))); assertThat(getValuesList(results).size(), equalTo(40)); assertThat(getValuesList(results).get(0).get(0), equalTo(1)); } @@ -108,7 +109,7 @@ public void testProjectConstant() { public void testStatsOverConstant() { try (EsqlQueryResponse results = run("from test | eval x = 1 | stats x = count(x)")) { - assertThat(results.columns(), equalTo(List.of(new ColumnInfo("x", "long")))); + assertThat(results.columns(), equalTo(List.of(new ColumnInfoImpl("x", "long")))); assertThat(getValuesList(results).size(), equalTo(1)); assertThat(getValuesList(results).get(0).get(0), equalTo(40L)); } @@ -139,12 +140,12 @@ private void testFromStatsGroupingAvgImpl(String command, String expectedGroupNa assertEquals(2, results.columns().size()); // assert column metadata - ColumnInfo valuesColumn = results.columns().get(0); + ColumnInfoImpl valuesColumn = results.columns().get(0); assertEquals(expectedFieldName, valuesColumn.name()); - assertEquals("double", valuesColumn.type()); - ColumnInfo groupColumn = results.columns().get(1); + assertEquals(DataType.DOUBLE, valuesColumn.type()); + ColumnInfoImpl groupColumn = results.columns().get(1); assertEquals(expectedGroupName, groupColumn.name()); - 
assertEquals("long", groupColumn.type()); + assertEquals(DataType.LONG, groupColumn.type()); // assert column values List> valueValues = getValuesList(results); @@ -178,12 +179,12 @@ private void testFromStatsGroupingCountImpl(String command, String expectedField assertEquals(2, results.columns().size()); // assert column metadata - ColumnInfo groupColumn = results.columns().get(0); + ColumnInfoImpl groupColumn = results.columns().get(0); assertEquals(expectedGroupName, groupColumn.name()); - assertEquals("long", groupColumn.type()); - ColumnInfo valuesColumn = results.columns().get(1); + assertEquals(DataType.LONG, groupColumn.type()); + ColumnInfoImpl valuesColumn = results.columns().get(1); assertEquals(expectedFieldName, valuesColumn.name()); - assertEquals("long", valuesColumn.type()); + assertEquals(DataType.LONG, valuesColumn.type()); // assert column values List> valueValues = getValuesList(results); @@ -212,9 +213,9 @@ public void testFromStatsGroupingByDate() { // assert column metadata assertEquals("avg(count)", results.columns().get(0).name()); - assertEquals("double", results.columns().get(0).type()); + assertEquals(DataType.DOUBLE, results.columns().get(0).type()); assertEquals("time", results.columns().get(1).name()); - assertEquals("long", results.columns().get(1).type()); + assertEquals(DataType.LONG, results.columns().get(1).type()); // assert column values List expectedValues = LongStream.range(0, 40).map(i -> epoch + i).sorted().boxed().toList(); @@ -244,9 +245,9 @@ public void testFromGroupingByNumericFieldWithNulls() { assertThat(results.columns(), hasSize(2)); assertEquals("avg(count)", results.columns().get(0).name()); - assertEquals("double", results.columns().get(0).type()); + assertEquals(DataType.DOUBLE, results.columns().get(0).type()); assertEquals("data", results.columns().get(1).name()); - assertEquals("long", results.columns().get(1).type()); + assertEquals(DataType.LONG, results.columns().get(1).type()); record Group(Long data, Double avg) {} List expectedGroups = List.of(new Group(1L, 42.0), new Group(2L, 44.0), new Group(99L, null), new Group(null, 12.0)); @@ -263,9 +264,9 @@ public void testFromStatsGroupingByKeyword() { // assert column metadata assertEquals("avg(count)", results.columns().get(0).name()); - assertEquals("double", results.columns().get(0).type()); + assertEquals(DataType.DOUBLE, results.columns().get(0).type()); assertEquals("color", results.columns().get(1).name()); - assertEquals("keyword", results.columns().get(1).type()); + assertEquals(DataType.KEYWORD, results.columns().get(1).type()); record Group(String color, double avg) { } @@ -298,9 +299,9 @@ public void testFromStatsGroupingByKeywordWithNulls() { // assert column metadata assertEquals("avg", results.columns().get(0).name()); - assertEquals("double", results.columns().get(0).type()); + assertEquals(DataType.DOUBLE, results.columns().get(0).type()); assertEquals("color", results.columns().get(1).name()); - assertEquals("keyword", results.columns().get(1).type()); + assertEquals(DataType.KEYWORD, results.columns().get(1).type()); record Group(String color, Double avg) { } @@ -332,17 +333,17 @@ public void testFromStatsMultipleAggs() { // assert column metadata assertEquals("a", results.columns().get(0).name()); - assertEquals("double", results.columns().get(0).type()); + assertEquals(DataType.DOUBLE, results.columns().get(0).type()); assertEquals("mi", results.columns().get(1).name()); - assertEquals("long", results.columns().get(1).type()); + assertEquals(DataType.LONG, 
results.columns().get(1).type()); assertEquals("ma", results.columns().get(2).name()); - assertEquals("long", results.columns().get(2).type()); + assertEquals(DataType.LONG, results.columns().get(2).type()); assertEquals("s", results.columns().get(3).name()); - assertEquals("long", results.columns().get(3).type()); + assertEquals(DataType.LONG, results.columns().get(3).type()); assertEquals("c", results.columns().get(4).name()); - assertEquals("long", results.columns().get(4).type()); + assertEquals(DataType.LONG, results.columns().get(4).type()); assertEquals("color", results.columns().get(5).name()); - assertEquals("keyword", results.columns().get(5).type()); + assertEquals(DataType.KEYWORD, results.columns().get(5).type()); record Group(double avg, long mi, long ma, long s, long c, String color) {} List expectedGroups = List.of( new Group(42, 42, 42, 420, 10, "blue"), @@ -380,7 +381,7 @@ public void testFromStatsProjectGroup() { try (EsqlQueryResponse results = run("from test | stats avg_count = avg(count) by data | keep data")) { logger.info(results); assertThat(results.columns().stream().map(ColumnInfo::name).toList(), contains("data")); - assertThat(results.columns().stream().map(ColumnInfo::type).toList(), contains("long")); + assertThat(results.columns().stream().map(ColumnInfoImpl::type).toList(), contains(DataType.LONG)); assertThat(getValuesList(results), containsInAnyOrder(List.of(1L), List.of(2L))); } } @@ -389,7 +390,7 @@ public void testRowStatsProjectGroupByInt() { try (EsqlQueryResponse results = run("row a = 1, b = 2 | stats count(b) by a | keep a")) { logger.info(results); assertThat(results.columns().stream().map(ColumnInfo::name).toList(), contains("a")); - assertThat(results.columns().stream().map(ColumnInfo::type).toList(), contains("integer")); + assertThat(results.columns().stream().map(ColumnInfoImpl::type).toList(), contains(DataType.INTEGER)); assertThat(getValuesList(results), contains(List.of(1))); } } @@ -398,7 +399,7 @@ public void testRowStatsProjectGroupByLong() { try (EsqlQueryResponse results = run("row a = 1000000000000, b = 2 | stats count(b) by a | keep a")) { logger.info(results); assertThat(results.columns().stream().map(ColumnInfo::name).toList(), contains("a")); - assertThat(results.columns().stream().map(ColumnInfo::type).toList(), contains("long")); + assertThat(results.columns().stream().map(ColumnInfoImpl::type).toList(), contains(DataType.LONG)); assertThat(getValuesList(results), contains(List.of(1000000000000L))); } } @@ -407,7 +408,7 @@ public void testRowStatsProjectGroupByDouble() { try (EsqlQueryResponse results = run("row a = 1.0, b = 2 | stats count(b) by a | keep a")) { logger.info(results); assertThat(results.columns().stream().map(ColumnInfo::name).toList(), contains("a")); - assertThat(results.columns().stream().map(ColumnInfo::type).toList(), contains("double")); + assertThat(results.columns().stream().map(ColumnInfoImpl::type).toList(), contains(DataType.DOUBLE)); assertThat(getValuesList(results), contains(List.of(1.0))); } } @@ -416,7 +417,7 @@ public void testRowStatsProjectGroupByKeyword() { try (EsqlQueryResponse results = run("row a = \"hello\", b = 2 | stats count(b) by a | keep a")) { logger.info(results); assertThat(results.columns().stream().map(ColumnInfo::name).toList(), contains("a")); - assertThat(results.columns().stream().map(ColumnInfo::type).toList(), contains("keyword")); + assertThat(results.columns().stream().map(ColumnInfoImpl::type).toList(), contains(DataType.KEYWORD)); assertThat(getValuesList(results), 
contains(List.of("hello"))); } } @@ -425,7 +426,7 @@ public void testFromStatsProjectGroupByDouble() { try (EsqlQueryResponse results = run("from test | stats count(count) by data_d | keep data_d")) { logger.info(results); assertThat(results.columns().stream().map(ColumnInfo::name).toList(), contains("data_d")); - assertThat(results.columns().stream().map(ColumnInfo::type).toList(), contains("double")); + assertThat(results.columns().stream().map(ColumnInfoImpl::type).toList(), contains(DataType.DOUBLE)); assertThat(getValuesList(results), containsInAnyOrder(List.of(1.0), List.of(2.0))); } } @@ -435,7 +436,7 @@ public void testFromStatsProjectGroupWithAlias() { try (EsqlQueryResponse results = run(query)) { logger.info(results); assertThat(results.columns().stream().map(ColumnInfo::name).toList(), contains("d", "d2")); - assertThat(results.columns().stream().map(ColumnInfo::type).toList(), contains("long", "long")); + assertThat(results.columns().stream().map(ColumnInfoImpl::type).toList(), contains(DataType.LONG, DataType.LONG)); assertThat(getValuesList(results), containsInAnyOrder(List.of(1L, 1L), List.of(2L, 2L))); } } @@ -444,7 +445,7 @@ public void testFromStatsProjectAgg() { try (EsqlQueryResponse results = run("from test | stats a = avg(count) by data | keep a")) { logger.info(results); assertThat(results.columns().stream().map(ColumnInfo::name).toList(), contains("a")); - assertThat(results.columns().stream().map(ColumnInfo::type).toList(), contains("double")); + assertThat(results.columns().stream().map(ColumnInfoImpl::type).toList(), contains(DataType.DOUBLE)); assertThat(getValuesList(results), containsInAnyOrder(List.of(42d), List.of(44d))); } } @@ -453,7 +454,7 @@ public void testFromStatsProjectAggWithAlias() { try (EsqlQueryResponse results = run("from test | stats a = avg(count) by data | rename a as b | keep b")) { logger.info(results); assertThat(results.columns().stream().map(ColumnInfo::name).toList(), contains("b")); - assertThat(results.columns().stream().map(ColumnInfo::type).toList(), contains("double")); + assertThat(results.columns().stream().map(ColumnInfoImpl::type).toList(), contains(DataType.DOUBLE)); assertThat(getValuesList(results), containsInAnyOrder(List.of(42d), List.of(44d))); } } @@ -462,7 +463,7 @@ public void testFromProjectStatsGroupByAlias() { try (EsqlQueryResponse results = run("from test | rename data as d | keep d, count | stats avg(count) by d")) { logger.info(results); assertThat(results.columns().stream().map(ColumnInfo::name).toList(), contains("avg(count)", "d")); - assertThat(results.columns().stream().map(ColumnInfo::type).toList(), contains("double", "long")); + assertThat(results.columns().stream().map(ColumnInfoImpl::type).toList(), contains(DataType.DOUBLE, DataType.LONG)); assertThat(getValuesList(results), containsInAnyOrder(List.of(42d, 1L), List.of(44d, 2L))); } } @@ -471,7 +472,7 @@ public void testFromProjectStatsAggregateAlias() { try (EsqlQueryResponse results = run("from test | rename count as c | keep c, data | stats avg(c) by data")) { logger.info(results); assertThat(results.columns().stream().map(ColumnInfo::name).toList(), contains("avg(c)", "data")); - assertThat(results.columns().stream().map(ColumnInfo::type).toList(), contains("double", "long")); + assertThat(results.columns().stream().map(ColumnInfoImpl::type).toList(), contains(DataType.DOUBLE, DataType.LONG)); assertThat(getValuesList(results), containsInAnyOrder(List.of(42d, 1L), List.of(44d, 2L))); } } @@ -482,7 +483,7 @@ public void testFromEvalStats() { 
assertEquals(1, results.columns().size()); assertEquals(1, getValuesList(results).size()); assertEquals("avg(ratio)", results.columns().get(0).name()); - assertEquals("double", results.columns().get(0).type()); + assertEquals(DataType.DOUBLE, results.columns().get(0).type()); assertEquals(1, getValuesList(results).get(0).size()); assertEquals(0.034d, (double) getValuesList(results).get(0).get(0), 0.001d); } @@ -494,7 +495,7 @@ public void testUngroupedCountAll() { assertEquals(1, results.columns().size()); assertEquals(1, getValuesList(results).size()); assertEquals("count(*)", results.columns().get(0).name()); - assertEquals("long", results.columns().get(0).type()); + assertEquals(DataType.LONG, results.columns().get(0).type()); var values = getValuesList(results).get(0); assertEquals(1, values.size()); assertEquals(40, (long) values.get(0)); @@ -507,7 +508,7 @@ public void testUngroupedCountAllWithFilter() { assertEquals(1, results.columns().size()); assertEquals(1, getValuesList(results).size()); assertEquals("count(*)", results.columns().get(0).name()); - assertEquals("long", results.columns().get(0).type()); + assertEquals(DataType.LONG, results.columns().get(0).type()); var values = getValuesList(results).get(0); assertEquals(1, values.size()); assertEquals(20, (long) values.get(0)); @@ -520,9 +521,9 @@ public void testGroupedCountAllWithFilter() { assertEquals(2, results.columns().size()); assertEquals(1, getValuesList(results).size()); assertEquals("count(*)", results.columns().get(0).name()); - assertEquals("long", results.columns().get(0).type()); + assertEquals(DataType.LONG, results.columns().get(0).type()); assertEquals("data", results.columns().get(1).name()); - assertEquals("long", results.columns().get(1).type()); + assertEquals(DataType.LONG, results.columns().get(1).type()); var values = getValuesList(results).get(0); assertEquals(2, values.size()); assertEquals(20, (long) values.get(0)); @@ -536,10 +537,10 @@ public void testFromStatsEvalWithPragma() { logger.info(results); assertEquals(1, getValuesList(results).size()); assertEquals(2, getValuesList(results).get(0).size()); - assertEquals(50, (double) getValuesList(results).get(0).get(results.columns().indexOf(new ColumnInfo("x", "double"))), 1d); + assertEquals(50, (double) getValuesList(results).get(0).get(results.columns().indexOf(new ColumnInfoImpl("x", "double"))), 1d); assertEquals( 43, - (double) getValuesList(results).get(0).get(results.columns().indexOf(new ColumnInfo("avg_count", "double"))), + (double) getValuesList(results).get(0).get(results.columns().indexOf(new ColumnInfoImpl("avg_count", "double"))), 1d ); } @@ -549,7 +550,7 @@ public void testWhere() { try (EsqlQueryResponse results = run("from test | where count > 40")) { logger.info(results); assertEquals(30, getValuesList(results).size()); - var countIndex = results.columns().indexOf(new ColumnInfo("count", "long")); + var countIndex = results.columns().indexOf(new ColumnInfoImpl("count", "long")); for (List values : getValuesList(results)) { assertThat((Long) values.get(countIndex), greaterThan(40L)); } @@ -560,7 +561,7 @@ public void testProjectWhere() { try (EsqlQueryResponse results = run("from test | keep count | where count > 40")) { logger.info(results); assertEquals(30, getValuesList(results).size()); - int countIndex = results.columns().indexOf(new ColumnInfo("count", "long")); + int countIndex = results.columns().indexOf(new ColumnInfoImpl("count", "long")); for (List values : getValuesList(results)) { assertThat((Long) 
values.get(countIndex), greaterThan(40L)); } @@ -571,7 +572,7 @@ public void testEvalWhere() { try (EsqlQueryResponse results = run("from test | eval x = count / 2 | where x > 20")) { logger.info(results); assertEquals(30, getValuesList(results).size()); - int countIndex = results.columns().indexOf(new ColumnInfo("x", "long")); + int countIndex = results.columns().indexOf(new ColumnInfoImpl("x", "long")); for (List values : getValuesList(results)) { assertThat((Long) values.get(countIndex), greaterThan(20L)); } @@ -589,7 +590,7 @@ public void testStringLength() { try (EsqlQueryResponse results = run("from test | eval l = length(color)")) { logger.info(results); assertThat(getValuesList(results), hasSize(40)); - int countIndex = results.columns().indexOf(new ColumnInfo("l", "integer")); + int countIndex = results.columns().indexOf(new ColumnInfoImpl("l", "integer")); for (List values : getValuesList(results)) { assertThat((Integer) values.get(countIndex), greaterThanOrEqualTo(3)); } @@ -608,11 +609,11 @@ public void testFilterWithNullAndEvalFromIndex() { try (EsqlQueryResponse results = run("from test | eval newCount = count + 1 | where newCount > 1")) { logger.info(results); assertEquals(40, getValuesList(results).size()); - assertThat(results.columns(), hasItem(equalTo(new ColumnInfo("count", "long")))); - assertThat(results.columns(), hasItem(equalTo(new ColumnInfo("count_d", "double")))); - assertThat(results.columns(), hasItem(equalTo(new ColumnInfo("data", "long")))); - assertThat(results.columns(), hasItem(equalTo(new ColumnInfo("data_d", "double")))); - assertThat(results.columns(), hasItem(equalTo(new ColumnInfo("time", "long")))); + assertThat(results.columns(), hasItem(equalTo(new ColumnInfoImpl("count", "long")))); + assertThat(results.columns(), hasItem(equalTo(new ColumnInfoImpl("count_d", "double")))); + assertThat(results.columns(), hasItem(equalTo(new ColumnInfoImpl("data", "long")))); + assertThat(results.columns(), hasItem(equalTo(new ColumnInfoImpl("data_d", "double")))); + assertThat(results.columns(), hasItem(equalTo(new ColumnInfoImpl("time", "long")))); } } @@ -646,7 +647,7 @@ public void testEvalOverride() { assertEquals(40, getValuesList(results).size()); assertEquals(1, results.columns().stream().filter(c -> c.name().equals("count")).count()); int countIndex = results.columns().size() - 1; - assertEquals(new ColumnInfo("count", "long"), results.columns().get(countIndex)); + assertEquals(new ColumnInfoImpl("count", "long"), results.columns().get(countIndex)); for (List values : getValuesList(results)) { assertThat((Long) values.get(countIndex), greaterThanOrEqualTo(42L)); } @@ -657,7 +658,7 @@ public void testProjectRename() { try (var results = run("from test | eval y = count | rename count as x | keep x, y")) { logger.info(results); assertEquals(40, getValuesList(results).size()); - assertThat(results.columns(), contains(new ColumnInfo("x", "long"), new ColumnInfo("y", "long"))); + assertThat(results.columns(), contains(new ColumnInfoImpl("x", "long"), new ColumnInfoImpl("y", "long"))); for (List values : getValuesList(results)) { assertThat((Long) values.get(0), greaterThanOrEqualTo(40L)); assertThat(values.get(1), is(values.get(0))); @@ -672,10 +673,10 @@ public void testProjectRenameEval() { assertThat( results.columns(), contains( - new ColumnInfo("x", "long"), - new ColumnInfo("y", "long"), - new ColumnInfo("x2", "long"), - new ColumnInfo("y2", "long") + new ColumnInfoImpl("x", "long"), + new ColumnInfoImpl("y", "long"), + new ColumnInfoImpl("x2", "long"), + 
new ColumnInfoImpl("y2", "long") ) ); for (List values : getValuesList(results)) { @@ -691,7 +692,10 @@ public void testProjectRenameEvalProject() { try (var results = run("from test | eval y = count | rename count as x | keep x, y | eval z = x + y | keep x, y, z")) { logger.info(results); assertEquals(40, getValuesList(results).size()); - assertThat(results.columns(), contains(new ColumnInfo("x", "long"), new ColumnInfo("y", "long"), new ColumnInfo("z", "long"))); + assertThat( + results.columns(), + contains(new ColumnInfoImpl("x", "long"), new ColumnInfoImpl("y", "long"), new ColumnInfoImpl("z", "long")) + ); for (List values : getValuesList(results)) { assertThat((Long) values.get(0), greaterThanOrEqualTo(40L)); assertThat(values.get(1), is(values.get(0))); @@ -704,7 +708,7 @@ public void testProjectOverride() { try (var results = run("from test | eval cnt = count | rename count as data | keep cnt, data")) { logger.info(results); assertEquals(40, getValuesList(results).size()); - assertThat(results.columns(), contains(new ColumnInfo("cnt", "long"), new ColumnInfo("data", "long"))); + assertThat(results.columns(), contains(new ColumnInfoImpl("cnt", "long"), new ColumnInfoImpl("data", "long"))); for (List values : getValuesList(results)) { assertThat(values.get(1), is(values.get(0))); } @@ -865,7 +869,7 @@ public void testEvalWithNullAndAvg() { assertEquals(1, results.columns().size()); assertEquals(1, getValuesList(results).size()); assertEquals("avg(nullsum)", results.columns().get(0).name()); - assertEquals("double", results.columns().get(0).type()); + assertEquals(DataType.DOUBLE, results.columns().get(0).type()); assertEquals(1, getValuesList(results).get(0).size()); assertNull(getValuesList(results).get(0).get(0)); } @@ -874,7 +878,7 @@ public void testEvalWithNullAndAvg() { public void testFromStatsLimit() { try (EsqlQueryResponse results = run("from test | stats ac = avg(count) by data | limit 1")) { logger.info(results); - assertThat(results.columns(), contains(new ColumnInfo("ac", "double"), new ColumnInfo("data", "long"))); + assertThat(results.columns(), contains(new ColumnInfoImpl("ac", "double"), new ColumnInfoImpl("data", "long"))); assertThat(getValuesList(results), contains(anyOf(contains(42.0, 1L), contains(44.0, 2L)))); } } @@ -882,7 +886,7 @@ public void testFromStatsLimit() { public void testFromLimit() { try (EsqlQueryResponse results = run("from test | keep data | limit 2")) { logger.info(results); - assertThat(results.columns(), contains(new ColumnInfo("data", "long"))); + assertThat(results.columns(), contains(new ColumnInfoImpl("data", "long"))); assertThat(getValuesList(results), contains(anyOf(contains(1L), contains(2L)), anyOf(contains(1L), contains(2L)))); } } @@ -891,7 +895,7 @@ public void testDropAllColumns() { try (EsqlQueryResponse results = run("from test | keep data | drop data | eval a = 1")) { logger.info(results); assertThat(results.columns(), hasSize(1)); - assertThat(results.columns(), contains(new ColumnInfo("a", "integer"))); + assertThat(results.columns(), contains(new ColumnInfoImpl("a", "integer"))); assertThat(getValuesList(results), is(empty())); } } @@ -1010,7 +1014,7 @@ public void testErrorMessageForEmptyParams() { public void testEmptyIndex() { assertAcked(client().admin().indices().prepareCreate("test_empty").setMapping("k", "type=keyword", "v", "type=long").get()); try (EsqlQueryResponse results = run("from test_empty")) { - assertThat(results.columns(), equalTo(List.of(new ColumnInfo("k", "keyword"), new ColumnInfo("v", "long")))); 
+ assertThat(results.columns(), equalTo(List.of(new ColumnInfoImpl("k", "keyword"), new ColumnInfoImpl("v", "long")))); assertThat(getValuesList(results), empty()); } } @@ -1019,7 +1023,13 @@ public void testShowInfo() { try (EsqlQueryResponse results = run("show info")) { assertThat( results.columns(), - equalTo(List.of(new ColumnInfo("version", "keyword"), new ColumnInfo("date", "keyword"), new ColumnInfo("hash", "keyword"))) + equalTo( + List.of( + new ColumnInfoImpl("version", "keyword"), + new ColumnInfoImpl("date", "keyword"), + new ColumnInfoImpl("hash", "keyword") + ) + ) ); assertThat(getValuesList(results).size(), equalTo(1)); assertThat(getValuesList(results).get(0).get(0), equalTo(Build.current().version())); @@ -1034,16 +1044,16 @@ public void testMetaFunctions() { results.columns(), equalTo( List.of( - new ColumnInfo("name", "keyword"), - new ColumnInfo("synopsis", "keyword"), - new ColumnInfo("argNames", "keyword"), - new ColumnInfo("argTypes", "keyword"), - new ColumnInfo("argDescriptions", "keyword"), - new ColumnInfo("returnType", "keyword"), - new ColumnInfo("description", "keyword"), - new ColumnInfo("optionalArgs", "boolean"), - new ColumnInfo("variadic", "boolean"), - new ColumnInfo("isAggregation", "boolean") + new ColumnInfoImpl("name", "keyword"), + new ColumnInfoImpl("synopsis", "keyword"), + new ColumnInfoImpl("argNames", "keyword"), + new ColumnInfoImpl("argTypes", "keyword"), + new ColumnInfoImpl("argDescriptions", "keyword"), + new ColumnInfoImpl("returnType", "keyword"), + new ColumnInfoImpl("description", "keyword"), + new ColumnInfoImpl("optionalArgs", "boolean"), + new ColumnInfoImpl("variadic", "boolean"), + new ColumnInfoImpl("isAggregation", "boolean") ) ) ); @@ -1053,7 +1063,7 @@ public void testMetaFunctions() { public void testInWithNullValue() { try (EsqlQueryResponse results = run("from test | where null in (data, 2) | keep data")) { - assertThat(results.columns(), equalTo(List.of(new ColumnInfo("data", "long")))); + assertThat(results.columns(), equalTo(List.of(new ColumnInfoImpl("data", "long")))); assertThat(getValuesList(results).size(), equalTo(0)); } } @@ -1088,11 +1098,11 @@ public void testTopNPushedToLucene() { // assert column metadata assertEquals("data", results.columns().get(0).name()); - assertEquals("long", results.columns().get(0).type()); + assertEquals(DataType.LONG, results.columns().get(0).type()); assertEquals("count", results.columns().get(1).name()); - assertEquals("long", results.columns().get(1).type()); + assertEquals(DataType.LONG, results.columns().get(1).type()); assertEquals("color", results.columns().get(2).name()); - assertEquals("keyword", results.columns().get(2).type()); + assertEquals(DataType.KEYWORD, results.columns().get(2).type()); record Group(Long data, Long count, String color) { Group(Long data, Long count) { this(data, count, "yellow"); @@ -1139,7 +1149,7 @@ public void testTopNPushedToLuceneOnSortedIndex() { // assert column metadata assertEquals("time", results.columns().get(0).name()); - assertEquals("long", results.columns().get(0).type()); + assertEquals(DataType.LONG, results.columns().get(0).type()); boolean sortedDesc = "desc".equals(sortOrder); var expected = LongStream.range(0, 40) @@ -1214,7 +1224,7 @@ public void testGroupingMultiValueByOrdinals() { public void testLoadId() { try (EsqlQueryResponse results = run("from test metadata _id | keep _id | sort _id ")) { - assertThat(results.columns(), equalTo(List.of(new ColumnInfo("_id", "keyword")))); + assertThat(results.columns(), 
equalTo(List.of(new ColumnInfoImpl("_id", "keyword")))); ListMatcher values = matchesList(); for (int i = 10; i < 50; i++) { values = values.item(List.of(Integer.toString(i))); @@ -1420,12 +1430,12 @@ public void testQueryOnEmptyMappingIndex() { try (EsqlQueryResponse resp = run(from + "METADATA _source | EVAL x = 123")) { assertFalse(resp.values().hasNext()); - assertThat(resp.columns(), equalTo(List.of(new ColumnInfo("_source", "_source"), new ColumnInfo("x", "integer")))); + assertThat(resp.columns(), equalTo(List.of(new ColumnInfoImpl("_source", "_source"), new ColumnInfoImpl("x", "integer")))); } try (EsqlQueryResponse resp = run(from)) { assertFalse(resp.values().hasNext()); - assertThat(resp.columns(), equalTo(List.of(new ColumnInfo("", "null")))); + assertThat(resp.columns(), equalTo(List.of(new ColumnInfoImpl("", "null")))); } } @@ -1450,32 +1460,38 @@ public void testQueryOnEmptyDataIndex() { assertFalse(resp.values().hasNext()); assertThat( resp.columns(), - equalTo(List.of(new ColumnInfo("name", "text"), new ColumnInfo("_source", "_source"), new ColumnInfo("x", "integer"))) + equalTo( + List.of( + new ColumnInfoImpl("name", "text"), + new ColumnInfoImpl("_source", "_source"), + new ColumnInfoImpl("x", "integer") + ) + ) ); } try (EsqlQueryResponse resp = run(from)) { assertFalse(resp.values().hasNext()); - assertThat(resp.columns(), equalTo(List.of(new ColumnInfo("name", "text")))); + assertThat(resp.columns(), equalTo(List.of(new ColumnInfoImpl("name", "text")))); } } private void assertEmptyIndexQueries(String from) { try (EsqlQueryResponse resp = run(from + "METADATA _source | KEEP _source | LIMIT 1")) { assertFalse(resp.values().hasNext()); - assertThat(resp.columns(), equalTo(List.of(new ColumnInfo("_source", "_source")))); + assertThat(resp.columns(), equalTo(List.of(new ColumnInfoImpl("_source", "_source")))); } try (EsqlQueryResponse resp = run(from + "| EVAL y = 1 | KEEP y | LIMIT 1 | EVAL x = 1")) { assertFalse(resp.values().hasNext()); - assertThat(resp.columns(), equalTo(List.of(new ColumnInfo("y", "integer"), new ColumnInfo("x", "integer")))); + assertThat(resp.columns(), equalTo(List.of(new ColumnInfoImpl("y", "integer"), new ColumnInfoImpl("x", "integer")))); } try (EsqlQueryResponse resp = run(from + "| STATS c = count()")) { assertTrue(resp.values().hasNext()); Iterator row = resp.values().next(); assertThat(row.next(), equalTo((long) 0)); - assertThat(resp.columns(), equalTo(List.of(new ColumnInfo("c", "long")))); + assertThat(resp.columns(), equalTo(List.of(new ColumnInfoImpl("c", "long")))); } try (EsqlQueryResponse resp = run(from + "| STATS c = count() | EVAL x = 123")) { @@ -1484,7 +1500,7 @@ private void assertEmptyIndexQueries(String from) { assertThat(row.next(), equalTo((long) 0)); assertThat(row.next(), equalTo(123)); assertFalse(row.hasNext()); - assertThat(resp.columns(), equalTo(List.of(new ColumnInfo("c", "long"), new ColumnInfo("x", "integer")))); + assertThat(resp.columns(), equalTo(List.of(new ColumnInfoImpl("c", "long"), new ColumnInfoImpl("x", "integer")))); } } @@ -1561,7 +1577,7 @@ private void createAlias(List indices, String alias) throws InterruptedE private void assertNoNestedDocuments(String query, int docsCount, long minValue, long maxValue) { try (EsqlQueryResponse results = run(query)) { - assertThat(results.columns(), contains(new ColumnInfo("data", "long"))); + assertThat(results.columns(), contains(new ColumnInfoImpl("data", "long"))); assertThat(results.columns().size(), is(1)); assertThat(getValuesList(results).size(), 
is(docsCount)); for (List row : getValuesList(results)) { diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java index d3471450e4728..cde4f10ef556c 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java @@ -59,6 +59,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.in; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; @@ -323,7 +324,10 @@ private void assertCancelled(ActionFuture response) throws Ex * or the cancellation chained from another cancellation and has * "task cancelled". */ - assertThat(cancelException.getMessage(), either(equalTo("test cancel")).or(equalTo("task cancelled"))); + assertThat( + cancelException.getMessage(), + in(List.of("test cancel", "task cancelled", "request cancelled test cancel", "parent task was cancelled [test cancel]")) + ); assertBusy( () -> assertThat( client().admin() diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlDisruptionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlDisruptionIT.java index df1b2c9f00f49..e9eada5def0dc 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlDisruptionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlDisruptionIT.java @@ -52,7 +52,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { Settings settings = Settings.builder() .put(super.nodeSettings(nodeOrdinal, otherSettings)) .put(DEFAULT_SETTINGS) - .put(ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMillis(between(1000, 2000))) + .put(ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMillis(between(3000, 4000))) .build(); logger.info("settings {}", settings); return settings; diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TimeSeriesIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TimeSeriesIT.java index 26ffdf0e13ccd..77726ca9fdcce 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TimeSeriesIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TimeSeriesIT.java @@ -8,14 +8,24 @@ package org.elasticsearch.xpack.esql.action; import org.elasticsearch.Build; +import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xpack.esql.EsqlTestUtils; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.junit.Before; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Comparator; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.Objects; +import static org.elasticsearch.index.mapper.DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER; 
+import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -28,77 +38,753 @@ protected EsqlQueryResponse run(EsqlQueryRequest request) { } public void testEmpty() { - Settings settings = Settings.builder().put("mode", "time_series").putList("routing_path", List.of("pod")).build(); + Settings settings = Settings.builder().put("mode", "time_series").putList("routing_path", List.of("host")).build(); client().admin() .indices() - .prepareCreate("pods") + .prepareCreate("empty_index") .setSettings(settings) .setMapping( "@timestamp", "type=date", - "pod", + "host", "type=keyword,time_series_dimension=true", "cpu", "type=long,time_series_metric=gauge" ) .get(); - run("METRICS pods | LIMIT 1").close(); + run("METRICS empty_index | LIMIT 1").close(); } - public void testSimpleMetrics() { - Settings settings = Settings.builder().put("mode", "time_series").putList("routing_path", List.of("pod")).build(); + record Doc(String host, String cluster, long timestamp, int requestCount, double cpu) {} + + final List docs = new ArrayList<>(); + + record RequestCounter(long timestamp, long count) { + + } + + static Double computeRate(List values) { + List sorted = values.stream().sorted(Comparator.comparingLong(RequestCounter::timestamp)).toList(); + if (sorted.size() < 2) { + return null; + } + long resets = 0; + for (int i = 0; i < sorted.size() - 1; i++) { + if (sorted.get(i).count > sorted.get(i + 1).count) { + resets += sorted.get(i).count; + } + } + RequestCounter last = sorted.get(sorted.size() - 1); + RequestCounter first = sorted.get(0); + double dv = resets + last.count - first.count; + double dt = last.timestamp - first.timestamp; + return dv * 1000 / dt; + } + + @Before + public void populateIndex() { + // this can be expensive, do one + Settings settings = Settings.builder().put("mode", "time_series").putList("routing_path", List.of("host", "cluster")).build(); client().admin() .indices() - .prepareCreate("pods") + .prepareCreate("hosts") .setSettings(settings) .setMapping( "@timestamp", "type=date", - "pod", + "host", + "type=keyword,time_series_dimension=true", + "cluster", "type=keyword,time_series_dimension=true", "cpu", - "type=double,time_series_metric=gauge" + "type=double,time_series_metric=gauge", + "request_count", + "type=integer,time_series_metric=counter" ) .get(); - List pods = List.of("p1", "p2", "p3"); - long startTime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-04-15T00:00:00Z"); - int numDocs = between(10, 100); - record Doc(String pod, long timestamp, double cpu) {} - List docs = new ArrayList<>(); + Map hostToClusters = new HashMap<>(); + for (int i = 0; i < 5; i++) { + hostToClusters.put("p" + i, randomFrom("qa", "prod")); + } + long timestamp = DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-04-15T00:00:00Z"); + int numDocs = between(20, 100); + docs.clear(); + Map requestCounts = new HashMap<>(); for (int i = 0; i < numDocs; i++) { - String pod = randomFrom(pods); - int cpu = randomIntBetween(0, 100); - long timestamp = startTime + (1000L * i); - docs.add(new Doc(pod, timestamp, cpu)); - client().prepareIndex("pods").setSource("@timestamp", timestamp, "pod", pod, "cpu", cpu).get(); - } - List sortedGroups = docs.stream().map(d -> d.pod).distinct().sorted().toList(); - client().admin().indices().prepareRefresh("pods").get(); - try (EsqlQueryResponse resp = run("METRICS pods load=avg(cpu) BY pod | SORT pod")) { + List hosts = randomSubsetOf(between(1, hostToClusters.size()), 
hostToClusters.keySet()); + timestamp += between(1, 10) * 1000L; + for (String host : hosts) { + var requestCount = requestCounts.compute(host, (k, curr) -> { + if (curr == null || randomInt(100) <= 20) { + return randomIntBetween(0, 10); + } else { + return curr + randomIntBetween(1, 10); + } + }); + int cpu = randomIntBetween(0, 100); + docs.add(new Doc(host, hostToClusters.get(host), timestamp, requestCount, cpu)); + } + } + Randomness.shuffle(docs); + for (Doc doc : docs) { + client().prepareIndex("hosts") + .setSource( + "@timestamp", + doc.timestamp, + "host", + doc.host, + "cluster", + doc.cluster, + "cpu", + doc.cpu, + "request_count", + doc.requestCount + ) + .get(); + } + client().admin().indices().prepareRefresh("hosts").get(); + } + + public void testSimpleMetrics() { + List sortedGroups = docs.stream().map(d -> d.host).distinct().sorted().toList(); + client().admin().indices().prepareRefresh("hosts").get(); + try (EsqlQueryResponse resp = run("METRICS hosts load=avg(cpu) BY host | SORT host")) { List> rows = EsqlTestUtils.getValuesList(resp); assertThat(rows, hasSize(sortedGroups.size())); for (int i = 0; i < rows.size(); i++) { List r = rows.get(i); String pod = (String) r.get(1); assertThat(pod, equalTo(sortedGroups.get(i))); - List values = docs.stream().filter(d -> d.pod.equals(pod)).map(d -> d.cpu).toList(); + List values = docs.stream().filter(d -> d.host.equals(pod)).map(d -> d.cpu).toList(); double avg = values.stream().mapToDouble(n -> n).sum() / values.size(); assertThat((double) r.get(0), equalTo(avg)); } } - try (EsqlQueryResponse resp = run("METRICS pods | SORT @timestamp DESC | KEEP @timestamp, pod, cpu | LIMIT 5")) { + try (EsqlQueryResponse resp = run("METRICS hosts | SORT @timestamp DESC, host | KEEP @timestamp, host, cpu | LIMIT 5")) { List> rows = EsqlTestUtils.getValuesList(resp); - List topDocs = docs.stream().sorted(Comparator.comparingLong(Doc::timestamp).reversed()).limit(5).toList(); + List topDocs = docs.stream() + .sorted(Comparator.comparingLong(Doc::timestamp).reversed().thenComparing(Doc::host)) + .limit(5) + .toList(); assertThat(rows, hasSize(topDocs.size())); for (int i = 0; i < rows.size(); i++) { List r = rows.get(i); - long timestamp = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis((String) r.get(0)); + long timestamp = DEFAULT_DATE_TIME_FORMATTER.parseMillis((String) r.get(0)); String pod = (String) r.get(1); double cpu = (Double) r.get(2); assertThat(topDocs.get(i).timestamp, equalTo(timestamp)); - assertThat(topDocs.get(i).pod, equalTo(pod)); + assertThat(topDocs.get(i).host, equalTo(pod)); assertThat(topDocs.get(i).cpu, equalTo(cpu)); } } } + + public void testRateWithoutGrouping() { + record RateKey(String cluster, String host) { + + } + Map> groups = new HashMap<>(); + for (Doc doc : docs) { + RateKey key = new RateKey(doc.cluster, doc.host); + groups.computeIfAbsent(key, k -> new ArrayList<>()).add(new RequestCounter(doc.timestamp, doc.requestCount)); + } + List rates = new ArrayList<>(); + for (List group : groups.values()) { + Double v = computeRate(group); + if (v != null) { + rates.add(v); + } + } + try (var resp = run("METRICS hosts sum(rate(request_count, 1second))")) { + assertThat(resp.columns(), equalTo(List.of(new ColumnInfoImpl("sum(rate(request_count, 1second))", "double")))); + List> values = EsqlTestUtils.getValuesList(resp); + assertThat(values, hasSize(1)); + assertThat(values.get(0), hasSize(1)); + assertThat((double) values.get(0).get(0), closeTo(rates.stream().mapToDouble(d -> d).sum(), 0.1)); + } + try 
(var resp = run("METRICS hosts max(rate(request_count)), min(rate(request_count))")) { + assertThat( + resp.columns(), + equalTo( + List.of( + new ColumnInfoImpl("max(rate(request_count))", "double"), + new ColumnInfoImpl("min(rate(request_count))", "double") + ) + ) + ); + List> values = EsqlTestUtils.getValuesList(resp); + assertThat(values, hasSize(1)); + assertThat(values.get(0), hasSize(2)); + assertThat((double) values.get(0).get(0), closeTo(rates.stream().mapToDouble(d -> d).max().orElse(0.0), 0.1)); + assertThat((double) values.get(0).get(1), closeTo(rates.stream().mapToDouble(d -> d).min().orElse(0.0), 0.1)); + } + try (var resp = run("METRICS hosts max(rate(request_count)), avg(rate(request_count)), max(rate(request_count, 1minute))")) { + assertThat( + resp.columns(), + equalTo( + List.of( + new ColumnInfoImpl("max(rate(request_count))", "double"), + new ColumnInfoImpl("avg(rate(request_count))", "double"), + new ColumnInfoImpl("max(rate(request_count, 1minute))", "double") + ) + ) + ); + List> values = EsqlTestUtils.getValuesList(resp); + assertThat(values, hasSize(1)); + assertThat(values.get(0), hasSize(3)); + assertThat((double) values.get(0).get(0), closeTo(rates.stream().mapToDouble(d -> d).max().orElse(0.0), 0.1)); + final double avg = rates.isEmpty() ? 0.0 : rates.stream().mapToDouble(d -> d).sum() / rates.size(); + assertThat((double) values.get(0).get(1), closeTo(avg, 0.1)); + assertThat((double) values.get(0).get(2), closeTo(rates.stream().mapToDouble(d -> d * 60.0).max().orElse(0.0), 0.1)); + } + try (var resp = run("METRICS hosts avg(rate(request_count)), avg(rate(request_count, 1second))")) { + assertThat( + resp.columns(), + equalTo( + List.of( + new ColumnInfoImpl("avg(rate(request_count))", "double"), + new ColumnInfoImpl("avg(rate(request_count, 1second))", "double") + ) + ) + ); + List> values = EsqlTestUtils.getValuesList(resp); + assertThat(values, hasSize(1)); + assertThat(values.get(0), hasSize(2)); + final double avg = rates.isEmpty() ? 
0.0 : rates.stream().mapToDouble(d -> d).sum() / rates.size(); + assertThat((double) values.get(0).get(0), closeTo(avg, 0.1)); + assertThat((double) values.get(0).get(1), closeTo(avg, 0.1)); + } + try (var resp = run("METRICS hosts max(rate(request_count)), min(rate(request_count)), min(cpu), max(cpu)")) { + assertThat( + resp.columns(), + equalTo( + List.of( + new ColumnInfoImpl("max(rate(request_count))", "double"), + new ColumnInfoImpl("min(rate(request_count))", "double"), + new ColumnInfoImpl("min(cpu)", "double"), + new ColumnInfoImpl("max(cpu)", "double") + ) + ) + ); + List> values = EsqlTestUtils.getValuesList(resp); + assertThat(values, hasSize(1)); + assertThat(values.get(0), hasSize(4)); + assertThat((double) values.get(0).get(0), closeTo(rates.stream().mapToDouble(d -> d).max().orElse(0.0), 0.1)); + assertThat((double) values.get(0).get(1), closeTo(rates.stream().mapToDouble(d -> d).min().orElse(0.0), 0.1)); + double minCpu = docs.stream().mapToDouble(d -> d.cpu).min().orElse(Long.MAX_VALUE); + double maxCpu = docs.stream().mapToDouble(d -> d.cpu).max().orElse(Long.MIN_VALUE); + assertThat((double) values.get(0).get(2), closeTo(minCpu, 0.1)); + assertThat((double) values.get(0).get(3), closeTo(maxCpu, 0.1)); + } + } + + public void testRateGroupedByCluster() { + record RateKey(String cluster, String host) { + + } + Map> groups = new HashMap<>(); + for (Doc doc : docs) { + RateKey key = new RateKey(doc.cluster, doc.host); + groups.computeIfAbsent(key, k -> new ArrayList<>()).add(new RequestCounter(doc.timestamp, doc.requestCount)); + } + Map> bucketToRates = new HashMap<>(); + for (Map.Entry> e : groups.entrySet()) { + List values = bucketToRates.computeIfAbsent(e.getKey().cluster, k -> new ArrayList<>()); + Double rate = computeRate(e.getValue()); + values.add(Objects.requireNonNullElse(rate, 0.0)); + } + List sortedKeys = bucketToRates.keySet().stream().sorted().toList(); + try (var resp = run("METRICS hosts sum(rate(request_count)) BY cluster | SORT cluster")) { + assertThat( + resp.columns(), + equalTo(List.of(new ColumnInfoImpl("sum(rate(request_count))", "double"), new ColumnInfoImpl("cluster", "keyword"))) + ); + List> values = EsqlTestUtils.getValuesList(resp); + assertThat(values, hasSize(bucketToRates.size())); + for (int i = 0; i < bucketToRates.size(); i++) { + List row = values.get(i); + assertThat(row, hasSize(2)); + String key = sortedKeys.get(i); + assertThat(row.get(1), equalTo(key)); + assertThat((double) row.get(0), closeTo(bucketToRates.get(key).stream().mapToDouble(d -> d).sum(), 0.1)); + } + } + try (var resp = run("METRICS hosts avg(rate(request_count)) BY cluster | SORT cluster")) { + assertThat( + resp.columns(), + equalTo(List.of(new ColumnInfoImpl("avg(rate(request_count))", "double"), new ColumnInfoImpl("cluster", "keyword"))) + ); + List> values = EsqlTestUtils.getValuesList(resp); + assertThat(values, hasSize(bucketToRates.size())); + for (int i = 0; i < bucketToRates.size(); i++) { + List row = values.get(i); + assertThat(row, hasSize(2)); + String key = sortedKeys.get(i); + assertThat(row.get(1), equalTo(key)); + List rates = bucketToRates.get(key); + if (rates.isEmpty()) { + assertThat(row.get(0), equalTo(0.0)); + } else { + double avg = rates.stream().mapToDouble(d -> d).sum() / rates.size(); + assertThat((double) row.get(0), closeTo(avg, 0.1)); + } + } + } + try (var resp = run("METRICS hosts avg(rate(request_count, 1minute)), avg(rate(request_count)) BY cluster | SORT cluster")) { + assertThat( + resp.columns(), + equalTo( + List.of( + new 
ColumnInfoImpl("avg(rate(request_count, 1minute))", "double"), + new ColumnInfoImpl("avg(rate(request_count))", "double"), + new ColumnInfoImpl("cluster", "keyword") + ) + ) + ); + List> values = EsqlTestUtils.getValuesList(resp); + assertThat(values, hasSize(bucketToRates.size())); + for (int i = 0; i < bucketToRates.size(); i++) { + List row = values.get(i); + assertThat(row, hasSize(3)); + String key = sortedKeys.get(i); + assertThat(row.get(2), equalTo(key)); + List rates = bucketToRates.get(key); + if (rates.isEmpty()) { + assertThat(row.get(0), equalTo(0.0)); + assertThat(row.get(1), equalTo(0.0)); + } else { + double avg = rates.stream().mapToDouble(d -> d).sum() / rates.size(); + assertThat((double) row.get(0), closeTo(avg * 60.0f, 0.1)); + assertThat((double) row.get(1), closeTo(avg, 0.1)); + } + } + } + } + + public void testRateWithTimeBucket() { + var rounding = new Rounding.Builder(TimeValue.timeValueSeconds(60)).timeZone(ZoneOffset.UTC).build().prepareForUnknown(); + record RateKey(String host, String cluster, long interval) {} + Map> groups = new HashMap<>(); + for (Doc doc : docs) { + RateKey key = new RateKey(doc.host, doc.cluster, rounding.round(doc.timestamp)); + groups.computeIfAbsent(key, k -> new ArrayList<>()).add(new RequestCounter(doc.timestamp, doc.requestCount)); + } + Map> bucketToRates = new HashMap<>(); + for (Map.Entry> e : groups.entrySet()) { + List values = bucketToRates.computeIfAbsent(e.getKey().interval, k -> new ArrayList<>()); + Double rate = computeRate(e.getValue()); + if (rate != null) { + values.add(rate); + } + } + List sortedKeys = bucketToRates.keySet().stream().sorted().limit(5).toList(); + try (var resp = run("METRICS hosts sum(rate(request_count)) BY ts=bucket(@timestamp, 1 minute) | SORT ts | LIMIT 5")) { + assertThat( + resp.columns(), + equalTo(List.of(new ColumnInfoImpl("sum(rate(request_count))", "double"), new ColumnInfoImpl("ts", "date"))) + ); + List> values = EsqlTestUtils.getValuesList(resp); + assertThat(values, hasSize(sortedKeys.size())); + for (int i = 0; i < sortedKeys.size(); i++) { + List row = values.get(i); + assertThat(row, hasSize(2)); + long key = sortedKeys.get(i); + assertThat(row.get(1), equalTo(DEFAULT_DATE_TIME_FORMATTER.formatMillis(key))); + List bucketValues = bucketToRates.get(key); + if (bucketValues.isEmpty()) { + assertNull(row.get(0)); + } else { + assertThat((double) row.get(0), closeTo(bucketValues.stream().mapToDouble(d -> d).sum(), 0.1)); + } + } + } + try (var resp = run("METRICS hosts avg(rate(request_count)) BY ts=bucket(@timestamp, 1minute) | SORT ts | LIMIT 5")) { + assertThat( + resp.columns(), + equalTo(List.of(new ColumnInfoImpl("avg(rate(request_count))", "double"), new ColumnInfoImpl("ts", "date"))) + ); + List> values = EsqlTestUtils.getValuesList(resp); + assertThat(values, hasSize(sortedKeys.size())); + for (int i = 0; i < sortedKeys.size(); i++) { + List row = values.get(i); + assertThat(row, hasSize(2)); + long key = sortedKeys.get(i); + assertThat(row.get(1), equalTo(DEFAULT_DATE_TIME_FORMATTER.formatMillis(key))); + List bucketValues = bucketToRates.get(key); + if (bucketValues.isEmpty()) { + assertNull(row.get(0)); + } else { + double avg = bucketValues.stream().mapToDouble(d -> d).sum() / bucketValues.size(); + assertThat((double) row.get(0), closeTo(avg, 0.1)); + } + } + } + try (var resp = run(""" + METRICS hosts avg(rate(request_count, 1minute)), avg(rate(request_count)) BY ts=bucket(@timestamp, 1minute) + | SORT ts + | LIMIT 5 + """)) { + assertThat( + resp.columns(), + equalTo( + 
List.of( + new ColumnInfoImpl("avg(rate(request_count, 1minute))", "double"), + new ColumnInfoImpl("avg(rate(request_count))", "double"), + new ColumnInfoImpl("ts", "date") + ) + ) + ); + List> values = EsqlTestUtils.getValuesList(resp); + assertThat(values, hasSize(sortedKeys.size())); + for (int i = 0; i < sortedKeys.size(); i++) { + List row = values.get(i); + assertThat(row, hasSize(3)); + long key = sortedKeys.get(i); + assertThat(row.get(2), equalTo(DEFAULT_DATE_TIME_FORMATTER.formatMillis(key))); + List bucketValues = bucketToRates.get(key); + if (bucketValues.isEmpty()) { + assertNull(row.get(0)); + assertNull(row.get(1)); + } else { + double avg = bucketValues.stream().mapToDouble(d -> d).sum() / bucketValues.size(); + assertThat((double) row.get(0), closeTo(avg * 60.0f, 0.1)); + assertThat((double) row.get(1), closeTo(avg, 0.1)); + } + } + } + } + + public void testRateWithTimeBucketAndCluster() { + var rounding = new Rounding.Builder(TimeValue.timeValueSeconds(60)).timeZone(ZoneOffset.UTC).build().prepareForUnknown(); + record RateKey(String host, String cluster, long interval) {} + Map> groups = new HashMap<>(); + for (Doc doc : docs) { + RateKey key = new RateKey(doc.host, doc.cluster, rounding.round(doc.timestamp)); + groups.computeIfAbsent(key, k -> new ArrayList<>()).add(new RequestCounter(doc.timestamp, doc.requestCount)); + } + record GroupKey(String cluster, long interval) {} + Map> rateBuckets = new HashMap<>(); + for (Map.Entry> e : groups.entrySet()) { + RateKey key = e.getKey(); + List values = rateBuckets.computeIfAbsent(new GroupKey(key.cluster, key.interval), k -> new ArrayList<>()); + Double rate = computeRate(e.getValue()); + if (rate != null) { + values.add(rate); + } + } + Map> cpuBuckets = new HashMap<>(); + for (Doc doc : docs) { + GroupKey key = new GroupKey(doc.cluster, rounding.round(doc.timestamp)); + cpuBuckets.computeIfAbsent(key, k -> new ArrayList<>()).add(doc.cpu); + } + List sortedKeys = rateBuckets.keySet() + .stream() + .sorted(Comparator.comparing(GroupKey::interval).thenComparing(GroupKey::cluster)) + .limit(5) + .toList(); + try (var resp = run(""" + METRICS hosts sum(rate(request_count)) BY ts=bucket(@timestamp, 1 minute), cluster + | SORT ts, cluster + | LIMIT 5""")) { + assertThat( + resp.columns(), + equalTo( + List.of( + new ColumnInfoImpl("sum(rate(request_count))", "double"), + new ColumnInfoImpl("ts", "date"), + new ColumnInfoImpl("cluster", "keyword") + ) + ) + ); + List> values = EsqlTestUtils.getValuesList(resp); + assertThat(values, hasSize(sortedKeys.size())); + for (int i = 0; i < sortedKeys.size(); i++) { + List row = values.get(i); + assertThat(row, hasSize(3)); + var key = sortedKeys.get(i); + assertThat(row.get(1), equalTo(DEFAULT_DATE_TIME_FORMATTER.formatMillis(key.interval))); + assertThat(row.get(2), equalTo(key.cluster)); + List bucketValues = rateBuckets.get(key); + if (bucketValues.isEmpty()) { + assertNull(row.get(0)); + } else { + assertThat((double) row.get(0), closeTo(bucketValues.stream().mapToDouble(d -> d).sum(), 0.1)); + } + } + } + try (var resp = run(""" + METRICS hosts avg(rate(request_count)) BY ts=bucket(@timestamp, 1minute), cluster + | SORT ts, cluster + | LIMIT 5""")) { + assertThat( + resp.columns(), + equalTo( + List.of( + new ColumnInfoImpl("avg(rate(request_count))", "double"), + new ColumnInfoImpl("ts", "date"), + new ColumnInfoImpl("cluster", "keyword") + ) + ) + ); + List> values = EsqlTestUtils.getValuesList(resp); + assertThat(values, hasSize(sortedKeys.size())); + for (int i = 0; i < 
sortedKeys.size(); i++) { + List row = values.get(i); + assertThat(row, hasSize(3)); + var key = sortedKeys.get(i); + assertThat(row.get(1), equalTo(DEFAULT_DATE_TIME_FORMATTER.formatMillis(key.interval))); + assertThat(row.get(2), equalTo(key.cluster)); + List bucketValues = rateBuckets.get(key); + if (bucketValues.isEmpty()) { + assertNull(row.get(0)); + } else { + double avg = bucketValues.stream().mapToDouble(d -> d).sum() / bucketValues.size(); + assertThat((double) row.get(0), closeTo(avg, 0.1)); + } + } + } + try (var resp = run(""" + METRICS hosts avg(rate(request_count, 1minute)), avg(rate(request_count)) BY ts=bucket(@timestamp, 1minute), cluster + | SORT ts, cluster + | LIMIT 5""")) { + assertThat( + resp.columns(), + equalTo( + List.of( + new ColumnInfoImpl("avg(rate(request_count, 1minute))", "double"), + new ColumnInfoImpl("avg(rate(request_count))", "double"), + new ColumnInfoImpl("ts", "date"), + new ColumnInfoImpl("cluster", "keyword") + ) + ) + ); + List> values = EsqlTestUtils.getValuesList(resp); + assertThat(values, hasSize(sortedKeys.size())); + for (int i = 0; i < sortedKeys.size(); i++) { + List row = values.get(i); + assertThat(row, hasSize(4)); + var key = sortedKeys.get(i); + assertThat(row.get(2), equalTo(DEFAULT_DATE_TIME_FORMATTER.formatMillis(key.interval))); + assertThat(row.get(3), equalTo(key.cluster)); + List bucketValues = rateBuckets.get(key); + if (bucketValues.isEmpty()) { + assertNull(row.get(0)); + assertNull(row.get(1)); + } else { + double avg = bucketValues.stream().mapToDouble(d -> d).sum() / bucketValues.size(); + assertThat((double) row.get(0), closeTo(avg * 60.0f, 0.1)); + assertThat((double) row.get(1), closeTo(avg, 0.1)); + } + } + } + try (var resp = run(""" + METRICS hosts + s = sum(rate(request_count)), + c = count(rate(request_count)), + max(rate(request_count)), + avg(rate(request_count)) + BY ts=bucket(@timestamp, 1minute), cluster + | SORT ts, cluster + | LIMIT 5 + | EVAL avg_rate= s/c + | KEEP avg_rate, `max(rate(request_count))`, `avg(rate(request_count))`, ts, cluster + """)) { + assertThat( + resp.columns(), + equalTo( + List.of( + new ColumnInfoImpl("avg_rate", "double"), + new ColumnInfoImpl("max(rate(request_count))", "double"), + new ColumnInfoImpl("avg(rate(request_count))", "double"), + new ColumnInfoImpl("ts", "date"), + new ColumnInfoImpl("cluster", "keyword") + ) + ) + ); + List> values = EsqlTestUtils.getValuesList(resp); + assertThat(values, hasSize(sortedKeys.size())); + for (int i = 0; i < sortedKeys.size(); i++) { + List row = values.get(i); + assertThat(row, hasSize(5)); + var key = sortedKeys.get(i); + assertThat(row.get(3), equalTo(DEFAULT_DATE_TIME_FORMATTER.formatMillis(key.interval))); + assertThat(row.get(4), equalTo(key.cluster)); + List bucketValues = rateBuckets.get(key); + if (bucketValues.isEmpty()) { + assertNull(row.get(0)); + assertNull(row.get(1)); + } else { + double avg = bucketValues.stream().mapToDouble(d -> d).sum() / bucketValues.size(); + assertThat((double) row.get(0), closeTo(avg, 0.1)); + double max = bucketValues.stream().mapToDouble(d -> d).max().orElse(0.0); + assertThat((double) row.get(1), closeTo(max, 0.1)); + } + assertEquals(row.get(0), row.get(2)); + } + } + try (var resp = run(""" + METRICS hosts sum(rate(request_count)), max(cpu) BY ts=bucket(@timestamp, 1 minute), cluster + | SORT ts, cluster + | LIMIT 5""")) { + assertThat( + resp.columns(), + equalTo( + List.of( + new ColumnInfoImpl("sum(rate(request_count))", "double"), + new ColumnInfoImpl("max(cpu)", "double"), + new 
ColumnInfoImpl("ts", "date"), + new ColumnInfoImpl("cluster", "keyword") + ) + ) + ); + List> values = EsqlTestUtils.getValuesList(resp); + assertThat(values, hasSize(sortedKeys.size())); + for (int i = 0; i < sortedKeys.size(); i++) { + List row = values.get(i); + assertThat(row, hasSize(4)); + var key = sortedKeys.get(i); + assertThat(row.get(2), equalTo(DEFAULT_DATE_TIME_FORMATTER.formatMillis(key.interval))); + assertThat(row.get(3), equalTo(key.cluster)); + List rateBucket = rateBuckets.get(key); + if (rateBucket.isEmpty()) { + assertNull(row.get(0)); + } else { + assertThat((double) row.get(0), closeTo(rateBucket.stream().mapToDouble(d -> d).sum(), 0.1)); + } + List cpuBucket = cpuBuckets.get(key); + if (cpuBuckets.isEmpty()) { + assertNull(row.get(1)); + } else { + assertThat((double) row.get(1), closeTo(cpuBucket.stream().mapToDouble(d -> d).max().orElse(0.0), 0.1)); + } + } + } + try (var resp = run(""" + METRICS hosts sum(rate(request_count)), avg(cpu) BY ts=bucket(@timestamp, 1 minute), cluster + | SORT ts, cluster + | LIMIT 5""")) { + assertThat( + resp.columns(), + equalTo( + List.of( + new ColumnInfoImpl("sum(rate(request_count))", "double"), + new ColumnInfoImpl("avg(cpu)", "double"), + new ColumnInfoImpl("ts", "date"), + new ColumnInfoImpl("cluster", "keyword") + ) + ) + ); + List> values = EsqlTestUtils.getValuesList(resp); + assertThat(values, hasSize(sortedKeys.size())); + for (int i = 0; i < sortedKeys.size(); i++) { + List row = values.get(i); + assertThat(row, hasSize(4)); + var key = sortedKeys.get(i); + assertThat(row.get(2), equalTo(DEFAULT_DATE_TIME_FORMATTER.formatMillis(key.interval))); + assertThat(row.get(3), equalTo(key.cluster)); + List rateBucket = rateBuckets.get(key); + if (rateBucket.isEmpty()) { + assertNull(row.get(0)); + } else { + assertThat((double) row.get(0), closeTo(rateBucket.stream().mapToDouble(d -> d).sum(), 0.1)); + } + List cpuBucket = cpuBuckets.get(key); + if (cpuBuckets.isEmpty()) { + assertNull(row.get(1)); + } else { + double avg = cpuBucket.stream().mapToDouble(d -> d).sum() / cpuBucket.size(); + assertThat((double) row.get(1), closeTo(avg, 0.1)); + } + } + } + } + + public void testApplyRateBeforeFinalGrouping() { + record RateKey(String cluster, String host) { + + } + Map> groups = new HashMap<>(); + for (Doc doc : docs) { + RateKey key = new RateKey(doc.cluster, doc.host); + groups.computeIfAbsent(key, k -> new ArrayList<>()).add(new RequestCounter(doc.timestamp, doc.requestCount)); + } + List rates = new ArrayList<>(); + for (List group : groups.values()) { + Double v = computeRate(group); + if (v != null) { + rates.add(v); + } + } + try (var resp = run("METRICS hosts sum(abs(rate(request_count, 1second)))")) { + assertThat(resp.columns(), equalTo(List.of(new ColumnInfoImpl("sum(abs(rate(request_count, 1second)))", "double")))); + List> values = EsqlTestUtils.getValuesList(resp); + assertThat(values, hasSize(1)); + assertThat(values.get(0), hasSize(1)); + assertThat((double) values.get(0).get(0), closeTo(rates.stream().mapToDouble(d -> d).sum(), 0.1)); + } + try (var resp = run("METRICS hosts sum(10.0 * rate(request_count, 1second))")) { + assertThat(resp.columns(), equalTo(List.of(new ColumnInfoImpl("sum(10.0 * rate(request_count, 1second))", "double")))); + List> values = EsqlTestUtils.getValuesList(resp); + assertThat(values, hasSize(1)); + assertThat(values.get(0), hasSize(1)); + assertThat((double) values.get(0).get(0), closeTo(rates.stream().mapToDouble(d -> d * 10.0).sum(), 0.1)); + } + try (var resp = run("METRICS hosts 
sum(20 * rate(request_count, 1second) + 10 * floor(rate(request_count, 1second)))")) { + assertThat( + resp.columns(), + equalTo( + List.of( + new ColumnInfoImpl("sum(20 * rate(request_count, 1second) + 10 * floor(rate(request_count, 1second)))", "double") + ) + ) + ); + List> values = EsqlTestUtils.getValuesList(resp); + assertThat(values, hasSize(1)); + assertThat(values.get(0), hasSize(1)); + assertThat((double) values.get(0).get(0), closeTo(rates.stream().mapToDouble(d -> 20. * d + 10.0 * Math.floor(d)).sum(), 0.1)); + } + } + + public void testIndexMode() { + createIndex("events"); + int numDocs = between(1, 10); + for (int i = 0; i < numDocs; i++) { + index("events", Integer.toString(i), Map.of("v", i)); + } + refresh("events"); + List columns = List.of( + new ColumnInfoImpl("_index", DataType.KEYWORD), + new ColumnInfoImpl("_index_mode", DataType.KEYWORD) + ); + try (EsqlQueryResponse resp = run(""" + FROM events,hosts METADATA _index_mode, _index + | WHERE _index_mode == "time_series" + | STATS BY _index, _index_mode + """)) { + assertThat(resp.columns(), equalTo(columns)); + List> values = EsqlTestUtils.getValuesList(resp); + assertThat(values, hasSize(1)); + assertThat(values, equalTo(List.of(List.of("hosts", "time_series")))); + } + try (EsqlQueryResponse resp = run(""" + FROM events,hosts METADATA _index_mode, _index + | WHERE _index_mode == "standard" + | STATS BY _index, _index_mode + """)) { + assertThat(resp.columns(), equalTo(columns)); + List> values = EsqlTestUtils.getValuesList(resp); + assertThat(values, hasSize(1)); + assertThat(values, equalTo(List.of(List.of("events", "standard")))); + } + try (EsqlQueryResponse resp = run(""" + FROM events,hosts METADATA _index_mode, _index + | STATS BY _index, _index_mode + | SORT _index + """)) { + assertThat(resp.columns(), equalTo(columns)); + List> values = EsqlTestUtils.getValuesList(resp); + assertThat(values, hasSize(2)); + assertThat(values, equalTo(List.of(List.of("events", "standard"), List.of("hosts", "time_series")))); + } + } } diff --git a/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.g4 b/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.g4 index 61ce9bd9152e8..25b5cae1acdd0 100644 --- a/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.g4 +++ b/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.g4 @@ -34,13 +34,15 @@ WS : [ \r\n\t]+ -> channel(HIDDEN) ; -fragment INDEX_UNQUOTED_IDENTIFIER_PART - : ~[=`|,[\]/ \t\r\n] - | '/' ~[*/] // allow single / but not followed by another / or * which would start a comment +// in 8.14 ` were not allowed +// this has been relaxed in 8.15 since " is used for quoting +fragment UNQUOTED_SOURCE_PART + : ~[:"=|,[\]/ \t\r\n] + | '/' ~[*/] // allow single / but not followed by another / or * which would start a comment -- used in index pattern date spec ; -INDEX_UNQUOTED_IDENTIFIER - : INDEX_UNQUOTED_IDENTIFIER_PART+ +UNQUOTED_SOURCE + : UNQUOTED_SOURCE_PART+ ; // @@ -202,15 +204,13 @@ mode FROM_MODE; FROM_PIPE : PIPE -> type(PIPE), popMode; FROM_OPENING_BRACKET : OPENING_BRACKET -> type(OPENING_BRACKET); FROM_CLOSING_BRACKET : CLOSING_BRACKET -> type(CLOSING_BRACKET); +FROM_COLON : COLON -> type(COLON); FROM_COMMA : COMMA -> type(COMMA); FROM_ASSIGN : ASSIGN -> type(ASSIGN); -FROM_QUOTED_STRING : QUOTED_STRING -> type(QUOTED_STRING); - METADATA : 'metadata'; -FROM_INDEX_UNQUOTED_IDENTIFIER - : INDEX_UNQUOTED_IDENTIFIER -> type(INDEX_UNQUOTED_IDENTIFIER) - ; +FROM_UNQUOTED_SOURCE : UNQUOTED_SOURCE -> type(UNQUOTED_SOURCE); +FROM_QUOTED_SOURCE : QUOTED_STRING -> type(QUOTED_STRING); 
FROM_LINE_COMMENT : LINE_COMMENT -> channel(HIDDEN) @@ -301,10 +301,6 @@ ENRICH_POLICY_NAME : (ENRICH_POLICY_NAME_BODY+ COLON)? ENRICH_POLICY_NAME_BODY+ ; -ENRICH_QUOTED_IDENTIFIER - : QUOTED_IDENTIFIER -> type(QUOTED_IDENTIFIER) - ; - ENRICH_MODE_UNQUOTED_VALUE : ENRICH_POLICY_NAME -> type(ENRICH_POLICY_NAME) ; @@ -321,7 +317,7 @@ ENRICH_WS : WS -> channel(HIDDEN) ; -// submode for Enrich to allow different lexing between policy identifier (loose) and field identifiers +// submode for Enrich to allow different lexing between policy source (loose) and field identifiers mode ENRICH_FIELD_MODE; ENRICH_FIELD_PIPE : PIPE -> type(PIPE), popMode, popMode; ENRICH_FIELD_ASSIGN : ASSIGN -> type(ASSIGN); @@ -353,13 +349,13 @@ ENRICH_FIELD_WS // LOOKUP ON key mode LOOKUP_MODE; LOOKUP_PIPE : PIPE -> type(PIPE), popMode; +LOOKUP_COLON : COLON -> type(COLON); LOOKUP_COMMA : COMMA -> type(COMMA); LOOKUP_DOT: DOT -> type(DOT); LOOKUP_ON : ON -> type(ON), pushMode(LOOKUP_FIELD_MODE); -LOOKUP_INDEX_UNQUOTED_IDENTIFIER - : INDEX_UNQUOTED_IDENTIFIER -> type(INDEX_UNQUOTED_IDENTIFIER) - ; +LOOKUP_UNQUOTED_SOURCE: UNQUOTED_SOURCE -> type(UNQUOTED_SOURCE); +LOOKUP_QUOTED_SOURCE : QUOTED_STRING -> type(QUOTED_STRING); LOOKUP_LINE_COMMENT : LINE_COMMENT -> channel(HIDDEN) @@ -486,9 +482,8 @@ SETTING_WS mode METRICS_MODE; METRICS_PIPE : PIPE -> type(PIPE), popMode; -METRICS_INDEX_UNQUOTED_IDENTIFIER - : INDEX_UNQUOTED_IDENTIFIER -> type(INDEX_UNQUOTED_IDENTIFIER), popMode, pushMode(CLOSING_METRICS_MODE) - ; +METRICS_UNQUOTED_SOURCE: UNQUOTED_SOURCE -> type(UNQUOTED_SOURCE), popMode, pushMode(CLOSING_METRICS_MODE); +METRICS_QUOTED_SOURCE : QUOTED_STRING -> type(QUOTED_STRING), popMode, pushMode(CLOSING_METRICS_MODE); METRICS_LINE_COMMENT : LINE_COMMENT -> channel(HIDDEN) @@ -505,6 +500,10 @@ METRICS_WS // TODO: remove this workaround mode - see https://github.com/elastic/elasticsearch/issues/108528 mode CLOSING_METRICS_MODE; +CLOSING_METRICS_COLON + : COLON -> type(COLON), popMode, pushMode(METRICS_MODE) + ; + CLOSING_METRICS_COMMA : COMMA -> type(COMMA), popMode, pushMode(METRICS_MODE) ; diff --git a/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.tokens b/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.tokens index 04798fc3dca8a..63eb3a86419a3 100644 --- a/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.tokens +++ b/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.tokens @@ -22,7 +22,7 @@ UNKNOWN_CMD=21 LINE_COMMENT=22 MULTILINE_COMMENT=23 WS=24 -INDEX_UNQUOTED_IDENTIFIER=25 +UNQUOTED_SOURCE=25 EXPLAIN_WS=26 EXPLAIN_LINE_COMMENT=27 EXPLAIN_MULTILINE_COMMENT=28 diff --git a/x-pack/plugin/esql/src/main/antlr/EsqlBaseParser.g4 b/x-pack/plugin/esql/src/main/antlr/EsqlBaseParser.g4 index 69d65ea9a214b..89059822d367b 100644 --- a/x-pack/plugin/esql/src/main/antlr/EsqlBaseParser.g4 +++ b/x-pack/plugin/esql/src/main/antlr/EsqlBaseParser.g4 @@ -106,11 +106,21 @@ field ; fromCommand - : FROM indexIdentifier (COMMA indexIdentifier)* metadata? + : FROM indexPattern (COMMA indexPattern)* metadata? ; -indexIdentifier - : INDEX_UNQUOTED_IDENTIFIER +indexPattern + : clusterString COLON indexString + | indexString + ; + +clusterString + : UNQUOTED_SOURCE + ; + +indexString + : UNQUOTED_SOURCE + | QUOTED_STRING ; metadata @@ -119,7 +129,7 @@ metadata ; metadataOption - : METADATA indexIdentifier (COMMA indexIdentifier)* + : METADATA UNQUOTED_SOURCE (COMMA UNQUOTED_SOURCE)* ; deprecated_metadata @@ -127,7 +137,7 @@ deprecated_metadata ; metricsCommand - : METRICS indexIdentifier (COMMA indexIdentifier)* aggregates=fields? 
(BY grouping=fields)? + : METRICS indexPattern (COMMA indexPattern)* aggregates=fields? (BY grouping=fields)? ; evalCommand @@ -280,5 +290,5 @@ enrichWithClause ; lookupCommand - : LOOKUP tableName=INDEX_UNQUOTED_IDENTIFIER ON matchFields=qualifiedNamePatterns + : LOOKUP tableName=indexPattern ON matchFields=qualifiedNamePatterns ; diff --git a/x-pack/plugin/esql/src/main/antlr/EsqlBaseParser.tokens b/x-pack/plugin/esql/src/main/antlr/EsqlBaseParser.tokens index 04798fc3dca8a..63eb3a86419a3 100644 --- a/x-pack/plugin/esql/src/main/antlr/EsqlBaseParser.tokens +++ b/x-pack/plugin/esql/src/main/antlr/EsqlBaseParser.tokens @@ -22,7 +22,7 @@ UNKNOWN_CMD=21 LINE_COMMENT=22 MULTILINE_COMMENT=23 WS=24 -INDEX_UNQUOTED_IDENTIFIER=25 +UNQUOTED_SOURCE=25 EXPLAIN_WS=26 EXPLAIN_LINE_COMMENT=27 EXPLAIN_MULTILINE_COMMENT=28 diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceCartesianPointDocValuesAndConstantEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceCartesianPointDocValuesAndConstantEvaluator.java new file mode 100644 index 0000000000000..21b987f830a2c --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceCartesianPointDocValuesAndConstantEvaluator.java @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.Warnings; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link StDistance}. + * This class is generated. Do not edit it. 
+ */ +public final class StDistanceCartesianPointDocValuesAndConstantEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator leftValue; + + private final Point rightValue; + + private final DriverContext driverContext; + + public StDistanceCartesianPointDocValuesAndConstantEvaluator(Source source, + EvalOperator.ExpressionEvaluator leftValue, Point rightValue, DriverContext driverContext) { + this.leftValue = leftValue; + this.rightValue = rightValue; + this.driverContext = driverContext; + this.warnings = Warnings.createWarnings(driverContext.warningsMode(), source); + } + + @Override + public Block eval(Page page) { + try (LongBlock leftValueBlock = (LongBlock) leftValue.eval(page)) { + LongVector leftValueVector = leftValueBlock.asVector(); + if (leftValueVector == null) { + return eval(page.getPositionCount(), leftValueBlock); + } + return eval(page.getPositionCount(), leftValueVector); + } + } + + public DoubleBlock eval(int positionCount, LongBlock leftValueBlock) { + try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + if (leftValueBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (leftValueBlock.getValueCount(p) != 1) { + if (leftValueBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + try { + result.appendDouble(StDistance.processCartesianPointDocValuesAndConstant(leftValueBlock.getLong(leftValueBlock.getFirstValueIndex(p)), rightValue)); + } catch (IllegalArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + public DoubleBlock eval(int positionCount, LongVector leftValueVector) { + try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + try { + result.appendDouble(StDistance.processCartesianPointDocValuesAndConstant(leftValueVector.getLong(p), rightValue)); + } catch (IllegalArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "StDistanceCartesianPointDocValuesAndConstantEvaluator[" + "leftValue=" + leftValue + ", rightValue=" + rightValue + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(leftValue); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory leftValue; + + private final Point rightValue; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory leftValue, + Point rightValue) { + this.source = source; + this.leftValue = leftValue; + this.rightValue = rightValue; + } + + @Override + public StDistanceCartesianPointDocValuesAndConstantEvaluator get(DriverContext context) { + return new StDistanceCartesianPointDocValuesAndConstantEvaluator(source, leftValue.get(context), rightValue, context); + } + + @Override + public String toString() { + return "StDistanceCartesianPointDocValuesAndConstantEvaluator[" + "leftValue=" + leftValue + ", rightValue=" + rightValue + "]"; + } + } +} diff --git 
a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceCartesianPointDocValuesAndSourceEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceCartesianPointDocValuesAndSourceEvaluator.java new file mode 100644 index 0000000000000..13523eebdb2aa --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceCartesianPointDocValuesAndSourceEvaluator.java @@ -0,0 +1,142 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.Warnings; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link StDistance}. + * This class is generated. Do not edit it. 
+ */ +public final class StDistanceCartesianPointDocValuesAndSourceEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator leftValue; + + private final EvalOperator.ExpressionEvaluator rightValue; + + private final DriverContext driverContext; + + public StDistanceCartesianPointDocValuesAndSourceEvaluator(Source source, + EvalOperator.ExpressionEvaluator leftValue, EvalOperator.ExpressionEvaluator rightValue, + DriverContext driverContext) { + this.leftValue = leftValue; + this.rightValue = rightValue; + this.driverContext = driverContext; + this.warnings = Warnings.createWarnings(driverContext.warningsMode(), source); + } + + @Override + public Block eval(Page page) { + try (LongBlock leftValueBlock = (LongBlock) leftValue.eval(page)) { + try (BytesRefBlock rightValueBlock = (BytesRefBlock) rightValue.eval(page)) { + LongVector leftValueVector = leftValueBlock.asVector(); + if (leftValueVector == null) { + return eval(page.getPositionCount(), leftValueBlock, rightValueBlock); + } + BytesRefVector rightValueVector = rightValueBlock.asVector(); + if (rightValueVector == null) { + return eval(page.getPositionCount(), leftValueBlock, rightValueBlock); + } + return eval(page.getPositionCount(), leftValueVector, rightValueVector).asBlock(); + } + } + } + + public DoubleBlock eval(int positionCount, LongBlock leftValueBlock, + BytesRefBlock rightValueBlock) { + try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + BytesRef rightValueScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + if (leftValueBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (leftValueBlock.getValueCount(p) != 1) { + if (leftValueBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (rightValueBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (rightValueBlock.getValueCount(p) != 1) { + if (rightValueBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + result.appendDouble(StDistance.processCartesianPointDocValuesAndSource(leftValueBlock.getLong(leftValueBlock.getFirstValueIndex(p)), rightValueBlock.getBytesRef(rightValueBlock.getFirstValueIndex(p), rightValueScratch))); + } + return result.build(); + } + } + + public DoubleVector eval(int positionCount, LongVector leftValueVector, + BytesRefVector rightValueVector) { + try(DoubleVector.FixedBuilder result = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { + BytesRef rightValueScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + result.appendDouble(p, StDistance.processCartesianPointDocValuesAndSource(leftValueVector.getLong(p), rightValueVector.getBytesRef(p, rightValueScratch))); + } + return result.build(); + } + } + + @Override + public String toString() { + return "StDistanceCartesianPointDocValuesAndSourceEvaluator[" + "leftValue=" + leftValue + ", rightValue=" + rightValue + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(leftValue, rightValue); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final 
EvalOperator.ExpressionEvaluator.Factory leftValue; + + private final EvalOperator.ExpressionEvaluator.Factory rightValue; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory leftValue, + EvalOperator.ExpressionEvaluator.Factory rightValue) { + this.source = source; + this.leftValue = leftValue; + this.rightValue = rightValue; + } + + @Override + public StDistanceCartesianPointDocValuesAndSourceEvaluator get(DriverContext context) { + return new StDistanceCartesianPointDocValuesAndSourceEvaluator(source, leftValue.get(context), rightValue.get(context), context); + } + + @Override + public String toString() { + return "StDistanceCartesianPointDocValuesAndSourceEvaluator[" + "leftValue=" + leftValue + ", rightValue=" + rightValue + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceCartesianSourceAndConstantEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceCartesianSourceAndConstantEvaluator.java new file mode 100644 index 0000000000000..23416e56788b6 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceCartesianSourceAndConstantEvaluator.java @@ -0,0 +1,131 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import java.io.IOException; +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.Warnings; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link StDistance}. + * This class is generated. Do not edit it. 
+ */ +public final class StDistanceCartesianSourceAndConstantEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator leftValue; + + private final Point rightValue; + + private final DriverContext driverContext; + + public StDistanceCartesianSourceAndConstantEvaluator(Source source, + EvalOperator.ExpressionEvaluator leftValue, Point rightValue, DriverContext driverContext) { + this.leftValue = leftValue; + this.rightValue = rightValue; + this.driverContext = driverContext; + this.warnings = Warnings.createWarnings(driverContext.warningsMode(), source); + } + + @Override + public Block eval(Page page) { + try (BytesRefBlock leftValueBlock = (BytesRefBlock) leftValue.eval(page)) { + BytesRefVector leftValueVector = leftValueBlock.asVector(); + if (leftValueVector == null) { + return eval(page.getPositionCount(), leftValueBlock); + } + return eval(page.getPositionCount(), leftValueVector); + } + } + + public DoubleBlock eval(int positionCount, BytesRefBlock leftValueBlock) { + try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + BytesRef leftValueScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + if (leftValueBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (leftValueBlock.getValueCount(p) != 1) { + if (leftValueBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + try { + result.appendDouble(StDistance.processCartesianSourceAndConstant(leftValueBlock.getBytesRef(leftValueBlock.getFirstValueIndex(p), leftValueScratch), rightValue)); + } catch (IllegalArgumentException | IOException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + public DoubleBlock eval(int positionCount, BytesRefVector leftValueVector) { + try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + BytesRef leftValueScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + try { + result.appendDouble(StDistance.processCartesianSourceAndConstant(leftValueVector.getBytesRef(p, leftValueScratch), rightValue)); + } catch (IllegalArgumentException | IOException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "StDistanceCartesianSourceAndConstantEvaluator[" + "leftValue=" + leftValue + ", rightValue=" + rightValue + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(leftValue); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory leftValue; + + private final Point rightValue; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory leftValue, + Point rightValue) { + this.source = source; + this.leftValue = leftValue; + this.rightValue = rightValue; + } + + @Override + public StDistanceCartesianSourceAndConstantEvaluator get(DriverContext context) { + return new StDistanceCartesianSourceAndConstantEvaluator(source, leftValue.get(context), rightValue, context); + } + + @Override + public String toString() { + return "StDistanceCartesianSourceAndConstantEvaluator[" + "leftValue=" + leftValue + ", rightValue=" + 
rightValue + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceCartesianSourceAndSourceEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceCartesianSourceAndSourceEvaluator.java new file mode 100644 index 0000000000000..537275d14d3a1 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceCartesianSourceAndSourceEvaluator.java @@ -0,0 +1,152 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import java.io.IOException; +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.Warnings; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link StDistance}. + * This class is generated. Do not edit it. + */ +public final class StDistanceCartesianSourceAndSourceEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator leftValue; + + private final EvalOperator.ExpressionEvaluator rightValue; + + private final DriverContext driverContext; + + public StDistanceCartesianSourceAndSourceEvaluator(Source source, + EvalOperator.ExpressionEvaluator leftValue, EvalOperator.ExpressionEvaluator rightValue, + DriverContext driverContext) { + this.leftValue = leftValue; + this.rightValue = rightValue; + this.driverContext = driverContext; + this.warnings = Warnings.createWarnings(driverContext.warningsMode(), source); + } + + @Override + public Block eval(Page page) { + try (BytesRefBlock leftValueBlock = (BytesRefBlock) leftValue.eval(page)) { + try (BytesRefBlock rightValueBlock = (BytesRefBlock) rightValue.eval(page)) { + BytesRefVector leftValueVector = leftValueBlock.asVector(); + if (leftValueVector == null) { + return eval(page.getPositionCount(), leftValueBlock, rightValueBlock); + } + BytesRefVector rightValueVector = rightValueBlock.asVector(); + if (rightValueVector == null) { + return eval(page.getPositionCount(), leftValueBlock, rightValueBlock); + } + return eval(page.getPositionCount(), leftValueVector, rightValueVector); + } + } + } + + public DoubleBlock eval(int positionCount, BytesRefBlock leftValueBlock, + BytesRefBlock rightValueBlock) { + try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + BytesRef leftValueScratch = new BytesRef(); + BytesRef rightValueScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + if (leftValueBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if 
(leftValueBlock.getValueCount(p) != 1) { + if (leftValueBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (rightValueBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (rightValueBlock.getValueCount(p) != 1) { + if (rightValueBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + try { + result.appendDouble(StDistance.processCartesianSourceAndSource(leftValueBlock.getBytesRef(leftValueBlock.getFirstValueIndex(p), leftValueScratch), rightValueBlock.getBytesRef(rightValueBlock.getFirstValueIndex(p), rightValueScratch))); + } catch (IllegalArgumentException | IOException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + public DoubleBlock eval(int positionCount, BytesRefVector leftValueVector, + BytesRefVector rightValueVector) { + try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + BytesRef leftValueScratch = new BytesRef(); + BytesRef rightValueScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + try { + result.appendDouble(StDistance.processCartesianSourceAndSource(leftValueVector.getBytesRef(p, leftValueScratch), rightValueVector.getBytesRef(p, rightValueScratch))); + } catch (IllegalArgumentException | IOException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "StDistanceCartesianSourceAndSourceEvaluator[" + "leftValue=" + leftValue + ", rightValue=" + rightValue + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(leftValue, rightValue); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory leftValue; + + private final EvalOperator.ExpressionEvaluator.Factory rightValue; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory leftValue, + EvalOperator.ExpressionEvaluator.Factory rightValue) { + this.source = source; + this.leftValue = leftValue; + this.rightValue = rightValue; + } + + @Override + public StDistanceCartesianSourceAndSourceEvaluator get(DriverContext context) { + return new StDistanceCartesianSourceAndSourceEvaluator(source, leftValue.get(context), rightValue.get(context), context); + } + + @Override + public String toString() { + return "StDistanceCartesianSourceAndSourceEvaluator[" + "leftValue=" + leftValue + ", rightValue=" + rightValue + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceGeoPointDocValuesAndConstantEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceGeoPointDocValuesAndConstantEvaluator.java new file mode 100644 index 0000000000000..3f96c8bf20ab7 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceGeoPointDocValuesAndConstantEvaluator.java @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.Warnings; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link StDistance}. + * This class is generated. Do not edit it. + */ +public final class StDistanceGeoPointDocValuesAndConstantEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator leftValue; + + private final Point rightValue; + + private final DriverContext driverContext; + + public StDistanceGeoPointDocValuesAndConstantEvaluator(Source source, + EvalOperator.ExpressionEvaluator leftValue, Point rightValue, DriverContext driverContext) { + this.leftValue = leftValue; + this.rightValue = rightValue; + this.driverContext = driverContext; + this.warnings = Warnings.createWarnings(driverContext.warningsMode(), source); + } + + @Override + public Block eval(Page page) { + try (LongBlock leftValueBlock = (LongBlock) leftValue.eval(page)) { + LongVector leftValueVector = leftValueBlock.asVector(); + if (leftValueVector == null) { + return eval(page.getPositionCount(), leftValueBlock); + } + return eval(page.getPositionCount(), leftValueVector); + } + } + + public DoubleBlock eval(int positionCount, LongBlock leftValueBlock) { + try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + if (leftValueBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (leftValueBlock.getValueCount(p) != 1) { + if (leftValueBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + try { + result.appendDouble(StDistance.processGeoPointDocValuesAndConstant(leftValueBlock.getLong(leftValueBlock.getFirstValueIndex(p)), rightValue)); + } catch (IllegalArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + public DoubleBlock eval(int positionCount, LongVector leftValueVector) { + try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + try { + result.appendDouble(StDistance.processGeoPointDocValuesAndConstant(leftValueVector.getLong(p), rightValue)); + } catch (IllegalArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "StDistanceGeoPointDocValuesAndConstantEvaluator[" + "leftValue=" + leftValue + ", rightValue=" + rightValue + "]"; + } + + @Override + public void close() { + 
Releasables.closeExpectNoException(leftValue); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory leftValue; + + private final Point rightValue; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory leftValue, + Point rightValue) { + this.source = source; + this.leftValue = leftValue; + this.rightValue = rightValue; + } + + @Override + public StDistanceGeoPointDocValuesAndConstantEvaluator get(DriverContext context) { + return new StDistanceGeoPointDocValuesAndConstantEvaluator(source, leftValue.get(context), rightValue, context); + } + + @Override + public String toString() { + return "StDistanceGeoPointDocValuesAndConstantEvaluator[" + "leftValue=" + leftValue + ", rightValue=" + rightValue + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceGeoPointDocValuesAndSourceEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceGeoPointDocValuesAndSourceEvaluator.java new file mode 100644 index 0000000000000..0bd1c4b6f5bd3 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceGeoPointDocValuesAndSourceEvaluator.java @@ -0,0 +1,151 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.Warnings; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link StDistance}. + * This class is generated. Do not edit it. 
+ */ +public final class StDistanceGeoPointDocValuesAndSourceEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator leftValue; + + private final EvalOperator.ExpressionEvaluator rightValue; + + private final DriverContext driverContext; + + public StDistanceGeoPointDocValuesAndSourceEvaluator(Source source, + EvalOperator.ExpressionEvaluator leftValue, EvalOperator.ExpressionEvaluator rightValue, + DriverContext driverContext) { + this.leftValue = leftValue; + this.rightValue = rightValue; + this.driverContext = driverContext; + this.warnings = Warnings.createWarnings(driverContext.warningsMode(), source); + } + + @Override + public Block eval(Page page) { + try (LongBlock leftValueBlock = (LongBlock) leftValue.eval(page)) { + try (BytesRefBlock rightValueBlock = (BytesRefBlock) rightValue.eval(page)) { + LongVector leftValueVector = leftValueBlock.asVector(); + if (leftValueVector == null) { + return eval(page.getPositionCount(), leftValueBlock, rightValueBlock); + } + BytesRefVector rightValueVector = rightValueBlock.asVector(); + if (rightValueVector == null) { + return eval(page.getPositionCount(), leftValueBlock, rightValueBlock); + } + return eval(page.getPositionCount(), leftValueVector, rightValueVector); + } + } + } + + public DoubleBlock eval(int positionCount, LongBlock leftValueBlock, + BytesRefBlock rightValueBlock) { + try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + BytesRef rightValueScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + if (leftValueBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (leftValueBlock.getValueCount(p) != 1) { + if (leftValueBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (rightValueBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (rightValueBlock.getValueCount(p) != 1) { + if (rightValueBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + try { + result.appendDouble(StDistance.processGeoPointDocValuesAndSource(leftValueBlock.getLong(leftValueBlock.getFirstValueIndex(p)), rightValueBlock.getBytesRef(rightValueBlock.getFirstValueIndex(p), rightValueScratch))); + } catch (IllegalArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + public DoubleBlock eval(int positionCount, LongVector leftValueVector, + BytesRefVector rightValueVector) { + try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + BytesRef rightValueScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + try { + result.appendDouble(StDistance.processGeoPointDocValuesAndSource(leftValueVector.getLong(p), rightValueVector.getBytesRef(p, rightValueScratch))); + } catch (IllegalArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "StDistanceGeoPointDocValuesAndSourceEvaluator[" + "leftValue=" + leftValue + ", rightValue=" + rightValue + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(leftValue, 
rightValue); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory leftValue; + + private final EvalOperator.ExpressionEvaluator.Factory rightValue; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory leftValue, + EvalOperator.ExpressionEvaluator.Factory rightValue) { + this.source = source; + this.leftValue = leftValue; + this.rightValue = rightValue; + } + + @Override + public StDistanceGeoPointDocValuesAndSourceEvaluator get(DriverContext context) { + return new StDistanceGeoPointDocValuesAndSourceEvaluator(source, leftValue.get(context), rightValue.get(context), context); + } + + @Override + public String toString() { + return "StDistanceGeoPointDocValuesAndSourceEvaluator[" + "leftValue=" + leftValue + ", rightValue=" + rightValue + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceGeoSourceAndConstantEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceGeoSourceAndConstantEvaluator.java new file mode 100644 index 0000000000000..556444ac8d740 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceGeoSourceAndConstantEvaluator.java @@ -0,0 +1,131 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import java.io.IOException; +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.Warnings; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link StDistance}. + * This class is generated. Do not edit it. 
+ */ +public final class StDistanceGeoSourceAndConstantEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator leftValue; + + private final Point rightValue; + + private final DriverContext driverContext; + + public StDistanceGeoSourceAndConstantEvaluator(Source source, + EvalOperator.ExpressionEvaluator leftValue, Point rightValue, DriverContext driverContext) { + this.leftValue = leftValue; + this.rightValue = rightValue; + this.driverContext = driverContext; + this.warnings = Warnings.createWarnings(driverContext.warningsMode(), source); + } + + @Override + public Block eval(Page page) { + try (BytesRefBlock leftValueBlock = (BytesRefBlock) leftValue.eval(page)) { + BytesRefVector leftValueVector = leftValueBlock.asVector(); + if (leftValueVector == null) { + return eval(page.getPositionCount(), leftValueBlock); + } + return eval(page.getPositionCount(), leftValueVector); + } + } + + public DoubleBlock eval(int positionCount, BytesRefBlock leftValueBlock) { + try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + BytesRef leftValueScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + if (leftValueBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (leftValueBlock.getValueCount(p) != 1) { + if (leftValueBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + try { + result.appendDouble(StDistance.processGeoSourceAndConstant(leftValueBlock.getBytesRef(leftValueBlock.getFirstValueIndex(p), leftValueScratch), rightValue)); + } catch (IllegalArgumentException | IOException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + public DoubleBlock eval(int positionCount, BytesRefVector leftValueVector) { + try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + BytesRef leftValueScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + try { + result.appendDouble(StDistance.processGeoSourceAndConstant(leftValueVector.getBytesRef(p, leftValueScratch), rightValue)); + } catch (IllegalArgumentException | IOException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "StDistanceGeoSourceAndConstantEvaluator[" + "leftValue=" + leftValue + ", rightValue=" + rightValue + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(leftValue); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory leftValue; + + private final Point rightValue; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory leftValue, + Point rightValue) { + this.source = source; + this.leftValue = leftValue; + this.rightValue = rightValue; + } + + @Override + public StDistanceGeoSourceAndConstantEvaluator get(DriverContext context) { + return new StDistanceGeoSourceAndConstantEvaluator(source, leftValue.get(context), rightValue, context); + } + + @Override + public String toString() { + return "StDistanceGeoSourceAndConstantEvaluator[" + "leftValue=" + leftValue + ", rightValue=" + rightValue + "]"; + } + } +} diff --git 
a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceGeoSourceAndSourceEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceGeoSourceAndSourceEvaluator.java new file mode 100644 index 0000000000000..915c456a63f7c --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceGeoSourceAndSourceEvaluator.java @@ -0,0 +1,152 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import java.io.IOException; +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.Warnings; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link StDistance}. + * This class is generated. Do not edit it. + */ +public final class StDistanceGeoSourceAndSourceEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator leftValue; + + private final EvalOperator.ExpressionEvaluator rightValue; + + private final DriverContext driverContext; + + public StDistanceGeoSourceAndSourceEvaluator(Source source, + EvalOperator.ExpressionEvaluator leftValue, EvalOperator.ExpressionEvaluator rightValue, + DriverContext driverContext) { + this.leftValue = leftValue; + this.rightValue = rightValue; + this.driverContext = driverContext; + this.warnings = Warnings.createWarnings(driverContext.warningsMode(), source); + } + + @Override + public Block eval(Page page) { + try (BytesRefBlock leftValueBlock = (BytesRefBlock) leftValue.eval(page)) { + try (BytesRefBlock rightValueBlock = (BytesRefBlock) rightValue.eval(page)) { + BytesRefVector leftValueVector = leftValueBlock.asVector(); + if (leftValueVector == null) { + return eval(page.getPositionCount(), leftValueBlock, rightValueBlock); + } + BytesRefVector rightValueVector = rightValueBlock.asVector(); + if (rightValueVector == null) { + return eval(page.getPositionCount(), leftValueBlock, rightValueBlock); + } + return eval(page.getPositionCount(), leftValueVector, rightValueVector); + } + } + } + + public DoubleBlock eval(int positionCount, BytesRefBlock leftValueBlock, + BytesRefBlock rightValueBlock) { + try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + BytesRef leftValueScratch = new BytesRef(); + BytesRef rightValueScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + if (leftValueBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (leftValueBlock.getValueCount(p) != 1) { + if (leftValueBlock.getValueCount(p) > 
1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (rightValueBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (rightValueBlock.getValueCount(p) != 1) { + if (rightValueBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + try { + result.appendDouble(StDistance.processGeoSourceAndSource(leftValueBlock.getBytesRef(leftValueBlock.getFirstValueIndex(p), leftValueScratch), rightValueBlock.getBytesRef(rightValueBlock.getFirstValueIndex(p), rightValueScratch))); + } catch (IllegalArgumentException | IOException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + public DoubleBlock eval(int positionCount, BytesRefVector leftValueVector, + BytesRefVector rightValueVector) { + try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + BytesRef leftValueScratch = new BytesRef(); + BytesRef rightValueScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + try { + result.appendDouble(StDistance.processGeoSourceAndSource(leftValueVector.getBytesRef(p, leftValueScratch), rightValueVector.getBytesRef(p, rightValueScratch))); + } catch (IllegalArgumentException | IOException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "StDistanceGeoSourceAndSourceEvaluator[" + "leftValue=" + leftValue + ", rightValue=" + rightValue + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(leftValue, rightValue); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory leftValue; + + private final EvalOperator.ExpressionEvaluator.Factory rightValue; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory leftValue, + EvalOperator.ExpressionEvaluator.Factory rightValue) { + this.source = source; + this.leftValue = leftValue; + this.rightValue = rightValue; + } + + @Override + public StDistanceGeoSourceAndSourceEvaluator get(DriverContext context) { + return new StDistanceGeoSourceAndSourceEvaluator(source, leftValue.get(context), rightValue.get(context), context); + } + + @Override + public String toString() { + return "StDistanceGeoSourceAndSourceEvaluator[" + "leftValue=" + leftValue + ", rightValue=" + rightValue + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/VerificationException.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/VerificationException.java index 99e4a57757e38..8443b8d99d04a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/VerificationException.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/VerificationException.java @@ -7,8 +7,8 @@ package org.elasticsearch.xpack.esql; -import org.elasticsearch.xpack.esql.core.common.Failure; -import org.elasticsearch.xpack.esql.core.common.Failures; +import org.elasticsearch.xpack.esql.common.Failure; +import org.elasticsearch.xpack.esql.common.Failures; import java.util.Collection; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfoImpl.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfoImpl.java new file mode 100644 index 0000000000000..94da383b40957 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfoImpl.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.InstantiatingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ParserConstructor; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.esql.action.ColumnInfo; +import org.elasticsearch.xpack.esql.core.type.DataType; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + +public class ColumnInfoImpl implements ColumnInfo { + + public static final InstantiatingObjectParser PARSER; + static { + InstantiatingObjectParser.Builder parser = InstantiatingObjectParser.builder( + "esql/column_info", + true, + ColumnInfoImpl.class + ); + parser.declareString(constructorArg(), new ParseField("name")); + parser.declareString(constructorArg(), new ParseField("type")); + PARSER = parser.build(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if ((o instanceof ColumnInfoImpl that)) { + return Objects.equals(name, that.name) && Objects.equals(type, that.type); + } + return false; + } + + @Override + public int hashCode() { + return Objects.hash(name, type); + } + + public static ColumnInfo fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + private String name; + private DataType type; + + @ParserConstructor + public ColumnInfoImpl(String name, String type) { + this(name, DataType.fromEs(type)); + } + + public ColumnInfoImpl(String name, DataType type) { + this.name = name; + this.type = type; + } + + public ColumnInfoImpl(StreamInput in) throws IOException { + this(in.readString(), in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeString(type.outputType()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.field("name", name); + builder.field("type", type.outputType()); + builder.endObject(); + return builder; + } + + @Override + public String name() { + return name; + } + + @Override + public String outputType() { + return type.outputType(); + } + + public DataType type() { + return type; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 9b759a49eab4e..b2a1644fd53d8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -45,9 +45,14 @@ public enum Cap { 
FN_SUBSTRING_EMPTY_NULL, /** - * Support for aggregation function {@code TOP_LIST}. + * Support for aggregation function {@code TOP}. */ - AGG_TOP_LIST, + AGG_TOP, + + /** + * Support for booleans in aggregations {@code MAX} and {@code MIN}. + */ + AGG_MAX_MIN_BOOLEAN_SUPPORT, /** * Optimization for ST_CENTROID changed some results in cartesian data. #108713 @@ -60,9 +65,12 @@ public enum Cap { METADATA_IGNORED_FIELD, /** - * Support for the syntax {@code "tables": {"type": []}}. + * LOOKUP command with + * - tables using syntax {@code "tables": {"type": []}} + * - fixed variable shadowing + * - fixed Join.references(), requiring breaking change to Join serialization */ - TABLES_TYPES(true), + LOOKUP_V4(true), /** * Support for requesting the "REPEAT" command. @@ -82,7 +90,52 @@ public enum Cap { /** * Support multiple field mappings if appropriate conversion function is used (union types) */ - UNION_TYPES; + UNION_TYPES, + + /** + * Support for function {@code ST_DISTANCE}. Done in #108764. + */ + ST_DISTANCE, + + /** + * Fix to GROK and DISSECT that allows extracting attributes with the same name as the input + * https://github.com/elastic/elasticsearch/issues/110184 + */ + GROK_DISSECT_MASKING, + + /** + * Support for quoting index sources in double quotes. + */ + DOUBLE_QUOTES_SOURCE_ENCLOSING, + + /** + * Support for WEIGHTED_AVG function. + */ + AGG_WEIGHTED_AVG, + + /** + * Fix for union-types when aggregating over an inline conversion with casting operator. Done in #110476. + */ + UNION_TYPES_AGG_CAST, + + /** + * Fix to GROK validation in case of multiple fields with same name and different types + * https://github.com/elastic/elasticsearch/issues/110533 + */ + GROK_VALIDATION, + + /** + * Fix for union-types when aggregating over an inline conversion with conversion function. Done in #110652. + */ + UNION_TYPES_INLINE_FIX, + + /** + * Fix a parsing issue where numbers below Long.MIN_VALUE threw an exception instead of parsing as doubles. 
+ * see Parsing large numbers is inconsistent #104323 + */ + FIX_PARSING_LARGE_NEGATIVE_NUMBERS; + + private final boolean snapshotOnly; Cap() { snapshotOnly = false; @@ -96,7 +149,9 @@ public String capabilityName() { return name().toLowerCase(Locale.ROOT); } - private final boolean snapshotOnly; + public boolean snapshotOnly() { + return snapshotOnly; + } } public static final Set<String> CAPABILITIES = capabilities(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java index e81c9919fe0a3..5c9b4244ec0ca 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java @@ -45,7 +45,7 @@ public class EsqlQueryRequest extends org.elasticsearch.xpack.core.esql.action.E private Locale locale; private QueryBuilder filter; private QueryPragmas pragmas = new QueryPragmas(Settings.EMPTY); - private QueryParams params = QueryParams.EMPTY; + private QueryParams params = new QueryParams(); private TimeValue waitForCompletionTimeout = DEFAULT_WAIT_FOR_COMPLETION; private TimeValue keepAlive = DEFAULT_KEEP_ALIVE; private boolean keepOnCompletion; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java index fdf39545a396b..81fbda2ad6fee 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java @@ -25,8 +25,8 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xpack.core.esql.action.ColumnInfo; import org.elasticsearch.xpack.core.esql.action.EsqlResponse; +import org.elasticsearch.xpack.esql.core.type.DataType; import java.io.IOException; import java.util.Collections; @@ -45,7 +45,7 @@ public class EsqlQueryResponse extends org.elasticsearch.xpack.core.esql.action. public static final String DROP_NULL_COLUMNS_OPTION = "drop_null_columns"; - private final List<ColumnInfo> columns; + private final List<ColumnInfoImpl> columns; private final List<Page> pages; private final Profile profile; private final boolean columnar; @@ -55,7 +55,7 @@ public class EsqlQueryResponse extends org.elasticsearch.xpack.core.esql.action. private final boolean isAsync; public EsqlQueryResponse( - List<ColumnInfo> columns, + List<ColumnInfoImpl> columns, List<Page> pages, @Nullable Profile profile, boolean columnar, @@ -72,7 +72,7 @@ public EsqlQueryResponse( this.isAsync = isAsync; } - public EsqlQueryResponse(List<ColumnInfo> columns, List<Page> pages, @Nullable Profile profile, boolean columnar, boolean isAsync) { + public EsqlQueryResponse(List<ColumnInfoImpl> columns, List<Page> pages, @Nullable Profile profile, boolean columnar, boolean isAsync) { this(columns, pages, profile, columnar, null, false, isAsync); } @@ -97,7 +97,7 @@ static EsqlQueryResponse deserialize(BlockStreamInput in) throws IOException { isRunning = in.readBoolean(); isAsync = in.readBoolean(); } - List<ColumnInfo> columns = in.readCollectionAsList(ColumnInfo::new); + List<ColumnInfoImpl> columns = in.readCollectionAsList(ColumnInfoImpl::new); List<Page> pages = in.readCollectionAsList(Page::new); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { profile = in.readOptionalWriteable(Profile::new); @@ -121,7 +121,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(columnar); } - public List<ColumnInfo> columns() { + public List<ColumnInfoImpl> columns() { return columns; } @@ -130,12 +130,12 @@ List<Page> pages() { } public Iterator<Iterator<Object>> values() { - List<String> dataTypes = columns.stream().map(ColumnInfo::type).toList(); + List<DataType> dataTypes = columns.stream().map(ColumnInfoImpl::type).toList(); return ResponseValueUtils.pagesToValues(dataTypes, pages); } public Iterable<Iterable<Object>> rows() { - List<String> dataTypes = columns.stream().map(ColumnInfo::type).toList(); + List<DataType> dataTypes = columns.stream().map(ColumnInfoImpl::type).toList(); return ResponseValueUtils.valuesForRowsInPages(dataTypes, pages); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java index 0ed77b624f5b0..5ce1ca25c5913 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java @@ -7,9 +7,12 @@ package org.elasticsearch.xpack.esql.action; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.logging.Level; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; import org.elasticsearch.rest.ChunkedRestResponseBodyPart; @@ -19,6 +22,8 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.xcontent.MediaType; +import org.elasticsearch.xpack.esql.arrow.ArrowFormat; +import org.elasticsearch.xpack.esql.arrow.ArrowResponse; import org.elasticsearch.xpack.esql.formatter.TextFormat; import org.elasticsearch.xpack.esql.plugin.EsqlMediaTypeParser; @@ -26,7 +31,6 @@ import java.util.Locale; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.xpack.esql.core.util.LoggingUtils.logOnFailure; import static org.elasticsearch.xpack.esql.formatter.TextFormat.CSV; import static org.elasticsearch.xpack.esql.formatter.TextFormat.URL_PARAM_DELIMITER; @@ -135,6 +139,13 @@ private RestResponse buildResponse(EsqlQueryResponse esqlResponse) throws IOExce ChunkedRestResponseBodyPart.fromTextChunks(format.contentType(restRequest), format.format(restRequest, esqlResponse)), releasable ); + } else if (mediaType 
== ArrowFormat.INSTANCE) { + ArrowResponse arrowResponse = new ArrowResponse( + // Map here to avoid cyclic dependencies between the arrow subproject and its parent + esqlResponse.columns().stream().map(c -> new ArrowResponse.Column(c.outputType(), c.name())).toList(), + esqlResponse.pages() + ); + restResponse = RestResponse.chunked(RestStatus.OK, arrowResponse, Releasables.wrap(arrowResponse, releasable)); } else { restResponse = RestResponse.chunked( RestStatus.OK, @@ -158,7 +169,7 @@ private RestResponse buildResponse(EsqlQueryResponse esqlResponse) throws IOExce */ public ActionListener wrapWithLogging() { ActionListener listener = ActionListener.wrap(this::onResponse, ex -> { - logOnFailure(LOGGER, ex); + logOnFailure(ex); onFailure(ex); }); if (LOGGER.isDebugEnabled() == false) { @@ -179,4 +190,9 @@ public ActionListener wrapWithLogging() { listener.onFailure(ex); }); } + + static void logOnFailure(Throwable throwable) { + RestStatus status = ExceptionsHelper.status(throwable); + LOGGER.log(status.getStatus() >= 500 ? Level.WARN : Level.DEBUG, () -> "Request failed with status [" + status + "]: ", throwable); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/PositionToXContent.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/PositionToXContent.java index 1d07ccc276949..0bc1eb46abefe 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/PositionToXContent.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/PositionToXContent.java @@ -22,7 +22,6 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.core.esql.action.ColumnInfo; import java.io.IOException; @@ -59,30 +58,30 @@ public XContentBuilder positionToXContent(XContentBuilder builder, ToXContent.Pa protected abstract XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException; - public static PositionToXContent positionToXContent(ColumnInfo columnInfo, Block block, BytesRef scratch) { + public static PositionToXContent positionToXContent(ColumnInfoImpl columnInfo, Block block, BytesRef scratch) { return switch (columnInfo.type()) { - case "long", "counter_long" -> new PositionToXContent(block) { + case LONG, COUNTER_LONG -> new PositionToXContent(block) { @Override protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { return builder.value(((LongBlock) block).getLong(valueIndex)); } }; - case "integer", "counter_integer" -> new PositionToXContent(block) { + case INTEGER, COUNTER_INTEGER -> new PositionToXContent(block) { @Override protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { return builder.value(((IntBlock) block).getInt(valueIndex)); } }; - case "double", "counter_double" -> new PositionToXContent(block) { + case DOUBLE, COUNTER_DOUBLE -> new PositionToXContent(block) { @Override protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { return builder.value(((DoubleBlock) block).getDouble(valueIndex)); } }; - case "unsigned_long" -> new PositionToXContent(block) { + case UNSIGNED_LONG -> new PositionToXContent(block) { @Override protected XContentBuilder valueToXContent(XContentBuilder 
builder, ToXContent.Params params, int valueIndex) throws IOException { @@ -90,7 +89,7 @@ protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Pa return builder.value(unsignedLongAsNumber(l)); } }; - case "keyword", "text" -> new PositionToXContent(block) { + case KEYWORD, TEXT -> new PositionToXContent(block) { @Override protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { @@ -103,7 +102,7 @@ protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Pa return builder.utf8Value(val.bytes, val.offset, val.length); } }; - case "ip" -> new PositionToXContent(block) { + case IP -> new PositionToXContent(block) { @Override protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { @@ -111,7 +110,7 @@ protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Pa return builder.value(ipToString(val)); } }; - case "date" -> new PositionToXContent(block) { + case DATETIME -> new PositionToXContent(block) { @Override protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { @@ -119,21 +118,21 @@ protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Pa return builder.value(dateTimeToString(longVal)); } }; - case "geo_point", "geo_shape", "cartesian_point", "cartesian_shape" -> new PositionToXContent(block) { + case GEO_POINT, GEO_SHAPE, CARTESIAN_POINT, CARTESIAN_SHAPE -> new PositionToXContent(block) { @Override protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { return builder.value(spatialToString(((BytesRefBlock) block).getBytesRef(valueIndex, scratch))); } }; - case "boolean" -> new PositionToXContent(block) { + case BOOLEAN -> new PositionToXContent(block) { @Override protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { return builder.value(((BooleanBlock) block).getBoolean(valueIndex)); } }; - case "version" -> new PositionToXContent(block) { + case VERSION -> new PositionToXContent(block) { @Override protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { @@ -141,21 +140,21 @@ protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Pa return builder.value(versionToString(val)); } }; - case "null" -> new PositionToXContent(block) { + case NULL -> new PositionToXContent(block) { @Override protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { return builder.nullValue(); } }; - case "unsupported" -> new PositionToXContent(block) { + case UNSUPPORTED -> new PositionToXContent(block) { @Override protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { return builder.value(UnsupportedValueSource.UNSUPPORTED_OUTPUT); } }; - case "_source" -> new PositionToXContent(block) { + case SOURCE -> new PositionToXContent(block) { @Override protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { @@ -166,7 +165,8 @@ protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Pa } } }; - default -> throw new 
IllegalArgumentException("can't convert values of type [" + columnInfo.type() + "]"); + case DATE_PERIOD, TIME_DURATION, DOC_DATA_TYPE, TSID_DATA_TYPE, SHORT, BYTE, OBJECT, NESTED, FLOAT, HALF_FLOAT, SCALED_FLOAT, + PARTIAL_AGG -> throw new IllegalArgumentException("can't convert values of type [" + columnInfo.type() + "]"); }; } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java index 98f2bbf95d3de..290a816275a29 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java @@ -25,10 +25,9 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.json.JsonXContent; -import org.elasticsearch.xpack.core.esql.action.ColumnInfo; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.planner.PlannerUtils; -import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import java.io.IOException; import java.io.UncheckedIOException; @@ -57,7 +56,7 @@ public final class ResponseValueUtils { * Returns an iterator of iterators over the values in the given pages. There is one iterator * for each block. */ - public static Iterator> pagesToValues(List dataTypes, List pages) { + public static Iterator> pagesToValues(List dataTypes, List pages) { BytesRef scratch = new BytesRef(); return Iterators.flatMap( pages.iterator(), @@ -70,18 +69,18 @@ public static Iterator> pagesToValues(List dataTypes, L } /** Returns an iterable of iterables over the values in the given pages. There is one iterables for each row. */ - static Iterable> valuesForRowsInPages(List dataTypes, List pages) { + static Iterable> valuesForRowsInPages(List dataTypes, List pages) { BytesRef scratch = new BytesRef(); return () -> Iterators.flatMap(pages.iterator(), page -> valuesForRowsInPage(dataTypes, page, scratch)); } /** Returns an iterable of iterables over the values in the given page. There is one iterables for each row. */ - static Iterator> valuesForRowsInPage(List dataTypes, Page page, BytesRef scratch) { + static Iterator> valuesForRowsInPage(List dataTypes, Page page, BytesRef scratch) { return Iterators.forRange(0, page.getPositionCount(), position -> valuesForRow(dataTypes, page, position, scratch)); } /** Returns an iterable over the values in the given row in a page. */ - static Iterable valuesForRow(List dataTypes, Page page, int position, BytesRef scratch) { + static Iterable valuesForRow(List dataTypes, Page page, int position, BytesRef scratch) { return () -> Iterators.forRange( 0, page.getBlockCount(), @@ -90,7 +89,7 @@ static Iterable valuesForRow(List dataTypes, Page page, int posi } /** Returns an iterator of values for the given column. */ - static Iterator valuesForColumn(int columnIndex, String dataType, List pages) { + static Iterator valuesForColumn(int columnIndex, DataType dataType, List pages) { BytesRef scratch = new BytesRef(); return Iterators.flatMap( pages.iterator(), @@ -103,7 +102,7 @@ static Iterator valuesForColumn(int columnIndex, String dataType, List
      unsignedLongAsNumber(((LongBlock) block).getLong(offset)); - case "long", "counter_long" -> ((LongBlock) block).getLong(offset); - case "integer", "counter_integer" -> ((IntBlock) block).getInt(offset); - case "double", "counter_double" -> ((DoubleBlock) block).getDouble(offset); - case "keyword", "text" -> ((BytesRefBlock) block).getBytesRef(offset, scratch).utf8ToString(); - case "ip" -> { + case UNSIGNED_LONG -> unsignedLongAsNumber(((LongBlock) block).getLong(offset)); + case LONG, COUNTER_LONG -> ((LongBlock) block).getLong(offset); + case INTEGER, COUNTER_INTEGER -> ((IntBlock) block).getInt(offset); + case DOUBLE, COUNTER_DOUBLE -> ((DoubleBlock) block).getDouble(offset); + case KEYWORD, TEXT -> ((BytesRefBlock) block).getBytesRef(offset, scratch).utf8ToString(); + case IP -> { BytesRef val = ((BytesRefBlock) block).getBytesRef(offset, scratch); yield ipToString(val); } - case "date" -> { + case DATETIME -> { long longVal = ((LongBlock) block).getLong(offset); yield dateTimeToString(longVal); } - case "boolean" -> ((BooleanBlock) block).getBoolean(offset); - case "version" -> versionToString(((BytesRefBlock) block).getBytesRef(offset, scratch)); - case "geo_point", "geo_shape", "cartesian_point", "cartesian_shape" -> spatialToString( + case BOOLEAN -> ((BooleanBlock) block).getBoolean(offset); + case VERSION -> versionToString(((BytesRefBlock) block).getBytesRef(offset, scratch)); + case GEO_POINT, GEO_SHAPE, CARTESIAN_POINT, CARTESIAN_SHAPE -> spatialToString( ((BytesRefBlock) block).getBytesRef(offset, scratch) ); - case "unsupported" -> UnsupportedValueSource.UNSUPPORTED_OUTPUT; - case "_source" -> { + case UNSUPPORTED -> UnsupportedValueSource.UNSUPPORTED_OUTPUT; + case SOURCE -> { BytesRef val = ((BytesRefBlock) block).getBytesRef(offset, scratch); try { try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, new BytesArray(val))) { @@ -152,7 +151,8 @@ private static Object valueAt(String dataType, Block block, int offset, BytesRef throw new UncheckedIOException(e); } } - default -> throw EsqlIllegalArgumentException.illegalDataType(dataType); + case SHORT, BYTE, FLOAT, HALF_FLOAT, SCALED_FLOAT, OBJECT, NESTED, DATE_PERIOD, TIME_DURATION, DOC_DATA_TYPE, TSID_DATA_TYPE, + NULL, PARTIAL_AGG -> throw EsqlIllegalArgumentException.illegalDataType(dataType); }; } @@ -160,10 +160,10 @@ private static Object valueAt(String dataType, Block block, int offset, BytesRef * Converts a list of values to Pages so that we can parse from xcontent. It's not * super efficient, but it doesn't really have to be. 
*/ - static Page valuesToPage(BlockFactory blockFactory, List columns, List> values) { - List dataTypes = columns.stream().map(ColumnInfo::type).toList(); + static Page valuesToPage(BlockFactory blockFactory, List columns, List> values) { + List dataTypes = columns.stream().map(ColumnInfoImpl::type).toList(); List results = dataTypes.stream() - .map(c -> PlannerUtils.toElementType(EsqlDataTypes.fromName(c)).newBlockBuilder(values.size(), blockFactory)) + .map(c -> PlannerUtils.toElementType(c).newBlockBuilder(values.size(), blockFactory)) .toList(); for (List row : values) { @@ -171,24 +171,20 @@ static Page valuesToPage(BlockFactory blockFactory, List columns, Li var builder = results.get(c); var value = row.get(c); switch (dataTypes.get(c)) { - case "unsigned_long" -> ((LongBlock.Builder) builder).appendLong( - longToUnsignedLong(((Number) value).longValue(), true) - ); - case "long", "counter_long" -> ((LongBlock.Builder) builder).appendLong(((Number) value).longValue()); - case "integer", "counter_integer" -> ((IntBlock.Builder) builder).appendInt(((Number) value).intValue()); - case "double", "counter_double" -> ((DoubleBlock.Builder) builder).appendDouble(((Number) value).doubleValue()); - case "keyword", "text", "unsupported" -> ((BytesRefBlock.Builder) builder).appendBytesRef( - new BytesRef(value.toString()) - ); - case "ip" -> ((BytesRefBlock.Builder) builder).appendBytesRef(stringToIP(value.toString())); - case "date" -> { + case UNSIGNED_LONG -> ((LongBlock.Builder) builder).appendLong(longToUnsignedLong(((Number) value).longValue(), true)); + case LONG, COUNTER_LONG -> ((LongBlock.Builder) builder).appendLong(((Number) value).longValue()); + case INTEGER, COUNTER_INTEGER -> ((IntBlock.Builder) builder).appendInt(((Number) value).intValue()); + case DOUBLE, COUNTER_DOUBLE -> ((DoubleBlock.Builder) builder).appendDouble(((Number) value).doubleValue()); + case KEYWORD, TEXT, UNSUPPORTED -> ((BytesRefBlock.Builder) builder).appendBytesRef(new BytesRef(value.toString())); + case IP -> ((BytesRefBlock.Builder) builder).appendBytesRef(stringToIP(value.toString())); + case DATETIME -> { long longVal = dateTimeToLong(value.toString()); ((LongBlock.Builder) builder).appendLong(longVal); } - case "boolean" -> ((BooleanBlock.Builder) builder).appendBoolean(((Boolean) value)); - case "null" -> builder.appendNull(); - case "version" -> ((BytesRefBlock.Builder) builder).appendBytesRef(stringToVersion(new BytesRef(value.toString()))); - case "_source" -> { + case BOOLEAN -> ((BooleanBlock.Builder) builder).appendBoolean(((Boolean) value)); + case NULL -> builder.appendNull(); + case VERSION -> ((BytesRefBlock.Builder) builder).appendBytesRef(stringToVersion(new BytesRef(value.toString()))); + case SOURCE -> { @SuppressWarnings("unchecked") Map o = (Map) value; try { @@ -200,12 +196,11 @@ static Page valuesToPage(BlockFactory blockFactory, List columns, Li throw new UncheckedIOException(e); } } - case "geo_point", "geo_shape", "cartesian_point", "cartesian_shape" -> { + case GEO_POINT, GEO_SHAPE, CARTESIAN_POINT, CARTESIAN_SHAPE -> { // This just converts WKT to WKB, so does not need CRS knowledge, we could merge GEO and CARTESIAN here BytesRef wkb = stringToSpatial(value.toString()); ((BytesRefBlock.Builder) builder).appendBytesRef(wkb); } - default -> throw EsqlIllegalArgumentException.illegalDataType(dataTypes.get(c)); } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java 
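The switches above replace string type names ("long", "date", and so on) with DataType enum constants and drop their default branches in favour of listing the unhandled constants explicitly. A tiny sketch with a made-up enum (not the real DataType) shows the benefit of that shape: a switch expression over an enum with no default fails to compile as soon as a new constant is added and left unhandled.

```java
// Hypothetical enum purely for illustration; the real class is DataType in esql-core.
enum SketchType { LONG, KEYWORD, BOOLEAN }

class ExhaustiveSwitchSketch {
    static String describe(SketchType type) {
        // No default branch on purpose: adding a constant to SketchType turns this switch into
        // a compile error until the new case is handled, unlike a string-keyed switch.
        return switch (type) {
            case LONG -> "numeric";
            case KEYWORD -> "string";
            case BOOLEAN -> "boolean";
        };
    }
}
```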
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java index ad76fde7eca26..d7d0d9033d3b9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java @@ -27,7 +27,7 @@ final class ResponseXContentUtils { /** * Returns the column headings for the given columns. */ - static Iterator allColumns(List columns, String name) { + static Iterator allColumns(List columns, String name) { return ChunkedToXContentHelper.singleChunk((builder, params) -> { builder.startArray(name); for (ColumnInfo col : columns) { @@ -41,7 +41,7 @@ static Iterator allColumns(List columns, Strin * Returns the column headings for the given columns, moving the heading * for always-null columns to a {@code null_columns} section. */ - static Iterator nonNullColumns(List columns, boolean[] nullColumns, String name) { + static Iterator nonNullColumns(List columns, boolean[] nullColumns, String name) { return ChunkedToXContentHelper.singleChunk((builder, params) -> { builder.startArray(name); for (int c = 0; c < columns.size(); c++) { @@ -55,7 +55,7 @@ static Iterator nonNullColumns(List columns, b /** Returns the column values for the given pages (described by the column infos). */ static Iterator columnValues( - List columns, + List columns, List pages, boolean columnar, boolean[] nullColumns @@ -70,7 +70,7 @@ static Iterator columnValues( } /** Returns a columnar based representation of the values in the given pages (described by the column infos). */ - static Iterator columnarValues(List columns, List pages, boolean[] nullColumns) { + static Iterator columnarValues(List columns, List pages, boolean[] nullColumns) { final BytesRef scratch = new BytesRef(); return Iterators.flatMap(Iterators.forRange(0, columns.size(), column -> { if (nullColumns != null && nullColumns[column]) { @@ -96,7 +96,7 @@ static Iterator columnarValues(List columns, L } /** Returns a row based representation of the values in the given pages (described by the column infos). 
*/ - static Iterator rowValues(List columns, List pages, boolean[] nullColumns) { + static Iterator rowValues(List columns, List pages, boolean[] nullColumns) { final BytesRef scratch = new BytesRef(); return Iterators.flatMap(pages.iterator(), page -> { final int columnCount = columns.size(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 77a51c8415545..5fdd6309ae71e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -14,11 +14,10 @@ import org.elasticsearch.xpack.esql.Column; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.VerificationException; -import org.elasticsearch.xpack.esql.core.analyzer.AnalyzerRules; -import org.elasticsearch.xpack.esql.core.analyzer.AnalyzerRules.BaseAnalyzerRule; -import org.elasticsearch.xpack.esql.core.analyzer.AnalyzerRules.ParameterizedAnalyzerRule; +import org.elasticsearch.xpack.esql.analysis.AnalyzerRules.BaseAnalyzerRule; +import org.elasticsearch.xpack.esql.analysis.AnalyzerRules.ParameterizedAnalyzerRule; +import org.elasticsearch.xpack.esql.common.Failure; import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; -import org.elasticsearch.xpack.esql.core.common.Failure; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.AttributeMap; @@ -33,16 +32,11 @@ import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttribute; import org.elasticsearch.xpack.esql.core.expression.UnresolvedStar; -import org.elasticsearch.xpack.esql.core.expression.function.FunctionDefinition; -import org.elasticsearch.xpack.esql.core.expression.function.FunctionRegistry; -import org.elasticsearch.xpack.esql.core.expression.function.UnresolvedFunction; import org.elasticsearch.xpack.esql.core.expression.function.scalar.ScalarFunction; import org.elasticsearch.xpack.esql.core.expression.predicate.BinaryOperator; import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparison; import org.elasticsearch.xpack.esql.core.index.EsIndex; import org.elasticsearch.xpack.esql.core.plan.TableIdentifier; -import org.elasticsearch.xpack.esql.core.plan.logical.Limit; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.rule.ParameterizedRule; import org.elasticsearch.xpack.esql.core.rule.ParameterizedRuleExecutor; import org.elasticsearch.xpack.esql.core.rule.RuleExecutor; @@ -58,6 +52,8 @@ import org.elasticsearch.xpack.esql.expression.NamedExpressions; import org.elasticsearch.xpack.esql.expression.UnresolvedNamePattern; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; +import org.elasticsearch.xpack.esql.expression.function.FunctionDefinition; +import org.elasticsearch.xpack.esql.expression.function.UnresolvedFunction; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; @@ -68,14 +64,15 @@ import 
org.elasticsearch.xpack.esql.plan.logical.Drop; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; -import org.elasticsearch.xpack.esql.plan.logical.EsqlAggregate; -import org.elasticsearch.xpack.esql.plan.logical.EsqlUnresolvedRelation; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.Keep; +import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Lookup; import org.elasticsearch.xpack.esql.plan.logical.MvExpand; import org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.Rename; +import org.elasticsearch.xpack.esql.plan.logical.UnresolvedRelation; import org.elasticsearch.xpack.esql.plan.logical.local.EsqlProject; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; @@ -171,16 +168,17 @@ protected Iterable> batches() { return rules; } - private static class ResolveTable extends ParameterizedAnalyzerRule { + private static class ResolveTable extends ParameterizedAnalyzerRule { @Override - protected LogicalPlan rule(EsqlUnresolvedRelation plan, AnalyzerContext context) { + protected LogicalPlan rule(UnresolvedRelation plan, AnalyzerContext context) { if (context.indexResolution().isValid() == false) { return plan.unresolvedMessage().equals(context.indexResolution().toString()) ? plan - : new EsqlUnresolvedRelation( + : new UnresolvedRelation( plan.source(), plan.table(), + plan.frozen(), plan.metadataFields(), plan.indexMode(), context.indexResolution().toString() @@ -189,9 +187,10 @@ protected LogicalPlan rule(EsqlUnresolvedRelation plan, AnalyzerContext context) TableIdentifier table = plan.table(); if (context.indexResolution().matches(table.index()) == false) { // TODO: fix this (and tests), or drop check (seems SQL-inherited, where's also defective) - new EsqlUnresolvedRelation( + new UnresolvedRelation( plan.source(), plan.table(), + plan.frozen(), plan.metadataFields(), plan.indexMode(), "invalid [" + table + "] resolution to [" + context.indexResolution() + "]" @@ -226,8 +225,7 @@ private static void mappingAsAttributes(List list, Source source, Str if (t != null) { name = parentName == null ? name : parentName + "." + name; var fieldProperties = t.getProperties(); - // widen the data type - var type = EsqlDataTypes.widenSmallNumericTypes(t.getDataType()); + var type = t.getDataType().widenSmallNumeric(); // due to a bug also copy the field since the Attribute hierarchy extracts the data type // directly even if the data type is passed explicitly if (type != t.getDataType()) { @@ -452,7 +450,7 @@ private LogicalPlan resolveAggregate(Aggregate a, List childrenOutput } groupings = newGroupings; if (changed.get()) { - a = new EsqlAggregate(a.source(), a.child(), newGroupings, a.aggregates()); + a = new Aggregate(a.source(), a.child(), a.aggregateType(), newGroupings, a.aggregates()); changed.set(false); } } @@ -481,7 +479,7 @@ private LogicalPlan resolveAggregate(Aggregate a, List childrenOutput newAggregates.add(agg); } - a = changed.get() ? new EsqlAggregate(a.source(), a.child(), groupings, newAggregates) : a; + a = changed.get() ? 
new Aggregate(a.source(), a.child(), a.aggregateType(), groupings, newAggregates) : a; } return a; @@ -520,13 +518,13 @@ private LogicalPlan resolveLookup(Lookup l, List childrenOutput) { } // check the on field against both the child output and the inner relation - List matchFields = new ArrayList<>(l.matchFields().size()); + List matchFields = new ArrayList<>(l.matchFields().size()); List localOutput = l.localRelation().output(); boolean modified = false; - for (NamedExpression ne : l.matchFields()) { - NamedExpression matchFieldChildReference = ne; - if (ne instanceof UnresolvedAttribute ua && ua.customMessage() == false) { + for (Attribute matchField : l.matchFields()) { + Attribute matchFieldChildReference = matchField; + if (matchField instanceof UnresolvedAttribute ua && ua.customMessage() == false) { modified = true; Attribute joinedAttribute = maybeResolveAttribute(ua, localOutput); // can't find the field inside the local relation @@ -867,16 +865,18 @@ private static class ResolveFunctions extends ParameterizedAnalyzerRule resolveFunction(uf, context.configuration(), context.functionRegistry()) + uf -> resolveFunction(uf, context.configuration(), snapshotRegistry) ); } public static org.elasticsearch.xpack.esql.core.expression.function.Function resolveFunction( UnresolvedFunction uf, Configuration configuration, - FunctionRegistry functionRegistry + EsqlFunctionRegistry functionRegistry ) { org.elasticsearch.xpack.esql.core.expression.function.Function f = null; if (uf.analyzed()) { @@ -925,10 +925,7 @@ private BitSet gatherPreAnalysisMetrics(LogicalPlan plan, BitSet b) { private static class ImplicitCasting extends ParameterizedRule { @Override public LogicalPlan apply(LogicalPlan plan, AnalyzerContext context) { - return plan.transformExpressionsUp( - ScalarFunction.class, - e -> ImplicitCasting.cast(e, (EsqlFunctionRegistry) context.functionRegistry()) - ); + return plan.transformExpressionsUp(ScalarFunction.class, e -> ImplicitCasting.cast(e, context.functionRegistry())); } private static Expression cast(ScalarFunction f, EsqlFunctionRegistry registry) { @@ -1090,6 +1087,23 @@ protected LogicalPlan doRule(LogicalPlan plan) { return plan; } + // In ResolveRefs the aggregates are resolved from the groupings, which might have an unresolved MultiTypeEsField. + // Now that we have resolved those, we need to re-resolve the aggregates. + if (plan instanceof Aggregate agg) { + // If the union-types resolution occurred in a child of the aggregate, we need to check the groupings + plan = agg.transformExpressionsOnly(FieldAttribute.class, UnresolveUnionTypes::checkUnresolved); + + // Aggregates where the grouping key comes from a union-type field need to be resolved against the grouping key + Map resolved = new HashMap<>(); + for (Expression e : agg.groupings()) { + Attribute attr = Expressions.attribute(e); + if (attr != null && attr.resolved()) { + resolved.put(attr, e); + } + } + plan = plan.transformExpressionsOnly(UnresolvedAttribute.class, ua -> resolveAttribute(ua, resolved)); + } + // Otherwise drop the converted attributes after the alias function, as they are only needed for this function, and // the original version of the attribute should still be seen as unconverted. 
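The Aggregate handling above re-resolves aggregates and grouping references once a union-typed grouping key has been resolved. A hedged illustration of the query shape that needs this (the syntax and names are assumed, not taken from this PR) is grouping on an inline cast over a field that is mapped with different types across the queried indices.

```java
// Assumed, illustrative query and field names only.
class UnionTypeGroupingExample {
    // client_ip is assumed to be mapped as keyword in logs-a and as ip in logs-b. The ::ip cast
    // makes the grouping key a multi-typed field that is only resolved during union-type
    // resolution, after which COUNT(*) and the BY reference are re-resolved against it, as in
    // the rule above.
    static final String QUERY = "FROM logs-a,logs-b | STATS COUNT(*) BY client_ip::ip";
}
```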
plan = dropConvertedAttributes(plan, unionFieldAttributes); @@ -1113,6 +1127,15 @@ protected LogicalPlan doRule(LogicalPlan plan) { return plan; } + private Expression resolveAttribute(UnresolvedAttribute ua, Map resolved) { + var named = resolveAgainstList(ua, resolved.keySet()); + return switch (named.size()) { + case 0 -> ua; + case 1 -> named.get(0).equals(ua) ? ua : resolved.get(named.get(0)); + default -> ua.withUnresolvedMessage("Resolved [" + ua + "] unexpectedly to multiple attributes " + named); + }; + } + private LogicalPlan dropConvertedAttributes(LogicalPlan plan, List unionFieldAttributes) { List projections = new ArrayList<>(plan.output()); for (var e : unionFieldAttributes) { @@ -1204,9 +1227,8 @@ protected LogicalPlan rule(LogicalPlan plan) { return plan.transformExpressionsOnly(FieldAttribute.class, UnresolveUnionTypes::checkUnresolved); } - private static Attribute checkUnresolved(FieldAttribute fa) { - var field = fa.field(); - if (field instanceof InvalidMappedField imf) { + static Attribute checkUnresolved(FieldAttribute fa) { + if (fa.field() instanceof InvalidMappedField imf) { String unresolvedMessage = "Cannot use field [" + fa.name() + "] due to ambiguities being " + imf.errorMessage(); return new UnresolvedAttribute(fa.source(), fa.name(), fa.qualifier(), fa.id(), unresolvedMessage, null); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/AnalyzerContext.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/AnalyzerContext.java index c488aa2261d51..5585a3f117d2f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/AnalyzerContext.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/AnalyzerContext.java @@ -7,13 +7,13 @@ package org.elasticsearch.xpack.esql.analysis; -import org.elasticsearch.xpack.esql.core.expression.function.FunctionRegistry; import org.elasticsearch.xpack.esql.core.index.IndexResolution; +import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; public record AnalyzerContext( EsqlConfiguration configuration, - FunctionRegistry functionRegistry, + EsqlFunctionRegistry functionRegistry, IndexResolution indexResolution, EnrichResolution enrichResolution ) {} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/analyzer/AnalyzerRules.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/AnalyzerRules.java similarity index 97% rename from x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/analyzer/AnalyzerRules.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/AnalyzerRules.java index ce188511fe7bc..3314129fae405 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/analyzer/AnalyzerRules.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/AnalyzerRules.java @@ -5,13 +5,13 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.core.analyzer; +package org.elasticsearch.xpack.esql.analysis; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttribute; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.rule.ParameterizedRule; import org.elasticsearch.xpack.esql.core.rule.Rule; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import java.util.ArrayList; import java.util.Collection; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/PreAnalyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/PreAnalyzer.java index 7c37d5b8392c5..51416ed168ace 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/PreAnalyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/PreAnalyzer.java @@ -8,9 +8,9 @@ package org.elasticsearch.xpack.esql.analysis; import org.elasticsearch.xpack.esql.core.analyzer.TableInfo; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Enrich; -import org.elasticsearch.xpack.esql.plan.logical.EsqlUnresolvedRelation; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.UnresolvedRelation; import java.util.ArrayList; import java.util.List; @@ -43,7 +43,7 @@ protected PreAnalysis doPreAnalyze(LogicalPlan plan) { List indices = new ArrayList<>(); List unresolvedEnriches = new ArrayList<>(); - plan.forEachUp(EsqlUnresolvedRelation.class, p -> { indices.add(new TableInfo(p.table(), p.frozen())); }); + plan.forEachUp(UnresolvedRelation.class, p -> indices.add(new TableInfo(p.table(), p.frozen()))); plan.forEachUp(Enrich.class, unresolvedEnriches::add); // mark plan as preAnalyzed (if it were marked, there would be no analysis) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index 367ba682274c9..a4e0d99b0d3fc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -7,8 +7,8 @@ package org.elasticsearch.xpack.esql.analysis; +import org.elasticsearch.xpack.esql.common.Failure; import org.elasticsearch.xpack.esql.core.capabilities.Unresolvable; -import org.elasticsearch.xpack.esql.core.common.Failure; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.AttributeMap; @@ -20,13 +20,10 @@ import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; import org.elasticsearch.xpack.esql.core.expression.predicate.BinaryOperator; import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparison; -import org.elasticsearch.xpack.esql.core.plan.logical.Limit; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.OrderBy; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; 
+import org.elasticsearch.xpack.esql.expression.function.aggregate.Rate; import org.elasticsearch.xpack.esql.expression.function.grouping.GroupingFunction; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Neg; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; @@ -34,10 +31,15 @@ import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.Filter; +import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Lookup; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.RegexExtract; import org.elasticsearch.xpack.esql.plan.logical.Row; +import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.stats.FeatureMetric; import org.elasticsearch.xpack.esql.stats.Metrics; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; @@ -51,9 +53,9 @@ import java.util.function.Consumer; import java.util.stream.Stream; -import static org.elasticsearch.xpack.esql.core.analyzer.VerifierChecks.checkFilterConditionType; -import static org.elasticsearch.xpack.esql.core.common.Failure.fail; +import static org.elasticsearch.xpack.esql.common.Failure.fail; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; +import static org.elasticsearch.xpack.esql.core.type.DataType.BOOLEAN; public class Verifier { @@ -176,6 +178,15 @@ else if (p instanceof Lookup lookup) { return failures; } + private static void checkFilterConditionType(LogicalPlan p, Set localFailures) { + if (p instanceof Filter f) { + Expression condition = f.condition(); + if (condition.dataType() != BOOLEAN) { + localFailures.add(fail(condition, "Condition expression needs to be boolean, found [{}]", condition.dataType())); + } + } + } + private static void checkAggregate(LogicalPlan p, Set failures) { if (p instanceof Aggregate agg) { List groupings = agg.groupings(); @@ -208,7 +219,7 @@ private static void checkAggregate(LogicalPlan p, Set failures) { if (attr != null) { groupRefs.add(attr); } - if (e instanceof FieldAttribute f && EsqlDataTypes.isCounterType(f.dataType())) { + if (e instanceof FieldAttribute f && f.dataType().isCounter()) { failures.add(fail(e, "cannot group by on [{}] type for grouping [{}]", f.dataType().typeName(), e.sourceText())); } }); @@ -225,6 +236,14 @@ private static void checkAggregate(LogicalPlan p, Set failures) { // traverse the tree to find invalid matches checkInvalidNamedExpressionUsage(exp, groupings, groupRefs, failures, 0); }); + if (agg.aggregateType() == Aggregate.AggregateType.METRICS) { + aggs.forEach(a -> checkRateAggregates(a, 0, failures)); + } else { + agg.forEachExpression( + Rate.class, + r -> failures.add(fail(r, "the rate aggregate[{}] can only be used within the metrics command", r.sourceText())) + ); + } } else { p.forEachExpression( GroupingFunction.class, @@ -233,6 +252,26 @@ private static void checkAggregate(LogicalPlan p, Set failures) { } } + private static void checkRateAggregates(Expression expr, int nestedLevel, Set failures) { + if (expr instanceof AggregateFunction) { + nestedLevel++; + } + if (expr instanceof Rate r) { + if (nestedLevel != 2) { + failures.add( + fail( + expr, 
+ "the rate aggregate [{}] can only be used within the metrics command and inside another aggregate", + r.sourceText() + ) + ); + } + } + for (Expression child : expr.children()) { + checkRateAggregates(child, nestedLevel, failures); + } + } + // traverse the expression and look either for an agg function or a grouping match // stop either when no children are left, the leafs are literals or a reference attribute is given private static void checkInvalidNamedExpressionUsage( @@ -245,7 +284,10 @@ private static void checkInvalidNamedExpressionUsage( // found an aggregate, constant or a group, bail out if (e instanceof AggregateFunction af) { af.field().forEachDown(AggregateFunction.class, f -> { - failures.add(fail(f, "nested aggregations [{}] not allowed inside other aggregations [{}]", f, af)); + // rate aggregate is allowed to be inside another aggregate + if (f instanceof Rate == false) { + failures.add(fail(f, "nested aggregations [{}] not allowed inside other aggregations [{}]", f, af)); + } }); } else if (e instanceof GroupingFunction gf) { // optimizer will later unroll expressions with aggs and non-aggs with a grouping function into an EVAL, but that will no longer @@ -337,7 +379,11 @@ private static void checkEvalFields(LogicalPlan p, Set failures) { } // check no aggregate functions are used field.forEachDown(AggregateFunction.class, af -> { - failures.add(fail(af, "aggregate function [{}] not allowed outside STATS command", af.sourceText())); + if (af instanceof Rate) { + failures.add(fail(af, "aggregate function [{}] not allowed outside METRICS command", af.sourceText())); + } else { + failures.add(fail(af, "aggregate function [{}] not allowed outside STATS command", af.sourceText())); + } }); }); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/capabilities/Validatable.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/capabilities/Validatable.java index 4d30f32af5f15..f6733fa3f175c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/capabilities/Validatable.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/capabilities/Validatable.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.esql.capabilities; -import org.elasticsearch.xpack.esql.core.common.Failures; +import org.elasticsearch.xpack.esql.common.Failures; /** * Interface implemented by expressions that require validation post logical optimization, diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/common/Failure.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/common/Failure.java similarity index 97% rename from x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/common/Failure.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/common/Failure.java index 719ae7ffbd1ca..e5d0fb7ba0b3d 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/common/Failure.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/common/Failure.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.core.common; +package org.elasticsearch.xpack.esql.common; import org.elasticsearch.xpack.esql.core.tree.Location; import org.elasticsearch.xpack.esql.core.tree.Node; diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/common/Failures.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/common/Failures.java similarity index 96% rename from x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/common/Failures.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/common/Failures.java index c06fe94c9a338..fd25cb427d95b 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/common/Failures.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/common/Failures.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.core.common; +package org.elasticsearch.xpack.esql.common; import java.util.Collection; import java.util.LinkedHashSet; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java index 05b78c8b5f309..2425fa24b17c2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java @@ -31,7 +31,6 @@ import org.elasticsearch.compute.data.BlockStreamInput; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.ElementType; -import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LocalCircuitBreaker; import org.elasticsearch.compute.data.OrdinalBytesRefBlock; @@ -43,6 +42,7 @@ import org.elasticsearch.compute.operator.OutputOperator; import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.RefCounted; +import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.index.mapper.BlockLoader; import org.elasticsearch.index.mapper.MappedFieldType; @@ -247,30 +247,53 @@ private void doLookup( ActionListener listener ) { Block inputBlock = inputPage.getBlock(0); - final IntBlock selectedPositions; - final OrdinalBytesRefBlock ordinalsBytesRefBlock; - if (inputBlock instanceof BytesRefBlock bytesRefBlock && (ordinalsBytesRefBlock = bytesRefBlock.asOrdinals()) != null) { - inputBlock = ordinalsBytesRefBlock.getDictionaryVector().asBlock(); - selectedPositions = ordinalsBytesRefBlock.getOrdinalsBlock(); - selectedPositions.mustIncRef(); - } else { - selectedPositions = IntVector.range(0, inputBlock.getPositionCount(), blockFactory).asBlock(); + if (inputBlock.areAllValuesNull()) { + listener.onResponse(createNullResponse(inputPage.getPositionCount(), extractFields)); + return; } - LocalCircuitBreaker localBreaker = null; + final List releasables = new ArrayList<>(6); + boolean started = false; try { - if (inputBlock.areAllValuesNull()) { - listener.onResponse(createNullResponse(inputPage.getPositionCount(), extractFields)); - return; - } - ShardSearchRequest shardSearchRequest = new ShardSearchRequest(shardId, 0, AliasFilter.EMPTY); - SearchContext searchContext = searchService.createSearchContext(shardSearchRequest, SearchService.NO_TIMEOUT); - listener = ActionListener.runBefore(listener, searchContext::close); - localBreaker = new 
LocalCircuitBreaker( + final ShardSearchRequest shardSearchRequest = new ShardSearchRequest(shardId, 0, AliasFilter.EMPTY); + final SearchContext searchContext = searchService.createSearchContext(shardSearchRequest, SearchService.NO_TIMEOUT); + releasables.add(searchContext); + final LocalCircuitBreaker localBreaker = new LocalCircuitBreaker( blockFactory.breaker(), localBreakerSettings.overReservedBytes(), localBreakerSettings.maxOverReservedBytes() ); - DriverContext driverContext = new DriverContext(bigArrays, blockFactory.newChildFactory(localBreaker)); + releasables.add(localBreaker); + final DriverContext driverContext = new DriverContext(bigArrays, blockFactory.newChildFactory(localBreaker)); + final ElementType[] mergingTypes = new ElementType[extractFields.size()]; + for (int i = 0; i < extractFields.size(); i++) { + mergingTypes[i] = PlannerUtils.toElementType(extractFields.get(i).dataType()); + } + final int[] mergingChannels = IntStream.range(0, extractFields.size()).map(i -> i + 2).toArray(); + final MergePositionsOperator mergePositionsOperator; + final OrdinalBytesRefBlock ordinalsBytesRefBlock; + if (inputBlock instanceof BytesRefBlock bytesRefBlock && (ordinalsBytesRefBlock = bytesRefBlock.asOrdinals()) != null) { + inputBlock = ordinalsBytesRefBlock.getDictionaryVector().asBlock(); + var selectedPositions = ordinalsBytesRefBlock.getOrdinalsBlock(); + mergePositionsOperator = new MergePositionsOperator( + 1, + mergingChannels, + mergingTypes, + selectedPositions, + driverContext.blockFactory() + ); + + } else { + try (var selectedPositions = IntVector.range(0, inputBlock.getPositionCount(), blockFactory).asBlock()) { + mergePositionsOperator = new MergePositionsOperator( + 1, + mergingChannels, + mergingTypes, + selectedPositions, + driverContext.blockFactory() + ); + } + } + releasables.add(mergePositionsOperator); SearchExecutionContext searchExecutionContext = searchContext.getSearchExecutionContext(); MappedFieldType fieldType = searchExecutionContext.getFieldType(matchField); var queryList = switch (matchType) { @@ -284,57 +307,13 @@ private void doLookup( queryList, searchExecutionContext.getIndexReader() ); - List intermediateOperators = new ArrayList<>(extractFields.size() + 2); - final ElementType[] mergingTypes = new ElementType[extractFields.size()]; - // load the fields - List fields = new ArrayList<>(extractFields.size()); - for (int i = 0; i < extractFields.size(); i++) { - NamedExpression extractField = extractFields.get(i); - final ElementType elementType = PlannerUtils.toElementType(extractField.dataType()); - mergingTypes[i] = elementType; - EsPhysicalOperationProviders.ShardContext ctx = new EsPhysicalOperationProviders.DefaultShardContext( - 0, - searchContext.getSearchExecutionContext(), - searchContext.request().getAliasFilter() - ); - BlockLoader loader = ctx.blockLoader( - extractField instanceof Alias a ? 
((NamedExpression) a.child()).name() : extractField.name(), - EsqlDataTypes.isUnsupported(extractField.dataType()), - MappedFieldType.FieldExtractPreference.NONE - ); - fields.add( - new ValuesSourceReaderOperator.FieldInfo( - extractField.name(), - PlannerUtils.toElementType(extractField.dataType()), - shardIdx -> { - if (shardIdx != 0) { - throw new IllegalStateException("only one shard"); - } - return loader; - } - ) - ); - } - intermediateOperators.add( - new ValuesSourceReaderOperator( - driverContext.blockFactory(), - fields, - List.of( - new ValuesSourceReaderOperator.ShardContext( - searchContext.searcher().getIndexReader(), - searchContext::newSourceLoader - ) - ), - 0 - ) - ); - // merging field-values by position - final int[] mergingChannels = IntStream.range(0, extractFields.size()).map(i -> i + 2).toArray(); - intermediateOperators.add( - new MergePositionsOperator(1, mergingChannels, mergingTypes, selectedPositions, driverContext.blockFactory()) - ); + releasables.add(queryOperator); + var extractFieldsOperator = extractFieldsOperator(searchContext, driverContext, extractFields); + releasables.add(extractFieldsOperator); + AtomicReference result = new AtomicReference<>(); OutputOperator outputOperator = new OutputOperator(List.of(), Function.identity(), result::set); + releasables.add(outputOperator); Driver driver = new Driver( "enrich-lookup:" + sessionId, System.currentTimeMillis(), @@ -350,18 +329,16 @@ private void doLookup( inputPage.getPositionCount() ), queryOperator, - intermediateOperators, + List.of(extractFieldsOperator, mergePositionsOperator), outputOperator, Driver.DEFAULT_STATUS_INTERVAL, - localBreaker + Releasables.wrap(searchContext, localBreaker) ); task.addListener(() -> { String reason = Objects.requireNonNullElse(task.getReasonCancelled(), "task was cancelled"); driver.cancel(reason); }); - var threadContext = transportService.getThreadPool().getThreadContext(); - localBreaker = null; Driver.start(threadContext, executor, driver, Driver.DEFAULT_MAX_ITERATIONS, listener.map(ignored -> { Page out = result.get(); if (out == null) { @@ -369,11 +346,52 @@ private void doLookup( } return out; })); + started = true; } catch (Exception e) { listener.onFailure(e); } finally { - Releasables.close(selectedPositions, localBreaker); + if (started == false) { + Releasables.close(releasables); + } + } + } + + private static Operator extractFieldsOperator( + SearchContext searchContext, + DriverContext driverContext, + List extractFields + ) { + EsPhysicalOperationProviders.ShardContext shardContext = new EsPhysicalOperationProviders.DefaultShardContext( + 0, + searchContext.getSearchExecutionContext(), + searchContext.request().getAliasFilter() + ); + List fields = new ArrayList<>(extractFields.size()); + for (NamedExpression extractField : extractFields) { + BlockLoader loader = shardContext.blockLoader( + extractField instanceof Alias a ? 
((NamedExpression) a.child()).name() : extractField.name(), + extractField.dataType() == DataType.UNSUPPORTED, + MappedFieldType.FieldExtractPreference.NONE + ); + fields.add( + new ValuesSourceReaderOperator.FieldInfo( + extractField.name(), + PlannerUtils.toElementType(extractField.dataType()), + shardIdx -> { + if (shardIdx != 0) { + throw new IllegalStateException("only one shard"); + } + return loader; + } + ) + ); } + return new ValuesSourceReaderOperator( + driverContext.blockFactory(), + fields, + List.of(new ValuesSourceReaderOperator.ShardContext(searchContext.searcher().getIndexReader(), searchContext::newSourceLoader)), + 0 + ); } private Page createNullResponse(int positionCount, List extractFields) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/ResolvedEnrichPolicy.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/ResolvedEnrichPolicy.java index e53d11854cc63..44443973764e6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/ResolvedEnrichPolicy.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/ResolvedEnrichPolicy.java @@ -11,7 +11,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xpack.esql.core.type.EsField; -import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import java.io.IOException; import java.util.List; @@ -30,7 +29,7 @@ public ResolvedEnrichPolicy(StreamInput in) throws IOException { in.readString(), in.readStringCollectionAsList(), in.readMap(StreamInput::readString), - in.readMap(StreamInput::readString, ResolvedEnrichPolicy::readEsField) + in.readMap(EsField::new) ); } @@ -40,25 +39,13 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(matchType); out.writeStringCollection(enrichFields); out.writeMap(concreteIndices, StreamOutput::writeString); - out.writeMap(mapping, ResolvedEnrichPolicy::writeEsField); - } - - // TODO: we should have made EsField and DataType Writable, but write it as NamedWritable in PlanStreamInput - private static void writeEsField(StreamOutput out, EsField field) throws IOException { - out.writeString(field.getName()); - out.writeString(field.getDataType().typeName()); - out.writeMap(field.getProperties(), ResolvedEnrichPolicy::writeEsField); - out.writeBoolean(field.isAggregatable()); - out.writeBoolean(field.isAlias()); - } - - private static EsField readEsField(StreamInput in) throws IOException { - return new EsField( - in.readString(), - EsqlDataTypes.fromTypeName(in.readString()), - in.readMap(ResolvedEnrichPolicy::readEsField), - in.readBoolean(), - in.readBoolean() + out.writeMap( + mapping, + /* + * There are lots of subtypes of ESField, but we always write the field + * as though it were the base class. 
+ */ + (o, v) -> new EsField(v.getName(), v.getDataType(), v.getProperties(), v.isAggregatable(), v.isAlias()).writeTo(o) ); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java index f4979fa9928db..4e07c3084ab7b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java @@ -11,7 +11,6 @@ import org.elasticsearch.xpack.esql.action.EsqlQueryRequest; import org.elasticsearch.xpack.esql.analysis.PreAnalyzer; import org.elasticsearch.xpack.esql.analysis.Verifier; -import org.elasticsearch.xpack.esql.core.expression.function.FunctionRegistry; import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolver; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.optimizer.LogicalOptimizerContext; @@ -21,16 +20,19 @@ import org.elasticsearch.xpack.esql.session.EsqlConfiguration; import org.elasticsearch.xpack.esql.session.EsqlSession; import org.elasticsearch.xpack.esql.session.IndexResolver; +import org.elasticsearch.xpack.esql.session.Result; import org.elasticsearch.xpack.esql.stats.Metrics; import org.elasticsearch.xpack.esql.stats.QueryMetric; +import java.util.function.BiConsumer; + import static org.elasticsearch.action.ActionListener.wrap; public class PlanExecutor { private final IndexResolver indexResolver; private final PreAnalyzer preAnalyzer; - private final FunctionRegistry functionRegistry; + private final EsqlFunctionRegistry functionRegistry; private final Mapper mapper; private final Metrics metrics; private final Verifier verifier; @@ -49,7 +51,8 @@ public void esql( String sessionId, EsqlConfiguration cfg, EnrichPolicyResolver enrichPolicyResolver, - ActionListener listener + BiConsumer> runPhase, + ActionListener listener ) { final var session = new EsqlSession( sessionId, @@ -64,7 +67,7 @@ public void esql( ); QueryMetric clientId = QueryMetric.fromString("rest"); metrics.total(clientId); - session.execute(request, wrap(listener::onResponse, ex -> { + session.execute(request, runPhase, wrap(listener::onResponse, ex -> { // TODO when we decide if we will differentiate Kibana from REST, this String value will likely come from the request metrics.failed(clientId); listener.onFailure(ex); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/Order.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/Order.java index 11a98d3a11504..dd51c5ba6473a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/Order.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/Order.java @@ -15,7 +15,6 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; -import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import java.io.IOException; import java.util.List; @@ -30,7 +29,7 @@ public Order(Source source, Expression child, OrderDirection direction, NullsPos public Order(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), - ((PlanStreamInput) in).readExpression(), + in.readNamedWriteable(Expression.class), 
in.readEnum(org.elasticsearch.xpack.esql.core.expression.Order.OrderDirection.class), in.readEnum(org.elasticsearch.xpack.esql.core.expression.Order.NullsPosition.class) ); @@ -39,7 +38,7 @@ public Order(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { Source.EMPTY.writeTo(out); - ((PlanStreamOutput) out).writeExpression(child()); + out.writeNamedWriteable(child()); out.writeEnum(direction()); out.writeEnum(nullsPosition()); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/Validations.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/Validations.java index dffa723a1f3dd..ffcc26cb6f188 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/Validations.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/Validations.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.esql.expression; -import org.elasticsearch.xpack.esql.core.common.Failure; +import org.elasticsearch.xpack.esql.common.Failure; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expression.TypeResolution; import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index 7034f23be1662..9a4236cbd96fd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -7,11 +7,17 @@ package org.elasticsearch.xpack.esql.expression.function; +import org.elasticsearch.Build; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.xpack.esql.core.ParsingException; +import org.elasticsearch.xpack.esql.core.QlIllegalArgumentException; +import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.function.Function; -import org.elasticsearch.xpack.esql.core.expression.function.FunctionDefinition; -import org.elasticsearch.xpack.esql.core.expression.function.FunctionRegistry; import org.elasticsearch.xpack.esql.core.session.Configuration; +import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.util.Check; import org.elasticsearch.xpack.esql.expression.function.aggregate.Avg; import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; import org.elasticsearch.xpack.esql.expression.function.aggregate.CountDistinct; @@ -20,10 +26,12 @@ import org.elasticsearch.xpack.esql.expression.function.aggregate.MedianAbsoluteDeviation; import org.elasticsearch.xpack.esql.expression.function.aggregate.Min; import org.elasticsearch.xpack.esql.expression.function.aggregate.Percentile; +import org.elasticsearch.xpack.esql.expression.function.aggregate.Rate; import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroid; import org.elasticsearch.xpack.esql.expression.function.aggregate.Sum; -import org.elasticsearch.xpack.esql.expression.function.aggregate.TopList; +import org.elasticsearch.xpack.esql.expression.function.aggregate.Top; import 
org.elasticsearch.xpack.esql.expression.function.aggregate.Values; +import org.elasticsearch.xpack.esql.expression.function.aggregate.WeightedAvg; import org.elasticsearch.xpack.esql.expression.function.grouping.Bucket; import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Case; import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Greatest; @@ -95,6 +103,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialDisjoint; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialIntersects; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialWithin; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StDistance; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StX; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StY; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; @@ -118,13 +127,19 @@ import java.lang.reflect.Constructor; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.function.BiFunction; +import java.util.regex.Pattern; import java.util.stream.Collectors; +import static java.util.Collections.emptyList; +import static java.util.Collections.unmodifiableList; +import static java.util.stream.Collectors.toList; import static org.elasticsearch.xpack.esql.core.type.DataType.BOOLEAN; import static org.elasticsearch.xpack.esql.core.type.DataType.CARTESIAN_POINT; import static org.elasticsearch.xpack.esql.core.type.DataType.CARTESIAN_SHAPE; @@ -141,7 +156,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.UNSUPPORTED; import static org.elasticsearch.xpack.esql.core.type.DataType.VERSION; -public final class EsqlFunctionRegistry extends FunctionRegistry { +public class EsqlFunctionRegistry { private static final Map, List> dataTypesForStringLiteralConversion = new LinkedHashMap<>(); @@ -169,6 +184,17 @@ public final class EsqlFunctionRegistry extends FunctionRegistry { } } + // Translation table for error messaging in the following function + private static final String[] NUM_NAMES = { "zero", "one", "two", "three", "four", "five", }; + + // list of functions grouped by type of functions (aggregate, statistics, math etc) and ordered alphabetically inside each group + // a single function will have one entry for itself with its name associated to its instance and, also, one entry for each alias + // it has with the alias name associated to the FunctionDefinition instance + private final Map defs = new LinkedHashMap<>(); + private final Map aliases = new HashMap<>(); + + private SnapshotFunctionRegistry snapshotRegistry = null; + public EsqlFunctionRegistry() { register(functions()); buildDataTypesForStringLiteralConversion(functions()); @@ -178,6 +204,42 @@ public EsqlFunctionRegistry() { register(functions); } + public FunctionDefinition resolveFunction(String functionName) { + FunctionDefinition def = defs.get(functionName); + if (def == null) { + throw new QlIllegalArgumentException("Cannot find function {}; this should have been caught during analysis", functionName); + } + return def; + } + + private String normalize(String name) { + return name.toLowerCase(Locale.ROOT); + } + + public String resolveAlias(String alias) { + String normalized = normalize(alias); + return 
aliases.getOrDefault(normalized, normalized); + } + + public boolean functionExists(String functionName) { + return defs.containsKey(functionName); + } + + public Collection listFunctions() { + // It is worth double checking if we need this copy. These are immutable anyway. + return defs.values(); + } + + public Collection listFunctions(String pattern) { + // It is worth double checking if we need this copy. These are immutable anyway. + Pattern p = Strings.hasText(pattern) ? Pattern.compile(normalize(pattern)) : null; + return defs.entrySet() + .stream() + .filter(e -> p == null || p.matcher(e.getKey()).matches()) + .map(e -> cloneDefinition(e.getKey(), e.getValue())) + .collect(toList()); + } + private FunctionDefinition[][] functions() { return new FunctionDefinition[][] { // grouping functions @@ -193,8 +255,9 @@ private FunctionDefinition[][] functions() { def(Min.class, Min::new, "min"), def(Percentile.class, Percentile::new, "percentile"), def(Sum.class, Sum::new, "sum"), - def(TopList.class, TopList::new, "top_list"), - def(Values.class, Values::new, "values") }, + def(Top.class, Top::new, "top"), + def(Values.class, Values::new, "values"), + def(WeightedAvg.class, WeightedAvg::new, "weighted_avg") }, // math new FunctionDefinition[] { def(Abs.class, Abs::new, "abs"), @@ -254,6 +317,7 @@ private FunctionDefinition[][] functions() { def(SpatialDisjoint.class, SpatialDisjoint::new, "st_disjoint"), def(SpatialIntersects.class, SpatialIntersects::new, "st_intersects"), def(SpatialWithin.class, SpatialWithin::new, "st_within"), + def(StDistance.class, StDistance::new, "st_distance"), def(StX.class, StX::new, "st_x"), def(StY.class, StY::new, "st_y") }, // conditional @@ -301,9 +365,20 @@ private FunctionDefinition[][] functions() { def(Split.class, Split::new, "split") } }; } - @Override - protected String normalize(String name) { - return normalizeName(name); + private static FunctionDefinition[][] snapshotFunctions() { + return new FunctionDefinition[][] { new FunctionDefinition[] { def(Rate.class, Rate::withUnresolvedTimestamp, "rate") } }; + } + + public EsqlFunctionRegistry snapshotRegistry() { + if (Build.current().isSnapshot() == false) { + return this; + } + var snapshotRegistry = this.snapshotRegistry; + if (snapshotRegistry == null) { + snapshotRegistry = new SnapshotFunctionRegistry(); + this.snapshotRegistry = snapshotRegistry; + } + return snapshotRegistry; } public static String normalizeName(String name) { @@ -443,4 +518,381 @@ private void buildDataTypesForStringLiteralConversion(FunctionDefinition[]... gr public List getDataTypeForStringLiteralConversion(Class clazz) { return dataTypesForStringLiteralConversion.get(clazz); } + + private static class SnapshotFunctionRegistry extends EsqlFunctionRegistry { + SnapshotFunctionRegistry() { + if (Build.current().isSnapshot() == false) { + throw new IllegalStateException("build snapshot function registry for non-snapshot build"); + } + register(snapshotFunctions()); + } + } + + void register(FunctionDefinition[]... groupFunctions) { + for (FunctionDefinition[] group : groupFunctions) { + register(group); + } + } + + void register(FunctionDefinition... 
functions) { + // temporary map to hold [function_name/alias_name : function instance] + Map batchMap = new HashMap<>(); + for (FunctionDefinition f : functions) { + batchMap.put(f.name(), f); + for (String alias : f.aliases()) { + Object old = batchMap.put(alias, f); + if (old != null || defs.containsKey(alias)) { + throw new QlIllegalArgumentException( + "alias [" + + alias + + "] is used by " + + "[" + + (old != null ? old : defs.get(alias).name()) + + "] and [" + + f.name() + + "]" + ); + } + aliases.put(alias, f.name()); + } + } + // sort the temporary map by key name and add it to the global map of functions + defs.putAll( + batchMap.entrySet() + .stream() + .sorted(Map.Entry.comparingByKey()) + .collect( + Collectors.< + Map.Entry, + String, + FunctionDefinition, + LinkedHashMap>toMap( + Map.Entry::getKey, + Map.Entry::getValue, + (oldValue, newValue) -> oldValue, + LinkedHashMap::new + ) + ) + ); + } + + protected FunctionDefinition cloneDefinition(String name, FunctionDefinition definition) { + return new FunctionDefinition(name, emptyList(), definition.clazz(), definition.builder()); + } + + protected interface FunctionBuilder { + Function build(Source source, List children, Configuration cfg); + } + + /** + * Main method to register a function. + * + * @param names Must always have at least one entry which is the method's primary name + */ + @SuppressWarnings("overloads") + protected static FunctionDefinition def(Class function, FunctionBuilder builder, String... names) { + Check.isTrue(names.length > 0, "At least one name must be provided for the function"); + String primaryName = names[0]; + List aliases = Arrays.asList(names).subList(1, names.length); + FunctionDefinition.Builder realBuilder = (uf, cfg, extras) -> { + if (CollectionUtils.isEmpty(extras) == false) { + throw new ParsingException( + uf.source(), + "Unused parameters {} detected when building [{}]", + Arrays.toString(extras), + primaryName + ); + } + try { + return builder.build(uf.source(), uf.children(), cfg); + } catch (QlIllegalArgumentException e) { + throw new ParsingException(e, uf.source(), "error building [{}]: {}", primaryName, e.getMessage()); + } + }; + return new FunctionDefinition(primaryName, unmodifiableList(aliases), function, realBuilder); + } + + /** + * Build a {@linkplain FunctionDefinition} for a no-argument function. + */ + public static FunctionDefinition def( + Class function, + java.util.function.Function ctorRef, + String... names + ) { + FunctionBuilder builder = (source, children, cfg) -> { + if (false == children.isEmpty()) { + throw new QlIllegalArgumentException("expects no arguments"); + } + return ctorRef.apply(source); + }; + return def(function, builder, names); + } + + /** + * Build a {@linkplain FunctionDefinition} for a unary function. + */ + @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do + public static FunctionDefinition def( + Class function, + BiFunction ctorRef, + String... names + ) { + FunctionBuilder builder = (source, children, cfg) -> { + if (children.size() != 1) { + throw new QlIllegalArgumentException("expects exactly one argument"); + } + return ctorRef.apply(source, children.get(0)); + }; + return def(function, builder, names); + } + + /** + * Build a {@linkplain FunctionDefinition} for multi-arg/n-ary function. 
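+     * For example (names here are illustrative only, not part of this change): {@code def(MyConcat.class, MyConcat::new, "my_concat")} wires a constructor reference so the resulting builder simply forwards the full child list, leaving arity validation to the function itself.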
+ */ + @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do + protected FunctionDefinition def(Class function, NaryBuilder ctorRef, String... names) { + FunctionBuilder builder = (source, children, cfg) -> { return ctorRef.build(source, children); }; + return def(function, builder, names); + } + + protected interface NaryBuilder { + T build(Source source, List children); + } + + /** + * Build a {@linkplain FunctionDefinition} for a binary function. + */ + @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do + public static FunctionDefinition def(Class function, BinaryBuilder ctorRef, String... names) { + FunctionBuilder builder = (source, children, cfg) -> { + boolean isBinaryOptionalParamFunction = OptionalArgument.class.isAssignableFrom(function); + if (isBinaryOptionalParamFunction && (children.size() > 2 || children.size() < 1)) { + throw new QlIllegalArgumentException("expects one or two arguments"); + } else if (isBinaryOptionalParamFunction == false && children.size() != 2) { + throw new QlIllegalArgumentException("expects exactly two arguments"); + } + + return ctorRef.build(source, children.get(0), children.size() == 2 ? children.get(1) : null); + }; + return def(function, builder, names); + } + + public interface BinaryBuilder { + T build(Source source, Expression left, Expression right); + } + + /** + * Build a {@linkplain FunctionDefinition} for a ternary function. + */ + @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do + protected static FunctionDefinition def(Class function, TernaryBuilder ctorRef, String... names) { + FunctionBuilder builder = (source, children, cfg) -> { + boolean hasMinimumTwo = OptionalArgument.class.isAssignableFrom(function); + if (hasMinimumTwo && (children.size() > 3 || children.size() < 2)) { + throw new QlIllegalArgumentException("expects two or three arguments"); + } else if (hasMinimumTwo == false && children.size() != 3) { + throw new QlIllegalArgumentException("expects exactly three arguments"); + } + return ctorRef.build(source, children.get(0), children.get(1), children.size() == 3 ? children.get(2) : null); + }; + return def(function, builder, names); + } + + protected interface TernaryBuilder { + T build(Source source, Expression one, Expression two, Expression three); + } + + /** + * Build a {@linkplain FunctionDefinition} for a quaternary function. + */ + @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do + protected static FunctionDefinition def(Class function, QuaternaryBuilder ctorRef, String... names) { + FunctionBuilder builder = (source, children, cfg) -> { + if (OptionalArgument.class.isAssignableFrom(function)) { + if (children.size() > 4 || children.size() < 3) { + throw new QlIllegalArgumentException("expects three or four arguments"); + } + } else if (TwoOptionalArguments.class.isAssignableFrom(function)) { + if (children.size() > 4 || children.size() < 2) { + throw new QlIllegalArgumentException("expects minimum two, maximum four arguments"); + } + } else if (children.size() != 4) { + throw new QlIllegalArgumentException("expects exactly four arguments"); + } + return ctorRef.build( + source, + children.get(0), + children.get(1), + children.size() > 2 ? children.get(2) : null, + children.size() > 3 ? 
children.get(3) : null + ); + }; + return def(function, builder, names); + } + + protected interface QuaternaryBuilder { + T build(Source source, Expression one, Expression two, Expression three, Expression four); + } + + /** + * Build a {@linkplain FunctionDefinition} for a quinary function. + */ + @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do + protected static FunctionDefinition def( + Class function, + QuinaryBuilder ctorRef, + int numOptionalParams, + String... names + ) { + FunctionBuilder builder = (source, children, cfg) -> { + final int NUM_TOTAL_PARAMS = 5; + boolean hasOptionalParams = OptionalArgument.class.isAssignableFrom(function); + if (hasOptionalParams && (children.size() > NUM_TOTAL_PARAMS || children.size() < NUM_TOTAL_PARAMS - numOptionalParams)) { + throw new QlIllegalArgumentException( + "expects between " + + NUM_NAMES[NUM_TOTAL_PARAMS - numOptionalParams] + + " and " + + NUM_NAMES[NUM_TOTAL_PARAMS] + + " arguments" + ); + } else if (hasOptionalParams == false && children.size() != NUM_TOTAL_PARAMS) { + throw new QlIllegalArgumentException("expects exactly " + NUM_NAMES[NUM_TOTAL_PARAMS] + " arguments"); + } + return ctorRef.build( + source, + children.size() > 0 ? children.get(0) : null, + children.size() > 1 ? children.get(1) : null, + children.size() > 2 ? children.get(2) : null, + children.size() > 3 ? children.get(3) : null, + children.size() > 4 ? children.get(4) : null + ); + }; + return def(function, builder, names); + } + + protected interface QuinaryBuilder { + T build(Source source, Expression one, Expression two, Expression three, Expression four, Expression five); + } + + /** + * Build a {@linkplain FunctionDefinition} for functions with a mandatory argument followed by a varidic list. + */ + @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do + protected static FunctionDefinition def(Class function, UnaryVariadicBuilder ctorRef, String... names) { + FunctionBuilder builder = (source, children, cfg) -> { + boolean hasMinimumOne = OptionalArgument.class.isAssignableFrom(function); + if (hasMinimumOne && children.size() < 1) { + throw new QlIllegalArgumentException("expects at least one argument"); + } else if (hasMinimumOne == false && children.size() < 2) { + throw new QlIllegalArgumentException("expects at least two arguments"); + } + return ctorRef.build(source, children.get(0), children.subList(1, children.size())); + }; + return def(function, builder, names); + } + + protected interface UnaryVariadicBuilder { + T build(Source source, Expression exp, List variadic); + } + + /** + * Build a {@linkplain FunctionDefinition} for a no-argument function that is configuration aware. + */ + @SuppressWarnings("overloads") + protected static FunctionDefinition def(Class function, ConfigurationAwareBuilder ctorRef, String... names) { + FunctionBuilder builder = (source, children, cfg) -> { + if (false == children.isEmpty()) { + throw new QlIllegalArgumentException("expects no arguments"); + } + return ctorRef.build(source, cfg); + }; + return def(function, builder, names); + } + + protected interface ConfigurationAwareBuilder { + T build(Source source, Configuration configuration); + } + + /** + * Build a {@linkplain FunctionDefinition} for a one-argument function that is configuration aware. + */ + @SuppressWarnings("overloads") + public static FunctionDefinition def( + Class function, + UnaryConfigurationAwareBuilder ctorRef, + String... 
names + ) { + FunctionBuilder builder = (source, children, cfg) -> { + if (children.size() > 1) { + throw new QlIllegalArgumentException("expects exactly one argument"); + } + Expression ex = children.size() == 1 ? children.get(0) : null; + return ctorRef.build(source, ex, cfg); + }; + return def(function, builder, names); + } + + public interface UnaryConfigurationAwareBuilder { + T build(Source source, Expression exp, Configuration configuration); + } + + /** + * Build a {@linkplain FunctionDefinition} for a binary function that is configuration aware. + */ + @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do + protected static FunctionDefinition def( + Class function, + BinaryConfigurationAwareBuilder ctorRef, + String... names + ) { + FunctionBuilder builder = (source, children, cfg) -> { + boolean isBinaryOptionalParamFunction = OptionalArgument.class.isAssignableFrom(function); + if (isBinaryOptionalParamFunction && (children.size() > 2 || children.size() < 1)) { + throw new QlIllegalArgumentException("expects one or two arguments"); + } else if (isBinaryOptionalParamFunction == false && children.size() != 2) { + throw new QlIllegalArgumentException("expects exactly two arguments"); + } + return ctorRef.build(source, children.get(0), children.size() == 2 ? children.get(1) : null, cfg); + }; + return def(function, builder, names); + } + + protected interface BinaryConfigurationAwareBuilder { + T build(Source source, Expression left, Expression right, Configuration configuration); + } + + /** + * Build a {@linkplain FunctionDefinition} for a ternary function that is configuration aware. + */ + @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do + protected FunctionDefinition def(Class function, TernaryConfigurationAwareBuilder ctorRef, String... names) { + FunctionBuilder builder = (source, children, cfg) -> { + boolean hasMinimumTwo = OptionalArgument.class.isAssignableFrom(function); + if (hasMinimumTwo && (children.size() > 3 || children.size() < 2)) { + throw new QlIllegalArgumentException("expects two or three arguments"); + } else if (hasMinimumTwo == false && children.size() != 3) { + throw new QlIllegalArgumentException("expects exactly three arguments"); + } + return ctorRef.build(source, children.get(0), children.get(1), children.size() == 3 ? children.get(2) : null, cfg); + }; + return def(function, builder, names); + } + + protected interface TernaryConfigurationAwareBuilder { + T build(Source source, Expression one, Expression two, Expression three, Configuration configuration); + } + + // + // Utility method for extra argument extraction. 
+ // + protected static Boolean asBool(Object[] extras) { + if (CollectionUtils.isEmpty(extras)) { + return null; + } + if (extras.length != 1 || (extras[0] instanceof Boolean) == false) { + throw new QlIllegalArgumentException("Invalid number and types of arguments given to function definition"); + } + return (Boolean) extras[0]; + } } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/FunctionDefinition.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/FunctionDefinition.java similarity index 87% rename from x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/FunctionDefinition.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/FunctionDefinition.java index 09f68c5c9b4a3..d93fc077dece4 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/FunctionDefinition.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/FunctionDefinition.java @@ -4,8 +4,9 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. */ -package org.elasticsearch.xpack.esql.core.expression.function; +package org.elasticsearch.xpack.esql.expression.function; +import org.elasticsearch.xpack.esql.core.expression.function.Function; import org.elasticsearch.xpack.esql.core.session.Configuration; import java.util.List; @@ -14,7 +15,7 @@ public class FunctionDefinition { /** - * Converts an {@link UnresolvedFunction} into the a proper {@link Function}. + * Converts an {@link UnresolvedFunction} into a proper {@link Function}. *
<p>
      * Provides the basic signature (unresolved function + runtime configuration object) while * allowing extensions through the vararg extras which subclasses should expand for their @@ -49,7 +50,7 @@ public Class clazz() { return clazz; } - protected Builder builder() { + public Builder builder() { return builder; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/FunctionInfo.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/FunctionInfo.java index 98c191eddfb06..801d18b7a3801 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/FunctionInfo.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/FunctionInfo.java @@ -25,7 +25,7 @@ /** * The description of the function rendered in {@code META FUNCTIONS} - * and the docs. + * and the docs. These should be complete sentences. */ String description() default ""; diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/FunctionResolutionStrategy.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/FunctionResolutionStrategy.java similarity index 91% rename from x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/FunctionResolutionStrategy.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/FunctionResolutionStrategy.java index a23112267dcf4..4e7f47db0b252 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/FunctionResolutionStrategy.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/FunctionResolutionStrategy.java @@ -5,8 +5,9 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.core.expression.function; +package org.elasticsearch.xpack.esql.expression.function; +import org.elasticsearch.xpack.esql.core.expression.function.Function; import org.elasticsearch.xpack.esql.core.session.Configuration; /** diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/OptionalArgument.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/OptionalArgument.java similarity index 71% rename from x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/OptionalArgument.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/OptionalArgument.java index 90d1d06337330..ba80395281203 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/OptionalArgument.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/OptionalArgument.java @@ -5,11 +5,11 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.core.expression.function; +package org.elasticsearch.xpack.esql.expression.function; /** * Marker interface indicating that a function accepts one optional argument (typically the last one). - * This is used by the {@link FunctionRegistry} to perform validation of function declaration. + * This is used by the {@link EsqlFunctionRegistry} to perform validation of function declaration. 
*/ public interface OptionalArgument { diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/TwoOptionalArguments.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/TwoOptionalArguments.java similarity index 71% rename from x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/TwoOptionalArguments.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/TwoOptionalArguments.java index 78684f034f448..38bb23285e491 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/TwoOptionalArguments.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/TwoOptionalArguments.java @@ -5,11 +5,11 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.core.expression.function; +package org.elasticsearch.xpack.esql.expression.function; /** * Marker interface indicating that a function accepts two optional arguments (the last two). - * This is used by the {@link FunctionRegistry} to perform validation of function declaration. + * This is used by the {@link EsqlFunctionRegistry} to perform validation of function declaration. */ public interface TwoOptionalArguments { diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/UnresolvedFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnresolvedFunction.java similarity index 91% rename from x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/UnresolvedFunction.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnresolvedFunction.java index 012c39e26d904..ab3475635ddbd 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/UnresolvedFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnresolvedFunction.java @@ -4,18 +4,21 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.xpack.esql.core.expression.function; +package org.elasticsearch.xpack.esql.expression.function; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.capabilities.Unresolvable; import org.elasticsearch.xpack.esql.core.capabilities.UnresolvedException; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Nullability; +import org.elasticsearch.xpack.esql.core.expression.function.Function; import org.elasticsearch.xpack.esql.core.session.Configuration; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.StringUtils; +import java.io.IOException; import java.util.LinkedHashSet; import java.util.List; import java.util.Objects; @@ -38,13 +41,23 @@ public UnresolvedFunction(Source source, String name, FunctionResolutionStrategy this(source, name, resolutionStrategy, children, false, null); } + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + /** * Constructor used for specifying a more descriptive message (typically * 'did you mean') instead of the default one. * * @see #withMessage(String) */ - UnresolvedFunction( + public UnresolvedFunction( Source source, String name, FunctionResolutionStrategy resolutionStrategy, diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java index 79dcc6a3d3920..22c4aa9c6bf07 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.capabilities.Unresolvable; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; @@ -42,6 +43,11 @@ public final class UnsupportedAttribute extends FieldAttribute implements Unreso ENTRY.name, UnsupportedAttribute::new ); + public static final NamedWriteableRegistry.Entry EXPRESSION_ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + ENTRY.name, + UnsupportedAttribute::new + ); private final String message; private final boolean hasCustomMessage; // TODO remove me and just use message != null? 
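The UnsupportedAttribute.EXPRESSION_ENTRY above, like the other NamedWriteableRegistry.Entry constants introduced in this change, is consumed through Elasticsearch's standard NamedWriteable round trip: the writer records getWriteableName(), and the reader resolves that name against a NamedWriteableRegistry. The minimal sketch below shows that mechanism under stated assumptions: it uses a toy Label writeable rather than a real ESQL Expression (whose stream constructors additionally expect a PlanStreamInput), so the type and helper names are illustrative only.

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.List;

class NamedWriteableRoundTripSketch {

    // Toy stand-in for the Expression subclasses that now expose a NamedWriteableRegistry.Entry.
    record Label(String value) implements NamedWriteable {
        static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
            Label.class,
            "Label",
            in -> new Label(in.readString())
        );

        @Override
        public String getWriteableName() {
            return "Label"; // must match ENTRY.name, mirroring getWriteableName() in Avg, Count, etc.
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(value);
        }
    }

    // Hypothetical helper: serialize a writeable and read it back through a registry.
    // In ESQL, lists such as AggregateFunction.getNamedWriteables() supply the registry entries.
    static Label roundTrip(Label label) throws IOException {
        NamedWriteableRegistry registry = new NamedWriteableRegistry(List.of(Label.ENTRY));
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            out.writeNamedWriteable(label); // writes the name, then the payload
            try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry)) {
                return in.readNamedWriteable(Label.class); // resolves the reader by category class + name
            }
        }
    }
}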
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AggregateFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AggregateFunction.java index 0df1ae078171d..f0acac0e9744e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AggregateFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AggregateFunction.java @@ -6,12 +6,17 @@ */ package org.elasticsearch.xpack.esql.expression.function.aggregate; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; import org.elasticsearch.xpack.esql.core.expression.function.Function; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.util.CollectionUtils; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import java.io.IOException; import java.util.List; import java.util.Objects; @@ -23,6 +28,27 @@ * A type of {@code Function} that takes multiple values and extracts a single value out of them. For example, {@code AVG()}. */ public abstract class AggregateFunction extends Function { + public static List getNamedWriteables() { + return List.of( + Avg.ENTRY, + Count.ENTRY, + CountDistinct.ENTRY, + Max.ENTRY, + Median.ENTRY, + MedianAbsoluteDeviation.ENTRY, + Min.ENTRY, + Percentile.ENTRY, + Rate.ENTRY, + SpatialCentroid.ENTRY, + Sum.ENTRY, + Top.ENTRY, + Values.ENTRY, + // internal functions + ToPartial.ENTRY, + FromPartial.ENTRY, + WeightedAvg.ENTRY + ); + } private final Expression field; private final List parameters; @@ -37,6 +63,16 @@ protected AggregateFunction(Source source, Expression field, List parameters() { return parameters; } + /** + * Returns the input expressions used in aggregation. + * Defaults to a list containing the only the input field. 
+ */ + public List inputExpressions() { + return List.of(field); + } + @Override protected TypeResolution resolveType() { return TypeResolutions.isExact(field, sourceText(), DEFAULT); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Avg.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Avg.java index ee75980e10264..b5c0b8e5ffdc8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Avg.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Avg.java @@ -7,24 +7,42 @@ package org.elasticsearch.xpack.esql.expression.function.aggregate; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.SurrogateExpression; +import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvAvg; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Div; +import java.io.IOException; import java.util.List; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType; public class Avg extends AggregateFunction implements SurrogateExpression { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Avg", Avg::new); - @FunctionInfo(returnType = "double", description = "The average of a numeric field.", isAggregation = true) + @FunctionInfo( + returnType = "double", + description = "The average of a numeric field.", + isAggregation = true, + examples = { + @Example(file = "stats", tag = "avg"), + @Example( + description = "The expression can use inline functions. 
For example, to calculate the average " + + "over a multivalued column, first use `MV_AVG` to average the multiple values per row, " + + "and use the result with the `AVG` function", + file = "stats", + tag = "docsStatsAvgNestedExpression" + ) } + ) public Avg(Source source, @Param(name = "number", type = { "double", "integer", "long" }) Expression field) { super(source, field); } @@ -40,6 +58,15 @@ protected Expression.TypeResolution resolveType() { ); } + private Avg(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public DataType dataType() { return DataType.DOUBLE; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Count.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Count.java index d55bc9d618c39..52e053f843e14 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Count.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Count.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.esql.expression.function.aggregate; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.CountAggregatorFunction; import org.elasticsearch.xpack.esql.core.expression.Expression; @@ -23,14 +25,15 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mul; import org.elasticsearch.xpack.esql.planner.ToAggregator; -import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import java.io.IOException; import java.util.List; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType; public class Count extends AggregateFunction implements EnclosedAgg, ToAggregator, SurrogateExpression { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Count", Count::new); @FunctionInfo(returnType = "long", description = "Returns the total number (count) of input values.", isAggregation = true) public Count( @@ -57,6 +60,15 @@ public Count( super(source, field); } + private Count(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override protected NodeInfo info() { return NodeInfo.create(this, Count::new, field()); @@ -89,7 +101,7 @@ public Nullability nullable() { @Override protected TypeResolution resolveType() { - return isType(field(), dt -> EsqlDataTypes.isCounterType(dt) == false, sourceText(), DEFAULT, "any type except counter types"); + return isType(field(), dt -> dt.isCounter() == false, sourceText(), DEFAULT, "any type except counter types"); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java index c91b9c37ae0a3..5e61f69758a47 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java @@ -7,6 +7,9 @@ package org.elasticsearch.xpack.esql.expression.function.aggregate; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.CountDistinctBooleanAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.CountDistinctBytesRefAggregatorFunctionSupplier; @@ -16,29 +19,37 @@ import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Literal; -import org.elasticsearch.xpack.esql.core.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions; import org.elasticsearch.xpack.esql.expression.SurrogateExpression; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToLong; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvCount; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvDedupe; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.planner.ToAggregator; +import java.io.IOException; import java.util.List; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isFoldable; -import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isInteger; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isWholeNumber; public class CountDistinct extends AggregateFunction implements OptionalArgument, ToAggregator, SurrogateExpression { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "CountDistinct", + CountDistinct::new + ); + private static final int DEFAULT_PRECISION = 3000; private final Expression precision; @@ -56,6 +67,26 @@ public CountDistinct( this.precision = precision; } + private CountDistinct(StreamInput in) throws IOException { + this( + Source.readFrom((PlanStreamInput) in), + in.readNamedWriteable(Expression.class), + in.readOptionalNamedWriteable(Expression.class) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + Source.EMPTY.writeTo(out); + out.writeNamedWriteable(field()); + out.writeOptionalNamedWriteable(precision); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override protected NodeInfo info() { return NodeInfo.create(this, CountDistinct::new, field(), precision); @@ -93,7 +124,7 @@ protected TypeResolution 
resolveType() { if (resolution.unresolved() || precision == null) { return resolution; } - return isInteger(precision, sourceText(), SECOND).and(isFoldable(precision, sourceText(), SECOND)); + return isWholeNumber(precision, sourceText(), SECOND).and(isFoldable(precision, sourceText(), SECOND)); } @Override @@ -128,4 +159,8 @@ public Expression surrogate() { ? new ToLong(s, new Coalesce(s, new MvCount(s, new MvDedupe(s, field)), List.of(new Literal(s, 0, DataType.INTEGER)))) : null; } + + Expression precision() { + return precision; + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/FromPartial.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/FromPartial.java new file mode 100644 index 0000000000000..593e6fa463371 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/FromPartial.java @@ -0,0 +1,166 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.compute.aggregation.Aggregator; +import org.elasticsearch.compute.aggregation.AggregatorFunction; +import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.AggregatorMode; +import org.elasticsearch.compute.aggregation.FromPartialAggregatorFunction; +import org.elasticsearch.compute.aggregation.FromPartialGroupingAggregatorFunction; +import org.elasticsearch.compute.aggregation.GroupingAggregator; +import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.planner.ToAggregator; + +import java.io.IOException; +import java.util.List; +import java.util.stream.IntStream; + +/** + * @see ToPartial + */ +public class FromPartial extends AggregateFunction implements ToAggregator { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "FromPartial", + FromPartial::new + ); + + private final Expression function; + + public FromPartial(Source source, Expression field, Expression function) { + super(source, field, List.of(function)); + this.function = function; + } + + private FromPartial(StreamInput in) throws IOException { + this(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class), in.readNamedWriteable(Expression.class)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + out.writeNamedWriteable(field()); + out.writeNamedWriteable(function); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + public Expression 
function() { + return function; + } + + @Override + public DataType dataType() { + return function.dataType(); + } + + @Override + protected TypeResolution resolveType() { + return TypeResolution.TYPE_RESOLVED; + } + + @Override + public AttributeSet references() { + return field().references(); // exclude the function and its argument + } + + @Override + public Expression replaceChildren(List newChildren) { + return new FromPartial(source(), newChildren.get(0), newChildren.get(1)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, FromPartial::new, field(), function); + } + + @Override + public AggregatorFunctionSupplier supplier(List inputChannels) { + final ToAggregator toAggregator = (ToAggregator) function; + if (inputChannels.size() != 1) { + assert false : "from_partial aggregation requires exactly one input channel; got " + inputChannels; + throw new IllegalArgumentException("from_partial aggregation requires exactly one input channel; got " + inputChannels); + } + final int inputChannel = inputChannels.get(0); + return new AggregatorFunctionSupplier() { + @Override + public AggregatorFunction aggregator(DriverContext driverContext) { + assert false : "aggregatorFactory() is override"; + throw new UnsupportedOperationException(); + } + + @Override + public GroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + assert false : "groupingAggregatorFactory() is override"; + throw new UnsupportedOperationException(); + } + + @Override + public Aggregator.Factory aggregatorFactory(AggregatorMode mode) { + final AggregatorFunctionSupplier supplier; + try (var dummy = toAggregator.supplier(inputChannels).aggregator(DriverContext.getLocalDriver())) { + var intermediateChannels = IntStream.range(0, dummy.intermediateBlockCount()).boxed().toList(); + supplier = toAggregator.supplier(intermediateChannels); + } + return new Aggregator.Factory() { + @Override + public Aggregator apply(DriverContext driverContext) { + // use groupingAggregator since we can receive intermediate output from a grouping aggregate + final var groupingAggregator = supplier.groupingAggregator(driverContext); + return new Aggregator(new FromPartialAggregatorFunction(driverContext, groupingAggregator, inputChannel), mode); + } + + @Override + public String describe() { + return "from_partial(" + supplier.describe() + ")"; + } + }; + } + + @Override + public GroupingAggregator.Factory groupingAggregatorFactory(AggregatorMode mode) { + final AggregatorFunctionSupplier supplier; + try (var dummy = toAggregator.supplier(inputChannels).aggregator(DriverContext.getLocalDriver())) { + var intermediateChannels = IntStream.range(0, dummy.intermediateBlockCount()).boxed().toList(); + supplier = toAggregator.supplier(intermediateChannels); + } + return new GroupingAggregator.Factory() { + @Override + public GroupingAggregator apply(DriverContext driverContext) { + final GroupingAggregatorFunction aggregator = supplier.groupingAggregator(driverContext); + return new GroupingAggregator(new FromPartialGroupingAggregatorFunction(aggregator, inputChannel), mode); + } + + @Override + public String describe() { + return "from_partial(" + supplier.describe() + ")"; + } + }; + } + + @Override + public String describe() { + return "from_partial"; + } + }; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java index 
1c1139c197ac0..98748fad681c2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java @@ -7,32 +7,61 @@ package org.elasticsearch.xpack.esql.expression.function.aggregate; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.MaxBooleanAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MaxDoubleAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MaxIntAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MaxLongAggregatorFunctionSupplier; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.SurrogateExpression; +import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMax; +import org.elasticsearch.xpack.esql.planner.ToAggregator; +import java.io.IOException; import java.util.List; -public class Max extends NumericAggregate implements SurrogateExpression { +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; + +public class Max extends AggregateFunction implements ToAggregator, SurrogateExpression { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Max", Max::new); @FunctionInfo( - returnType = { "double", "integer", "long", "date" }, - description = "The maximum value of a numeric field.", - isAggregation = true + returnType = { "boolean", "double", "integer", "long", "date" }, + description = "The maximum value of a field.", + isAggregation = true, + examples = { + @Example(file = "stats", tag = "max"), + @Example( + description = "The expression can use inline functions. 
For example, to calculate the maximum " + + "over an average of a multivalued column, use `MV_AVG` to first average the " + + "multiple values per row, and use the result with the `MAX` function", + file = "stats", + tag = "docsStatsMaxNestedExpression" + ) } ) - public Max(Source source, @Param(name = "number", type = { "double", "integer", "long", "date" }) Expression field) { + public Max(Source source, @Param(name = "field", type = { "boolean", "double", "integer", "long", "date" }) Expression field) { super(source, field); } + private Max(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override protected NodeInfo info() { return NodeInfo.create(this, Max::new, field()); @@ -44,8 +73,16 @@ public Max replaceChildren(List newChildren) { } @Override - protected boolean supportsDates() { - return true; + protected TypeResolution resolveType() { + return TypeResolutions.isType( + this, + e -> e == DataType.BOOLEAN || e == DataType.DATETIME || (e.isNumeric() && e != DataType.UNSIGNED_LONG), + sourceText(), + DEFAULT, + "boolean", + "datetime", + "numeric except unsigned_long or counter types" + ); } @Override @@ -54,18 +91,21 @@ public DataType dataType() { } @Override - protected AggregatorFunctionSupplier longSupplier(List inputChannels) { - return new MaxLongAggregatorFunctionSupplier(inputChannels); - } - - @Override - protected AggregatorFunctionSupplier intSupplier(List inputChannels) { - return new MaxIntAggregatorFunctionSupplier(inputChannels); - } - - @Override - protected AggregatorFunctionSupplier doubleSupplier(List inputChannels) { - return new MaxDoubleAggregatorFunctionSupplier(inputChannels); + public final AggregatorFunctionSupplier supplier(List inputChannels) { + DataType type = field().dataType(); + if (type == DataType.BOOLEAN) { + return new MaxBooleanAggregatorFunctionSupplier(inputChannels); + } + if (type == DataType.LONG || type == DataType.DATETIME) { + return new MaxLongAggregatorFunctionSupplier(inputChannels); + } + if (type == DataType.INTEGER) { + return new MaxIntAggregatorFunctionSupplier(inputChannels); + } + if (type == DataType.DOUBLE) { + return new MaxDoubleAggregatorFunctionSupplier(inputChannels); + } + throw EsqlIllegalArgumentException.illegalDataType(type); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Median.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Median.java index c381693dbe2ce..36207df331e47 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Median.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Median.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.esql.expression.function.aggregate; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.aggregation.QuantileStates; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Literal; @@ -19,12 +21,15 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDouble; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMedian; +import java.io.IOException; import java.util.List; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; 
import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType; public class Median extends AggregateFunction implements SurrogateExpression { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Median", Median::new); + // TODO: Add the compression parameter @FunctionInfo( returnType = { "double", "integer", "long" }, @@ -46,6 +51,15 @@ protected Expression.TypeResolution resolveType() { ); } + private Median(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public DataType dataType() { return DataType.DOUBLE; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MedianAbsoluteDeviation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MedianAbsoluteDeviation.java index db25ad6c8c41f..23d55942cc72f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MedianAbsoluteDeviation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MedianAbsoluteDeviation.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.esql.expression.function.aggregate; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MedianAbsoluteDeviationDoubleAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MedianAbsoluteDeviationIntAggregatorFunctionSupplier; @@ -17,9 +19,15 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; +import java.io.IOException; import java.util.List; public class MedianAbsoluteDeviation extends NumericAggregate { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "MedianAbsoluteDeviation", + MedianAbsoluteDeviation::new + ); // TODO: Add parameter @FunctionInfo( @@ -31,6 +39,15 @@ public MedianAbsoluteDeviation(Source source, @Param(name = "number", type = { " super(source, field); } + private MedianAbsoluteDeviation(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override protected NodeInfo info() { return NodeInfo.create(this, MedianAbsoluteDeviation::new, field()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java index ecfc2200a3643..f712786bcff4b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java @@ -7,65 +7,105 @@ package org.elasticsearch.xpack.esql.expression.function.aggregate; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.MinBooleanAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MinDoubleAggregatorFunctionSupplier; import 
org.elasticsearch.compute.aggregation.MinIntAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MinLongAggregatorFunctionSupplier; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.SurrogateExpression; +import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin; +import org.elasticsearch.xpack.esql.planner.ToAggregator; +import java.io.IOException; import java.util.List; -public class Min extends NumericAggregate implements SurrogateExpression { +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; + +public class Min extends AggregateFunction implements ToAggregator, SurrogateExpression { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Min", Min::new); @FunctionInfo( - returnType = { "double", "integer", "long", "date" }, - description = "The minimum value of a numeric field.", - isAggregation = true + returnType = { "boolean", "double", "integer", "long", "date" }, + description = "The minimum value of a field.", + isAggregation = true, + examples = { + @Example(file = "stats", tag = "min"), + @Example( + description = "The expression can use inline functions. 
For example, to calculate the minimum " + + "over an average of a multivalued column, use `MV_AVG` to first average the " + + "multiple values per row, and use the result with the `MIN` function", + file = "stats", + tag = "docsStatsMinNestedExpression" + ) } ) - public Min(Source source, @Param(name = "number", type = { "double", "integer", "long", "date" }) Expression field) { + public Min(Source source, @Param(name = "field", type = { "boolean", "double", "integer", "long", "date" }) Expression field) { super(source, field); } - @Override - protected NodeInfo info() { - return NodeInfo.create(this, Min::new, field()); + private Min(StreamInput in) throws IOException { + super(in); } @Override - public Min replaceChildren(List newChildren) { - return new Min(source(), newChildren.get(0)); + public String getWriteableName() { + return ENTRY.name; } @Override - public DataType dataType() { - return field().dataType(); + protected NodeInfo info() { + return NodeInfo.create(this, Min::new, field()); } @Override - protected boolean supportsDates() { - return true; + public Min replaceChildren(List newChildren) { + return new Min(source(), newChildren.get(0)); } @Override - protected AggregatorFunctionSupplier longSupplier(List inputChannels) { - return new MinLongAggregatorFunctionSupplier(inputChannels); + protected TypeResolution resolveType() { + return TypeResolutions.isType( + this, + e -> e == DataType.BOOLEAN || e == DataType.DATETIME || (e.isNumeric() && e != DataType.UNSIGNED_LONG), + sourceText(), + DEFAULT, + "boolean", + "datetime", + "numeric except unsigned_long or counter types" + ); } @Override - protected AggregatorFunctionSupplier intSupplier(List inputChannels) { - return new MinIntAggregatorFunctionSupplier(inputChannels); + public DataType dataType() { + return field().dataType(); } @Override - protected AggregatorFunctionSupplier doubleSupplier(List inputChannels) { - return new MinDoubleAggregatorFunctionSupplier(inputChannels); + public final AggregatorFunctionSupplier supplier(List inputChannels) { + DataType type = field().dataType(); + if (type == DataType.BOOLEAN) { + return new MinBooleanAggregatorFunctionSupplier(inputChannels); + } + if (type == DataType.LONG || type == DataType.DATETIME) { + return new MinLongAggregatorFunctionSupplier(inputChannels); + } + if (type == DataType.INTEGER) { + return new MinIntAggregatorFunctionSupplier(inputChannels); + } + if (type == DataType.DOUBLE) { + return new MinDoubleAggregatorFunctionSupplier(inputChannels); + } + throw EsqlIllegalArgumentException.illegalDataType(type); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/NumericAggregate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/NumericAggregate.java index 390cd0d68018e..e7825a1d11704 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/NumericAggregate.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/NumericAggregate.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.esql.expression.function.aggregate; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.expression.Expression; @@ -14,6 +15,7 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import 
org.elasticsearch.xpack.esql.planner.ToAggregator; +import java.io.IOException; import java.util.List; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; @@ -51,6 +53,10 @@ public abstract class NumericAggregate extends AggregateFunction implements ToAg super(source, field); } + NumericAggregate(StreamInput in) throws IOException { + super(in); + } + @Override protected TypeResolution resolveType() { if (supportsDates()) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java index d21247a77d9cf..b65e78b431159 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java @@ -7,6 +7,9 @@ package org.elasticsearch.xpack.esql.expression.function.aggregate; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.PercentileDoubleAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.PercentileIntAggregatorFunctionSupplier; @@ -17,7 +20,9 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import java.io.IOException; import java.util.List; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; @@ -27,6 +32,12 @@ import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType; public class Percentile extends NumericAggregate { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "Percentile", + Percentile::new + ); + private final Expression percentile; @FunctionInfo( @@ -43,6 +54,22 @@ public Percentile( this.percentile = percentile; } + private Percentile(StreamInput in) throws IOException { + this(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class), in.readNamedWriteable(Expression.class)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + Source.EMPTY.writeTo(out); + out.writeNamedWriteable(children().get(0)); + out.writeNamedWriteable(children().get(1)); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override protected NodeInfo info() { return NodeInfo.create(this, Percentile::new, field(), percentile); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Rate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Rate.java new file mode 100644 index 0000000000000..682590bb7e857 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Rate.java @@ -0,0 +1,189 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.RateDoubleAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.RateIntAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.RateLongAggregatorFunctionSupplier; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttribute; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.OptionalArgument; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.planner.ToAggregator; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; + +import java.io.IOException; +import java.time.Duration; +import java.util.List; + +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType; + +public class Rate extends AggregateFunction implements OptionalArgument, ToAggregator { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Rate", Rate::new); + private static final TimeValue DEFAULT_UNIT = TimeValue.timeValueSeconds(1); + + private final Expression timestamp; + private final Expression unit; + + @FunctionInfo( + returnType = { "double" }, + description = "compute the rate of a counter field. Available in METRICS command only", + isAggregation = true + ) + public Rate( + Source source, + @Param(name = "field", type = { "counter_long|counter_integer|counter_double" }, description = "counter field") Expression field, + Expression timestamp, + @Param(optional = true, name = "unit", type = { "time_duration" }, description = "the unit") Expression unit + ) { + super(source, field, unit != null ? 
List.of(timestamp, unit) : List.of(timestamp)); + this.timestamp = timestamp; + this.unit = unit; + } + + public Rate(StreamInput in) throws IOException { + this( + Source.readFrom((PlanStreamInput) in), + in.readNamedWriteable(Expression.class), + in.readNamedWriteable(Expression.class), + in.readOptionalNamedWriteable(Expression.class) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + out.writeNamedWriteable(field()); + out.writeNamedWriteable(timestamp); + out.writeOptionalNamedWriteable(unit); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + public static Rate withUnresolvedTimestamp(Source source, Expression field, Expression unit) { + return new Rate(source, field, new UnresolvedAttribute(source, "@timestamp"), unit); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Rate::new, field(), timestamp, unit); + } + + @Override + public Rate replaceChildren(List newChildren) { + if (unit != null) { + if (newChildren.size() == 3) { + return new Rate(source(), newChildren.get(0), newChildren.get(1), newChildren.get(2)); + } + assert false : "expected 3 children for field, @timestamp, and unit; got " + newChildren; + throw new IllegalArgumentException("expected 3 children for field, @timestamp, and unit; got " + newChildren); + } else { + if (newChildren.size() == 2) { + return new Rate(source(), newChildren.get(0), newChildren.get(1), null); + } + assert false : "expected 2 children for field and @timestamp; got " + newChildren; + throw new IllegalArgumentException("expected 2 children for field and @timestamp; got " + newChildren); + } + } + + @Override + public DataType dataType() { + return DataType.DOUBLE; + } + + @Override + protected TypeResolution resolveType() { + TypeResolution resolution = isType( + field(), + dt -> dt == DataType.COUNTER_LONG || dt == DataType.COUNTER_INTEGER || dt == DataType.COUNTER_DOUBLE, + sourceText(), + FIRST, + "counter_long", + "counter_integer", + "counter_double" + ); + if (unit != null) { + resolution = resolution.and( + isType(unit, dt -> dt.isWholeNumber() || EsqlDataTypes.isTemporalAmount(dt), sourceText(), SECOND, "time_duration") + ); + } + return resolution; + } + + long unitInMillis() { + if (unit == null) { + return DEFAULT_UNIT.millis(); + } + if (unit.foldable() == false) { + throw new IllegalArgumentException("function [" + sourceText() + "] has invalid unit [" + unit.sourceText() + "]"); + } + final Object foldValue; + try { + foldValue = unit.fold(); + } catch (Exception e) { + throw new IllegalArgumentException("function [" + sourceText() + "] has invalid unit [" + unit.sourceText() + "]"); + } + if (foldValue instanceof Duration duration) { + return duration.toMillis(); + } + throw new IllegalArgumentException("function [" + sourceText() + "] has invalid unit [" + unit.sourceText() + "]"); + } + + @Override + public List inputExpressions() { + return List.of(field(), timestamp); + } + + @Override + public AggregatorFunctionSupplier supplier(List inputChannels) { + if (inputChannels.size() != 2 && inputChannels.size() != 3) { + throw new IllegalArgumentException("rate requires two for raw input or three channels for partial input; got " + inputChannels); + } + final long unitInMillis = unitInMillis(); + final DataType type = field().dataType(); + return switch (type) { + case COUNTER_LONG -> new RateLongAggregatorFunctionSupplier(inputChannels, unitInMillis); + case COUNTER_INTEGER -> new 
RateIntAggregatorFunctionSupplier(inputChannels, unitInMillis); + case COUNTER_DOUBLE -> new RateDoubleAggregatorFunctionSupplier(inputChannels, unitInMillis); + default -> throw EsqlIllegalArgumentException.illegalDataType(type); + }; + } + + @Override + public String toString() { + if (unit != null) { + return "rate(" + field() + "," + unit + ")"; + } else { + return "rate(" + field() + ")"; + } + } + + Expression timestamp() { + return timestamp; + } + + Expression unit() { + return unit; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialAggregateFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialAggregateFunction.java index 66a7e0ca436d6..d54d20eb4115f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialAggregateFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialAggregateFunction.java @@ -7,9 +7,11 @@ package org.elasticsearch.xpack.esql.expression.function.aggregate; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; +import java.io.IOException; import java.util.Objects; /** @@ -25,6 +27,11 @@ protected SpatialAggregateFunction(Source source, Expression field, boolean useD this.useDocValues = useDocValues; } + protected SpatialAggregateFunction(StreamInput in, boolean useDocValues) throws IOException { + super(in); + this.useDocValues = useDocValues; + } + public abstract SpatialAggregateFunction withDocValues(); @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialCentroid.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialCentroid.java index 418f92284cca0..d5681ba8d856e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialCentroid.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialCentroid.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.esql.expression.function.aggregate; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.spatial.SpatialCentroidCartesianPointDocValuesAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.spatial.SpatialCentroidCartesianPointSourceValuesAggregatorFunctionSupplier; @@ -20,6 +22,7 @@ import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.planner.ToAggregator; +import java.io.IOException; import java.util.List; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; @@ -29,6 +32,11 @@ * Calculate spatial centroid of all geo_point or cartesian point values of a field in matching documents. 
*/ public class SpatialCentroid extends SpatialAggregateFunction implements ToAggregator { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "SpatialCentroid", + SpatialCentroid::new + ); @FunctionInfo(returnType = { "geo_point", "cartesian_point" }, description = "The centroid of a spatial field.", isAggregation = true) public SpatialCentroid(Source source, @Param(name = "field", type = { "geo_point", "cartesian_point" }) Expression field) { @@ -39,6 +47,15 @@ private SpatialCentroid(Source source, Expression field, boolean useDocValues) { super(source, field, useDocValues); } + private SpatialCentroid(StreamInput in) throws IOException { + super(in, false); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public SpatialCentroid withDocValues() { return new SpatialCentroid(source(), field(), true); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Sum.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Sum.java index be9ae295f6fbc..e15cf774c3c3f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Sum.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Sum.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.esql.expression.function.aggregate; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.SumDoubleAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.SumIntAggregatorFunctionSupplier; @@ -22,6 +24,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvSum; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mul; +import java.io.IOException; import java.util.List; import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; @@ -32,12 +35,22 @@ * Sum all values of a field in matching documents. */ public class Sum extends NumericAggregate implements SurrogateExpression { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Sum", Sum::new); @FunctionInfo(returnType = "long", description = "The sum of a numeric field.", isAggregation = true) public Sum(Source source, @Param(name = "number", type = { "double", "integer", "long" }) Expression field) { super(source, field); } + private Sum(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override protected NodeInfo info() { return NodeInfo.create(this, Sum::new, field()); @@ -51,7 +64,7 @@ public Sum replaceChildren(List newChildren) { @Override public DataType dataType() { DataType dt = field().dataType(); - return dt.isInteger() == false || dt == UNSIGNED_LONG ? DOUBLE : LONG; + return dt.isWholeNumber() == false || dt == UNSIGNED_LONG ? 
DOUBLE : LONG; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/ToPartial.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/ToPartial.java new file mode 100644 index 0000000000000..c1da400185944 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/ToPartial.java @@ -0,0 +1,185 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.compute.aggregation.Aggregator; +import org.elasticsearch.compute.aggregation.AggregatorFunction; +import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.AggregatorMode; +import org.elasticsearch.compute.aggregation.FromPartialGroupingAggregatorFunction; +import org.elasticsearch.compute.aggregation.GroupingAggregator; +import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; +import org.elasticsearch.compute.aggregation.ToPartialAggregatorFunction; +import org.elasticsearch.compute.aggregation.ToPartialGroupingAggregatorFunction; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.planner.ToAggregator; + +import java.io.IOException; +import java.util.List; +import java.util.stream.IntStream; + +/** + * An internal aggregate function that always emits intermediate (or partial) output regardless + * of the aggregate mode. The intermediate output should be consumed by {@link FromPartial}, + * which always receives the intermediate input. Since an intermediate aggregate output can + * consist of multiple blocks, we wrap these output blocks in a single composite block. + * The {@link FromPartial} then unwraps this input block into multiple primitive blocks and + * passes them to the delegating GroupingAggregatorFunction. + *
      + * Both of these commands yield the same result, except the second plan executes aggregates twice:
      + *
      + * ```
      + * | ... before
      + * | af(x) BY g
      + * | ... after
      + * ```
      + * ```
      + * | ... before
      + * | $x = to_partial(af(x)) BY g
      + * | from_partial($x, af(_)) BY g
      + * | ...  after
      + *
      + * ``` + * @see ToPartialGroupingAggregatorFunction + * @see FromPartialGroupingAggregatorFunction + */ +public class ToPartial extends AggregateFunction implements ToAggregator { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "ToPartial", + ToPartial::new + ); + + private final Expression function; + + public ToPartial(Source source, Expression field, Expression function) { + super(source, field, List.of(function)); + this.function = function; + } + + private ToPartial(StreamInput in) throws IOException { + this(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class), in.readNamedWriteable(Expression.class)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + out.writeNamedWriteable(field()); + out.writeNamedWriteable(function); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + public Expression function() { + return function; + } + + @Override + public DataType dataType() { + return DataType.PARTIAL_AGG; + } + + @Override + protected TypeResolution resolveType() { + return function.typeResolved(); + } + + @Override + public Expression replaceChildren(List newChildren) { + return new ToPartial(source(), newChildren.get(0), newChildren.get(1)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ToPartial::new, field(), function); + } + + @Override + public AggregatorFunctionSupplier supplier(List inputChannels) { + final ToAggregator toAggregator = (ToAggregator) function; + return new AggregatorFunctionSupplier() { + @Override + public AggregatorFunction aggregator(DriverContext driverContext) { + assert false : "aggregatorFactory() is override"; + throw new UnsupportedOperationException(); + } + + @Override + public GroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + assert false : "groupingAggregatorFactory() is override"; + throw new UnsupportedOperationException(); + } + + @Override + public Aggregator.Factory aggregatorFactory(AggregatorMode mode) { + final AggregatorFunctionSupplier supplier; + if (mode.isInputPartial()) { + try (var dummy = toAggregator.supplier(inputChannels).aggregator(DriverContext.getLocalDriver())) { + var intermediateChannels = IntStream.range(0, dummy.intermediateBlockCount()).boxed().toList(); + supplier = toAggregator.supplier(intermediateChannels); + } + } else { + supplier = toAggregator.supplier(inputChannels); + } + return new Aggregator.Factory() { + @Override + public Aggregator apply(DriverContext driverContext) { + final AggregatorFunction aggregatorFunction = supplier.aggregator(driverContext); + return new Aggregator(new ToPartialAggregatorFunction(aggregatorFunction, inputChannels), mode); + } + + @Override + public String describe() { + return "to_partial(" + supplier.describe() + ")"; + } + }; + } + + @Override + public GroupingAggregator.Factory groupingAggregatorFactory(AggregatorMode mode) { + final AggregatorFunctionSupplier supplier; + if (mode.isInputPartial()) { + try (var dummy = toAggregator.supplier(inputChannels).aggregator(DriverContext.getLocalDriver())) { + var intermediateChannels = IntStream.range(0, dummy.intermediateBlockCount()).boxed().toList(); + supplier = toAggregator.supplier(intermediateChannels); + } + } else { + supplier = toAggregator.supplier(inputChannels); + } + return new GroupingAggregator.Factory() { + @Override + public GroupingAggregator apply(DriverContext 
driverContext) { + final GroupingAggregatorFunction aggregatorFunction = supplier.groupingAggregator(driverContext); + return new GroupingAggregator(new ToPartialGroupingAggregatorFunction(aggregatorFunction, inputChannels), mode); + } + + @Override + public String describe() { + return "to_partial(" + supplier.describe() + ")"; + } + }; + } + + @Override + public String describe() { + return "to_partial"; + } + }; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/TopList.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Top.java similarity index 71% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/TopList.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Top.java index 79893b1c7de07..c966ef7afb7c9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/TopList.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Top.java @@ -7,11 +7,14 @@ package org.elasticsearch.xpack.esql.expression.function.aggregate; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; -import org.elasticsearch.compute.aggregation.TopListDoubleAggregatorFunctionSupplier; -import org.elasticsearch.compute.aggregation.TopListIntAggregatorFunctionSupplier; -import org.elasticsearch.compute.aggregation.TopListLongAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.TopDoubleAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.TopIntAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.TopLongAggregatorFunctionSupplier; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; @@ -22,7 +25,6 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; -import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import org.elasticsearch.xpack.esql.planner.ToAggregator; import java.io.IOException; @@ -33,11 +35,13 @@ import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.THIRD; -import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isFoldable; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isNotNullAndFoldable; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType; -public class TopList extends AggregateFunction implements ToAggregator, SurrogateExpression { +public class Top extends AggregateFunction implements ToAggregator, SurrogateExpression { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, 
"Top", Top::new); + private static final String ORDER_ASC = "ASC"; private static final String ORDER_DESC = "DESC"; @@ -45,9 +49,9 @@ public class TopList extends AggregateFunction implements ToAggregator, Surrogat returnType = { "double", "integer", "long", "date" }, description = "Collects the top values for a field. Includes repeated values.", isAggregation = true, - examples = @Example(file = "stats_top_list", tag = "top-list") + examples = @Example(file = "stats_top", tag = "top") ) - public TopList( + public Top( Source source, @Param( name = "field", @@ -64,24 +68,35 @@ public TopList( super(source, field, Arrays.asList(limit, order)); } - public static TopList readFrom(PlanStreamInput in) throws IOException { - return new TopList(Source.readFrom(in), in.readExpression(), in.readExpression(), in.readExpression()); + private Top(StreamInput in) throws IOException { + this( + Source.readFrom((PlanStreamInput) in), + in.readNamedWriteable(Expression.class), + in.readNamedWriteable(Expression.class), + in.readNamedWriteable(Expression.class) + ); } - public void writeTo(PlanStreamOutput out) throws IOException { + @Override + public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); List fields = children(); assert fields.size() == 3; - out.writeExpression(fields.get(0)); - out.writeExpression(fields.get(1)); - out.writeExpression(fields.get(2)); + out.writeNamedWriteable(fields.get(0)); + out.writeNamedWriteable(fields.get(1)); + out.writeNamedWriteable(fields.get(2)); + } + + @Override + public String getWriteableName() { + return ENTRY.name; } - private Expression limitField() { + Expression limitField() { return parameters().get(0); } - private Expression orderField() { + Expression orderField() { return parameters().get(1); } @@ -109,9 +124,9 @@ protected TypeResolution resolveType() { sourceText(), FIRST, "numeric except unsigned_long or counter types" - ).and(isFoldable(limitField(), sourceText(), SECOND)) + ).and(isNotNullAndFoldable(limitField(), sourceText(), SECOND)) .and(isType(limitField(), dt -> dt == DataType.INTEGER, sourceText(), SECOND, "integer")) - .and(isFoldable(orderField(), sourceText(), THIRD)) + .and(isNotNullAndFoldable(orderField(), sourceText(), THIRD)) .and(isString(orderField(), sourceText(), THIRD)); if (typeResolution.unresolved()) { @@ -140,26 +155,26 @@ public DataType dataType() { } @Override - protected NodeInfo info() { - return NodeInfo.create(this, TopList::new, children().get(0), children().get(1), children().get(2)); + protected NodeInfo info() { + return NodeInfo.create(this, Top::new, children().get(0), children().get(1), children().get(2)); } @Override - public TopList replaceChildren(List newChildren) { - return new TopList(source(), newChildren.get(0), newChildren.get(1), newChildren.get(2)); + public Top replaceChildren(List newChildren) { + return new Top(source(), newChildren.get(0), newChildren.get(1), newChildren.get(2)); } @Override public AggregatorFunctionSupplier supplier(List inputChannels) { DataType type = field().dataType(); if (type == DataType.LONG || type == DataType.DATETIME) { - return new TopListLongAggregatorFunctionSupplier(inputChannels, limitValue(), orderValue()); + return new TopLongAggregatorFunctionSupplier(inputChannels, limitValue(), orderValue()); } if (type == DataType.INTEGER) { - return new TopListIntAggregatorFunctionSupplier(inputChannels, limitValue(), orderValue()); + return new TopIntAggregatorFunctionSupplier(inputChannels, limitValue(), orderValue()); } if (type == 
DataType.DOUBLE) { - return new TopListDoubleAggregatorFunctionSupplier(inputChannels, limitValue(), orderValue()); + return new TopDoubleAggregatorFunctionSupplier(inputChannels, limitValue(), orderValue()); } throw EsqlIllegalArgumentException.illegalDataType(type); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java index c76f60fe0f555..7d2fbcddb113b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.esql.expression.function.aggregate; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.ValuesBooleanAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.ValuesBytesRefAggregatorFunctionSupplier; @@ -23,11 +25,14 @@ import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.planner.ToAggregator; +import java.io.IOException; import java.util.List; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; public class Values extends AggregateFunction implements ToAggregator { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Values", Values::new); + @FunctionInfo( returnType = { "boolean|date|double|integer|ip|keyword|long|text|version" }, description = "Collect values for a field.", @@ -40,6 +45,15 @@ public Values( super(source, v); } + private Values(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override protected NodeInfo info() { return NodeInfo.create(this, Values::new, field()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/WeightedAvg.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/WeightedAvg.java new file mode 100644 index 0000000000000..75315c48b3f45 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/WeightedAvg.java @@ -0,0 +1,145 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.esql.capabilities.Validatable; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.SurrogateExpression; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvAvg; +import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Div; +import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mul; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.common.logging.LoggerMessageFormat.format; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType; + +public class WeightedAvg extends AggregateFunction implements SurrogateExpression, Validatable { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "WeightedAvg", + WeightedAvg::new + ); + + private final Expression weight; + + private static final String invalidWeightError = "{} argument of [{}] cannot be null or 0, received [{}]"; + + @FunctionInfo(returnType = "double", description = "The weighted average of a numeric field.", isAggregation = true) + public WeightedAvg( + Source source, + @Param(name = "number", type = { "double", "integer", "long" }, description = "A numeric value.") Expression field, + @Param(name = "weight", type = { "double", "integer", "long" }, description = "A numeric weight.") Expression weight + ) { + super(source, field, List.of(weight)); + this.weight = weight; + } + + private WeightedAvg(StreamInput in) throws IOException { + this(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class), in.readNamedWriteable(Expression.class)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + List fields = children(); + assert fields.size() == 2; + out.writeNamedWriteable(fields.get(0)); + out.writeNamedWriteable(fields.get(1)); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + protected Expression.TypeResolution resolveType() { + if (childrenResolved() == false) { + return new TypeResolution("Unresolved children"); + } + + TypeResolution resolution = isType( + field(), + dt -> dt.isNumeric() && dt != DataType.UNSIGNED_LONG, + sourceText(), + FIRST, + "numeric except unsigned_long or counter types" + ); + + if (resolution.unresolved()) { + return resolution; + } + + resolution = isType( + weight(), + dt -> dt.isNumeric() && dt != DataType.UNSIGNED_LONG, + sourceText(), + SECOND, + "numeric except unsigned_long or counter types" + ); + + if (resolution.unresolved()) { + return resolution; + } + + if (weight.dataType() == DataType.NULL 
+ || (weight.foldable() && (weight.fold() == null || weight.fold().equals(0) || weight.fold().equals(0.0)))) { + return new TypeResolution(format(null, invalidWeightError, SECOND, sourceText(), weight.foldable() ? weight.fold() : null)); + } + + return TypeResolution.TYPE_RESOLVED; + } + + @Override + public DataType dataType() { + return DataType.DOUBLE; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, WeightedAvg::new, field(), weight); + } + + @Override + public WeightedAvg replaceChildren(List newChildren) { + return new WeightedAvg(source(), newChildren.get(0), newChildren.get(1)); + } + + @Override + public Expression surrogate() { + var s = source(); + var field = field(); + var weight = weight(); + + if (field.foldable()) { + return new MvAvg(s, field); + } + if (weight.foldable()) { + return new Div(s, new Sum(s, field), new Count(s, field), dataType()); + } else { + return new Div(s, new Sum(s, new Mul(s, field, weight)), new Sum(s, weight), dataType()); + } + } + + public Expression weight() { + return weight; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/package-info.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/package-info.java index a99c7a8b7ac8d..f5b40df6fa619 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/package-info.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/package-info.java @@ -94,7 +94,7 @@ * to keep all the logic in one place. *
      * You can find examples of other aggregations using this method, - * like {@link org.elasticsearch.xpack.esql.expression.function.aggregate.TopList#writeTo(PlanStreamOutput)} + * like {@link org.elasticsearch.xpack.esql.expression.function.aggregate.Top#writeTo(PlanStreamOutput)} *
      * *
    @@ -155,7 +155,7 @@ *
    * Create a new StringTemplate file. * Use another as a reference, like - * {@code x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-TopListAggregator.java.st}. + * {@code x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-TopAggregator.java.st}. *
    *
    • * Add the template scripts to {@code x-pack/plugin/esql/compute/build.gradle}. diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java index 431494534f4ec..3ce51b8086dd0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java @@ -9,28 +9,33 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Rounding; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.core.TimeValue; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.capabilities.Validatable; -import org.elasticsearch.xpack.esql.core.common.Failures; +import org.elasticsearch.xpack.esql.common.Failures; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Foldables; import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; -import org.elasticsearch.xpack.esql.core.expression.function.TwoOptionalArguments; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.expression.function.TwoOptionalArguments; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateTrunc; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Floor; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Div; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mul; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import java.io.IOException; import java.time.ZoneId; import java.time.ZoneOffset; import java.util.List; @@ -53,6 +58,8 @@ * In the former case, two parameters will be provided, in the latter four. */ public class Bucket extends GroupingFunction implements Validatable, TwoOptionalArguments { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Bucket", Bucket::new); + // TODO maybe we should just cover the whole of representable dates here - like ten years, 100 years, 1000 years, all the way up. // That way you never end up with more than the target number of buckets. 
private static final Rounding LARGEST_HUMAN_DATE_ROUNDING = Rounding.builder(Rounding.DateTimeUnit.YEAR_OF_CENTURY).build(); @@ -193,6 +200,30 @@ public Bucket( this.to = to; } + private Bucket(StreamInput in) throws IOException { + this( + Source.readFrom((PlanStreamInput) in), + in.readNamedWriteable(Expression.class), + in.readNamedWriteable(Expression.class), + in.readOptionalNamedWriteable(Expression.class), + in.readOptionalNamedWriteable(Expression.class) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + out.writeNamedWriteable(field); + out.writeNamedWriteable(buckets); + out.writeOptionalNamedWriteable(from); + out.writeOptionalNamedWriteable(to); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public boolean foldable() { return field.foldable() && buckets.foldable() && (from == null || from.foldable()) && (to == null || to.foldable()); @@ -202,7 +233,7 @@ public boolean foldable() { public ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { if (field.dataType() == DataType.DATETIME) { Rounding.Prepared preparedRounding; - if (buckets.dataType().isInteger()) { + if (buckets.dataType().isWholeNumber()) { int b = ((Number) buckets.fold()).intValue(); long f = foldToLong(from); long t = foldToLong(to); @@ -221,7 +252,7 @@ public ExpressionEvaluator.Factory toEvaluator(Function dt.isInteger() || EsqlDataTypes.isTemporalAmount(dt), + dt -> dt.isWholeNumber() || EsqlDataTypes.isTemporalAmount(dt), sourceText(), SECOND, "integral", "date_period", "time_duration" ); - return bucketsType.isInteger() + return bucketsType.isWholeNumber() ? resolution.and(checkArgsCount(4)) .and(() -> isStringOrDate(from, sourceText(), THIRD)) .and(() -> isStringOrDate(to, sourceText(), FOURTH)) : resolution.and(checkArgsCount(2)); // temporal amount } if (fieldType.isNumeric()) { - return bucketsType.isInteger() + return bucketsType.isWholeNumber() ? 
checkArgsCount(4).and(() -> isNumeric(from, sourceText(), THIRD)).and(() -> isNumeric(to, sourceText(), FOURTH)) : isNumeric(buckets, sourceText(), SECOND).and(checkArgsCount(2)); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/EsqlScalarFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/EsqlScalarFunction.java index 17934c1729ad7..563847473c992 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/EsqlScalarFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/EsqlScalarFunction.java @@ -10,8 +10,12 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.function.scalar.ScalarFunction; +import org.elasticsearch.xpack.esql.core.expression.predicate.fulltext.FullTextPredicate; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; +import org.elasticsearch.xpack.esql.expression.function.grouping.Bucket; import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Case; import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Greatest; import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Least; @@ -21,12 +25,35 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateParse; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateTrunc; import org.elasticsearch.xpack.esql.expression.function.scalar.date.Now; +import org.elasticsearch.xpack.esql.expression.function.scalar.ip.CIDRMatch; +import org.elasticsearch.xpack.esql.expression.function.scalar.ip.IpPrefix; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Atan2; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.E; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Log; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Pi; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Pow; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Round; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Tau; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.BinarySpatialFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.EndsWith; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Left; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Locate; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Repeat; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Replace; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Right; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Split; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.StartsWith; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Substring; import 
org.elasticsearch.xpack.esql.expression.function.scalar.string.ToLower; import org.elasticsearch.xpack.esql.expression.function.scalar.string.ToUpper; +import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.InsensitiveEquals; +import java.util.ArrayList; import java.util.List; /** @@ -41,22 +68,49 @@ */ public abstract class EsqlScalarFunction extends ScalarFunction implements EvaluatorMapper { public static List getNamedWriteables() { - return List.of( - Case.ENTRY, - Coalesce.ENTRY, - Concat.ENTRY, - Greatest.ENTRY, - InsensitiveEquals.ENTRY, - DateExtract.ENTRY, - DateDiff.ENTRY, - DateFormat.ENTRY, - DateParse.ENTRY, - DateTrunc.ENTRY, - Least.ENTRY, - Now.ENTRY, - ToLower.ENTRY, - ToUpper.ENTRY - ); + List entries = new ArrayList<>(); + entries.add(And.ENTRY); + entries.add(Atan2.ENTRY); + entries.add(Bucket.ENTRY); + entries.add(Case.ENTRY); + entries.add(CIDRMatch.ENTRY); + entries.add(Coalesce.ENTRY); + entries.add(Concat.ENTRY); + entries.add(E.ENTRY); + entries.add(EndsWith.ENTRY); + entries.add(Greatest.ENTRY); + entries.add(In.ENTRY); + entries.add(InsensitiveEquals.ENTRY); + entries.add(DateExtract.ENTRY); + entries.add(DateDiff.ENTRY); + entries.add(DateFormat.ENTRY); + entries.add(DateParse.ENTRY); + entries.add(DateTrunc.ENTRY); + entries.add(IpPrefix.ENTRY); + entries.add(Least.ENTRY); + entries.add(Left.ENTRY); + entries.add(Locate.ENTRY); + entries.add(Log.ENTRY); + entries.add(Now.ENTRY); + entries.add(Or.ENTRY); + entries.add(Pi.ENTRY); + entries.add(Pow.ENTRY); + entries.add(Right.ENTRY); + entries.add(Repeat.ENTRY); + entries.add(Replace.ENTRY); + entries.add(Round.ENTRY); + entries.add(Split.ENTRY); + entries.add(Substring.ENTRY); + entries.add(StartsWith.ENTRY); + entries.add(Tau.ENTRY); + entries.add(ToLower.ENTRY); + entries.add(ToUpper.ENTRY); + entries.addAll(BinarySpatialFunction.getNamedWriteables()); + entries.addAll(EsqlArithmeticOperation.getNamedWriteables()); + entries.addAll(EsqlBinaryComparison.getNamedWriteables()); + entries.addAll(FullTextPredicate.getNamedWriteables()); + entries.addAll(UnaryScalarFunction.getNamedWriteables()); + return entries; } protected EsqlScalarFunction(Source source) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java index eb2e5ab94487f..0e9dbf3057c1b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java @@ -50,17 +50,20 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.math.Sqrt; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Tan; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Tanh; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.AbstractMultivalueFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StX; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StY; import 
org.elasticsearch.xpack.esql.expression.function.scalar.string.LTrim; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Length; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.RLike; import org.elasticsearch.xpack.esql.expression.function.scalar.string.RTrim; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Trim; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.WildcardLike; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Neg; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; -import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -68,51 +71,54 @@ public abstract class UnaryScalarFunction extends EsqlScalarFunction { public static List getNamedWriteables() { - return List.of( - Abs.ENTRY, - Acos.ENTRY, - Asin.ENTRY, - Atan.ENTRY, - Cbrt.ENTRY, - Ceil.ENTRY, - Cos.ENTRY, - Cosh.ENTRY, - Floor.ENTRY, - FromBase64.ENTRY, - IsNotNull.ENTRY, - IsNull.ENTRY, - Length.ENTRY, - Log10.ENTRY, - LTrim.ENTRY, - Neg.ENTRY, - Not.ENTRY, - RTrim.ENTRY, - Signum.ENTRY, - Sin.ENTRY, - Sinh.ENTRY, - Sqrt.ENTRY, - StX.ENTRY, - StY.ENTRY, - Tan.ENTRY, - Tanh.ENTRY, - ToBase64.ENTRY, - ToBoolean.ENTRY, - ToCartesianPoint.ENTRY, - ToDatetime.ENTRY, - ToDegrees.ENTRY, - ToDouble.ENTRY, - ToGeoShape.ENTRY, - ToCartesianShape.ENTRY, - ToGeoPoint.ENTRY, - ToIP.ENTRY, - ToInteger.ENTRY, - ToLong.ENTRY, - ToRadians.ENTRY, - ToString.ENTRY, - ToUnsignedLong.ENTRY, - ToVersion.ENTRY, - Trim.ENTRY - ); + List entries = new ArrayList<>(); + entries.add(Abs.ENTRY); + entries.add(Acos.ENTRY); + entries.add(Asin.ENTRY); + entries.add(Atan.ENTRY); + entries.add(Cbrt.ENTRY); + entries.add(Ceil.ENTRY); + entries.add(Cos.ENTRY); + entries.add(Cosh.ENTRY); + entries.add(Floor.ENTRY); + entries.add(FromBase64.ENTRY); + entries.add(IsNotNull.ENTRY); + entries.add(IsNull.ENTRY); + entries.add(Length.ENTRY); + entries.add(Log10.ENTRY); + entries.add(LTrim.ENTRY); + entries.add(Neg.ENTRY); + entries.add(Not.ENTRY); + entries.add(RLike.ENTRY); + entries.add(RTrim.ENTRY); + entries.add(Signum.ENTRY); + entries.add(Sin.ENTRY); + entries.add(Sinh.ENTRY); + entries.add(Sqrt.ENTRY); + entries.add(StX.ENTRY); + entries.add(StY.ENTRY); + entries.add(Tan.ENTRY); + entries.add(Tanh.ENTRY); + entries.add(ToBase64.ENTRY); + entries.add(ToBoolean.ENTRY); + entries.add(ToCartesianPoint.ENTRY); + entries.add(ToDatetime.ENTRY); + entries.add(ToDegrees.ENTRY); + entries.add(ToDouble.ENTRY); + entries.add(ToGeoShape.ENTRY); + entries.add(ToCartesianShape.ENTRY); + entries.add(ToGeoPoint.ENTRY); + entries.add(ToIP.ENTRY); + entries.add(ToInteger.ENTRY); + entries.add(ToLong.ENTRY); + entries.add(ToRadians.ENTRY); + entries.add(ToString.ENTRY); + entries.add(ToUnsignedLong.ENTRY); + entries.add(ToVersion.ENTRY); + entries.add(Trim.ENTRY); + entries.add(WildcardLike.ENTRY); + entries.addAll(AbstractMultivalueFunction.getNamedWriteables()); + return entries; } protected final Expression field; @@ -123,13 +129,13 @@ public UnaryScalarFunction(Source source, Expression field) { } protected UnaryScalarFunction(StreamInput in) throws IOException { - this(Source.readFrom((PlanStreamInput) in), ((PlanStreamInput) in).readExpression()); + this(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class)); } @Override public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); - 
((PlanStreamOutput) out).writeExpression(field); + out.writeNamedWriteable(field); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java index 50d0e5484756e..3239afabf6a24 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java @@ -31,7 +31,6 @@ import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; -import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import org.elasticsearch.xpack.esql.planner.PlannerUtils; import java.io.IOException; @@ -43,8 +42,6 @@ import static org.elasticsearch.common.logging.LoggerMessageFormat.format; import static org.elasticsearch.xpack.esql.core.type.DataType.NULL; -import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanReader.readerFromPlanReader; -import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanWriter.writerFromPlanWriter; public final class Case extends EsqlScalarFunction { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Case", Case::new); @@ -123,16 +120,16 @@ public Case( private Case(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), - ((PlanStreamInput) in).readExpression(), - in.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readExpression)) + in.readNamedWriteable(Expression.class), + in.readNamedWriteableCollectionAsList(Expression.class) ); } @Override public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); - ((PlanStreamOutput) out).writeExpression(children().get(0)); - out.writeCollection(children().subList(1, children().size()), writerFromPlanWriter(PlanStreamOutput::writeExpression)); + out.writeNamedWriteable(children().get(0)); + out.writeNamedWriteableCollection(children().subList(1, children().size())); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java index 580e2f9900208..7c0427a95d478 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java @@ -17,17 +17,16 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; -import org.elasticsearch.xpack.esql.core.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.OptionalArgument; import 
org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMax; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; -import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import java.io.IOException; import java.util.List; @@ -35,8 +34,6 @@ import java.util.stream.Stream; import static org.elasticsearch.xpack.esql.core.type.DataType.NULL; -import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanReader.readerFromPlanReader; -import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanWriter.writerFromPlanWriter; /** * Returns the maximum value of multiple columns. @@ -74,16 +71,16 @@ public Greatest( private Greatest(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), - ((PlanStreamInput) in).readExpression(), - in.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readExpression)) + in.readNamedWriteable(Expression.class), + in.readNamedWriteableCollectionAsList(Expression.class) ); } @Override public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); - ((PlanStreamOutput) out).writeExpression(children().get(0)); - out.writeCollection(children().subList(1, children().size()), writerFromPlanWriter(PlanStreamOutput::writeExpression)); + out.writeNamedWriteable(children().get(0)); + out.writeNamedWriteableCollection(children().subList(1, children().size())); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java index 2255fed9d4947..272e65106e7de 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java @@ -17,17 +17,16 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; -import org.elasticsearch.xpack.esql.core.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; -import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import java.io.IOException; import java.util.List; @@ -35,8 +34,6 @@ import java.util.stream.Stream; import static org.elasticsearch.xpack.esql.core.type.DataType.NULL; -import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanReader.readerFromPlanReader; -import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanWriter.writerFromPlanWriter; /** * Returns the minimum value of multiple columns. 
@@ -72,16 +69,16 @@ public Least( private Least(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), - ((PlanStreamInput) in).readExpression(), - in.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readExpression)) + in.readNamedWriteable(Expression.class), + in.readNamedWriteableCollectionAsList(Expression.class) ); } @Override public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); - ((PlanStreamOutput) out).writeExpression(children().get(0)); - out.writeCollection(children().subList(1, children().size()), writerFromPlanWriter(PlanStreamOutput::writeExpression)); + out.writeNamedWriteable(children().get(0)); + out.writeNamedWriteableCollection(children().subList(1, children().size())); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java index 96601905d40c9..0fed02f89fd92 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java @@ -57,7 +57,7 @@ protected AbstractConvertFunction(Source source, Expression field) { } protected AbstractConvertFunction(StreamInput in) throws IOException { - this(Source.readFrom((PlanStreamInput) in), ((PlanStreamInput) in).readExpression()); + this(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class)); } /** diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiff.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiff.java index 2a224598253f9..582785d023945 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiff.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiff.java @@ -24,7 +24,6 @@ import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; -import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import java.io.IOException; import java.time.Instant; @@ -176,18 +175,18 @@ public DateDiff( private DateDiff(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), - ((PlanStreamInput) in).readExpression(), - ((PlanStreamInput) in).readExpression(), - ((PlanStreamInput) in).readExpression() + in.readNamedWriteable(Expression.class), + in.readNamedWriteable(Expression.class), + in.readNamedWriteable(Expression.class) ); } @Override public void writeTo(StreamOutput out) throws IOException { Source.EMPTY.writeTo(out); - ((PlanStreamOutput) out).writeExpression(unit); - ((PlanStreamOutput) out).writeExpression(startTimestamp); - ((PlanStreamOutput) out).writeExpression(endTimestamp); + out.writeNamedWriteable(unit); + out.writeNamedWriteable(startTimestamp); + out.writeNamedWriteable(endTimestamp); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java index f3448a2b7c5ff..5a57e98be38b9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java @@ -26,7 +26,6 @@ import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlConfigurationFunction; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; -import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import java.io.IOException; @@ -83,8 +82,8 @@ public DateExtract( private DateExtract(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), - ((PlanStreamInput) in).readExpression(), - ((PlanStreamInput) in).readExpression(), + in.readNamedWriteable(Expression.class), + in.readNamedWriteable(Expression.class), ((PlanStreamInput) in).configuration() ); } @@ -92,8 +91,8 @@ private DateExtract(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); - ((PlanStreamOutput) out).writeExpression(datePart()); - ((PlanStreamOutput) out).writeExpression(field()); + out.writeNamedWriteable(datePart()); + out.writeNamedWriteable(field()); } Expression datePart() { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormat.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormat.java index 9a789c2bb6fb2..84a1a6e77ea73 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormat.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormat.java @@ -16,17 +16,16 @@ import org.elasticsearch.compute.ann.Fixed; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.core.session.Configuration; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlConfigurationFunction; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; -import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; @@ -73,8 +72,8 @@ Date format (optional). 
If no format is specified, the `yyyy-MM-dd'T'HH:mm:ss.S private DateFormat(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), - ((PlanStreamInput) in).readExpression(), - in.readOptionalWriteable(i -> ((PlanStreamInput) i).readExpression()), + in.readNamedWriteable(Expression.class), + in.readOptionalNamedWriteable(Expression.class), ((PlanStreamInput) in).configuration() ); } @@ -82,8 +81,8 @@ private DateFormat(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); - ((PlanStreamOutput) out).writeExpression(children().get(0)); - out.writeOptionalWriteable(children().size() == 1 ? null : o -> ((PlanStreamOutput) o).writeExpression(children().get(1))); + out.writeNamedWriteable(children().get(0)); + out.writeOptionalNamedWriteable(children().size() == 2 ? children().get(1) : null); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java index 12ffe092287ed..eb710e72882b1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java @@ -17,16 +17,15 @@ import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.core.InvalidArgumentException; import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; -import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import java.io.IOException; @@ -78,16 +77,16 @@ public DateParse( private DateParse(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), - ((PlanStreamInput) in).readExpression(), - in.readOptionalWriteable(i -> ((PlanStreamInput) i).readExpression()) + in.readNamedWriteable(Expression.class), + in.readOptionalNamedWriteable(Expression.class) ); } @Override public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); - ((PlanStreamOutput) out).writeExpression(children().get(0)); - out.writeOptionalWriteable(children().size() == 2 ? o -> ((PlanStreamOutput) out).writeExpression(children().get(1)) : null); + out.writeNamedWriteable(children().get(0)); + out.writeOptionalNamedWriteable(children().size() == 2 ? 
children().get(1) : null); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java index 995e525dda9ec..c39905f261d88 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java @@ -24,7 +24,6 @@ import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; -import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import java.io.IOException; @@ -82,14 +81,14 @@ public DateTrunc( } private DateTrunc(StreamInput in) throws IOException { - this(Source.readFrom((PlanStreamInput) in), ((PlanStreamInput) in).readExpression(), ((PlanStreamInput) in).readExpression()); + this(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class), in.readNamedWriteable(Expression.class)); } @Override public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); - ((PlanStreamOutput) out).writeExpression(interval); - ((PlanStreamOutput) out).writeExpression(timestampField); + out.writeNamedWriteable(interval); + out.writeNamedWriteable(timestampField); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatch.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatch.java index e2c2395446ed6..c141beeefb1ea 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatch.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatch.java @@ -8,6 +8,9 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.ip; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.network.CIDRUtils; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.operator.EvalOperator; @@ -22,7 +25,9 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import java.io.IOException; import java.util.Arrays; import java.util.List; import java.util.function.Function; @@ -45,6 +50,11 @@ * Example: `| eval cidr="10.0.0.0/8" | where cidr_match(ip_field, "127.0.0.1/30", cidr)` */ public class CIDRMatch extends EsqlScalarFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "CIDRMatch", + CIDRMatch::new + ); private final Expression ipField; private final List matches; @@ -68,6 +78,27 @@ public CIDRMatch( this.matches = matches; } + private CIDRMatch(StreamInput in) throws IOException { + this( + Source.readFrom((PlanStreamInput) in), + in.readNamedWriteable(Expression.class), + 
in.readNamedWriteableCollectionAsList(Expression.class) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + assert children().size() > 1; + out.writeNamedWriteable(children().get(0)); + out.writeNamedWriteableCollection(children().subList(1, children().size())); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + public Expression ipField() { return ipField; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefix.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefix.java index d00d1b2c35fcb..60b464b26750a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefix.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefix.java @@ -8,21 +8,23 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.ip; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.ann.Fixed; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; -import org.elasticsearch.xpack.esql.core.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; -import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import java.io.IOException; import java.util.Arrays; @@ -40,6 +42,8 @@ * Truncates an IP value to a given prefix length. 
*/ public class IpPrefix extends EsqlScalarFunction implements OptionalArgument { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "IpPrefix", IpPrefix::new); + // Borrowed from Lucene, rfc4291 prefix private static final byte[] IPV4_PREFIX = new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1 }; @@ -76,17 +80,26 @@ public IpPrefix( this.prefixLengthV6Field = prefixLengthV6Field; } - public static IpPrefix readFrom(PlanStreamInput in) throws IOException { - return new IpPrefix(Source.readFrom(in), in.readExpression(), in.readExpression(), in.readExpression()); + private IpPrefix(StreamInput in) throws IOException { + this( + Source.readFrom((PlanStreamInput) in), + in.readNamedWriteable(Expression.class), + in.readNamedWriteable(Expression.class), + in.readNamedWriteable(Expression.class) + ); } - public void writeTo(PlanStreamOutput out) throws IOException { + @Override + public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); - List fields = children(); - assert fields.size() == 3; - out.writeExpression(fields.get(0)); - out.writeExpression(fields.get(1)); - out.writeExpression(fields.get(2)); + out.writeNamedWriteable(ipField); + out.writeNamedWriteable(prefixLengthV4Field); + out.writeNamedWriteable(prefixLengthV6Field); + } + + @Override + public String getWriteableName() { + return ENTRY.name; } public Expression ipField() { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2.java index a2af991a244c3..f940cb6d68554 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2.java @@ -7,6 +7,9 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.math; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.core.expression.Expression; @@ -19,7 +22,9 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import java.io.IOException; import java.util.List; import java.util.function.Function; @@ -29,6 +34,8 @@ * Inverse cosine trigonometric function. 
*/ public class Atan2 extends EsqlScalarFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Atan2", Atan2::new); + private final Expression y; private final Expression x; @@ -56,6 +63,22 @@ public Atan2( this.x = x; } + private Atan2(StreamInput in) throws IOException { + this(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class), in.readNamedWriteable(Expression.class)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + out.writeNamedWriteable(y); + out.writeNamedWriteable(x); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public Expression replaceChildren(List newChildren) { return new Atan2(source(), newChildren.get(0), newChildren.get(1)); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Ceil.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Ceil.java index 7d31cec0e54a2..909de387c62ff 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Ceil.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Ceil.java @@ -65,7 +65,7 @@ public String getWriteableName() { @Override public ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { - if (dataType().isInteger()) { + if (dataType().isWholeNumber()) { return toEvaluator.apply(field()); } var fieldEval = toEvaluator.apply(field()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/E.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/E.java index 9bcd8a2467b1d..757b67b47ce72 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/E.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/E.java @@ -7,17 +7,24 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.math; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import java.io.IOException; import java.util.List; /** * Function that emits Euler's number. 
*/ public class E extends DoubleConstantFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "E", E::new); + @FunctionInfo( returnType = "double", description = "Returns {wikipedia}/E_(mathematical_constant)[Euler's number].", @@ -27,6 +34,20 @@ public E(Source source) { super(source); } + private E(StreamInput in) throws IOException { + this(Source.readFrom((PlanStreamInput) in)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + Source.EMPTY.writeTo(out); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public Object fold() { return Math.E; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Floor.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Floor.java index 73ff0aec2b126..638770f2f079a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Floor.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Floor.java @@ -67,7 +67,7 @@ public String getWriteableName() { @Override public ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { - if (dataType().isInteger()) { + if (dataType().isWholeNumber()) { return toEvaluator.apply(field()); } return new FloorDoubleEvaluator.Factory(source(), toEvaluator.apply(field())); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log.java index 97007f10b31bc..da11d1e77885b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log.java @@ -7,18 +7,23 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.math; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import java.io.IOException; import java.util.Arrays; import java.util.List; import java.util.function.Function; @@ -28,6 +33,7 @@ import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isNumeric; public class Log extends EsqlScalarFunction implements OptionalArgument { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Log", Log::new); private final Expression base; 
private final Expression value; @@ -60,6 +66,27 @@ public Log( this.base = value != null ? base : null; } + private Log(StreamInput in) throws IOException { + this( + Source.readFrom((PlanStreamInput) in), + in.readNamedWriteable(Expression.class), + in.readOptionalNamedWriteable(Expression.class) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + assert children().size() == 1 || children().size() == 2; + out.writeNamedWriteable(children().get(0)); + out.writeOptionalNamedWriteable(children().size() == 2 ? children().get(1) : null); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override protected TypeResolution resolveType() { if (childrenResolved() == false) { @@ -126,4 +153,12 @@ public ExpressionEvaluator.Factory toEvaluator(Function EVALUATOR_IDENTITY = (s, e) -> e; @@ -67,6 +73,26 @@ public Round( this.decimals = decimals; } + private Round(StreamInput in) throws IOException { + this( + Source.readFrom((PlanStreamInput) in), + in.readNamedWriteable(Expression.class), + in.readOptionalNamedWriteable(Expression.class) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + out.writeNamedWriteable(field); + out.writeOptionalNamedWriteable(decimals); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override protected TypeResolution resolveType() { if (childrenResolved() == false) { @@ -78,7 +104,7 @@ protected TypeResolution resolveType() { return resolution; } - return decimals == null ? TypeResolution.TYPE_RESOLVED : isInteger(decimals, sourceText(), SECOND); + return decimals == null ? TypeResolution.TYPE_RESOLVED : isWholeNumber(decimals, sourceText(), SECOND); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Tau.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Tau.java index 7a2eb801be84a..17e5b027270d1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Tau.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Tau.java @@ -7,17 +7,24 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.math; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import java.io.IOException; import java.util.List; /** * Function that emits tau, also known as 2 * pi. 
*/ public class Tau extends DoubleConstantFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Tau", Tau::new); + public static final double TAU = Math.PI * 2; @FunctionInfo( @@ -29,6 +36,20 @@ public Tau(Source source) { super(source); } + private Tau(StreamInput in) throws IOException { + this(Source.readFrom((PlanStreamInput) in)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + Source.EMPTY.writeTo(out); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public Object fold() { return TAU; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java index 9b7e0b729cde9..cffb208940aa5 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java @@ -18,7 +18,6 @@ import org.elasticsearch.core.Releasables; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput; import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; @@ -57,13 +56,13 @@ protected AbstractMultivalueFunction(Source source, Expression field) { } protected AbstractMultivalueFunction(StreamInput in) throws IOException { - this(Source.readFrom((PlanStreamInput) in), ((PlanStreamInput) in).readExpression()); + this(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class)); } @Override public final void writeTo(StreamOutput out) throws IOException { Source.EMPTY.writeTo(out); - ((PlanStreamOutput) out).writeExpression(field); + out.writeNamedWriteable(field); } /** diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppend.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppend.java index 99844d40e0565..dc4b78d980c28 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppend.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppend.java @@ -111,7 +111,7 @@ public MvAppend( } private MvAppend(StreamInput in) throws IOException { - this(Source.readFrom((PlanStreamInput) in), ((PlanStreamInput) in).readExpression(), ((PlanStreamInput) in).readExpression()); + this(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class), in.readNamedWriteable(Expression.class)); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java index f824d0821cfbf..3728f4305d5c7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java @@ -22,14 +22,13 @@ import org.elasticsearch.xpack.esql.core.InvalidArgumentException; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; -import org.elasticsearch.xpack.esql.core.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput; import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; @@ -117,20 +116,18 @@ public MvSlice( private MvSlice(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), - ((PlanStreamInput) in).readExpression(), - ((PlanStreamInput) in).readExpression(), - // TODO readOptionalNamedWriteable - in.readOptionalWriteable(i -> ((PlanStreamInput) i).readExpression()) + in.readNamedWriteable(Expression.class), + in.readNamedWriteable(Expression.class), + in.readOptionalNamedWriteable(Expression.class) ); } @Override public void writeTo(StreamOutput out) throws IOException { Source.EMPTY.writeTo(out); - ((PlanStreamOutput) out).writeExpression(field); - ((PlanStreamOutput) out).writeExpression(start); - // TODO writeOptionalNamedWriteable - out.writeOptionalWriteable(end == null ? 
null : o -> ((PlanStreamOutput) o).writeExpression(end)); + out.writeNamedWriteable(field); + out.writeNamedWriteable(start); + out.writeOptionalNamedWriteable(end); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java index fd5f493ae405e..ee83236ac6a63 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; @@ -29,16 +30,16 @@ import org.elasticsearch.compute.operator.mvdedupe.MultivalueDedupeInt; import org.elasticsearch.compute.operator.mvdedupe.MultivalueDedupeLong; import org.elasticsearch.xpack.esql.capabilities.Validatable; -import org.elasticsearch.xpack.esql.core.common.Failures; +import org.elasticsearch.xpack.esql.common.Failure; +import org.elasticsearch.xpack.esql.common.Failures; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Literal; -import org.elasticsearch.xpack.esql.core.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput; import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; @@ -65,6 +66,9 @@ public class MvSort extends EsqlScalarFunction implements OptionalArgument, Vali private final Expression field, order; private static final Literal ASC = new Literal(Source.EMPTY, "ASC", DataType.KEYWORD); + private static final Literal DESC = new Literal(Source.EMPTY, "DESC", DataType.KEYWORD); + + private static final String INVALID_ORDER_ERROR = "Invalid order value in [{}], expected one of [{}, {}] but got [{}]"; @FunctionInfo( returnType = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, @@ -85,7 +89,7 @@ public MvSort( optional = true ) Expression order ) { - super(source, order == null ? Arrays.asList(field, ASC) : Arrays.asList(field, order)); + super(source, order == null ? 
Arrays.asList(field) : Arrays.asList(field, order)); this.field = field; this.order = order; } @@ -93,18 +97,16 @@ public MvSort( private MvSort(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), - ((PlanStreamInput) in).readExpression(), - // TODO readOptionalNamedWriteable - in.readOptionalWriteable(i -> ((PlanStreamInput) i).readExpression()) + in.readNamedWriteable(Expression.class), + in.readOptionalNamedWriteable(Expression.class) ); } @Override public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); - ((PlanStreamOutput) out).writeExpression(field); - // TODO writeOptionalNamedWriteable - out.writeOptionalWriteable(order == null ? null : o -> ((PlanStreamOutput) o).writeExpression(order)); + out.writeNamedWriteable(field); + out.writeOptionalNamedWriteable(order); } @Override @@ -131,6 +133,7 @@ protected TypeResolution resolveType() { if (resolution.unresolved()) { return resolution; } + if (order == null) { return resolution; } @@ -147,10 +150,23 @@ public boolean foldable() { public EvalOperator.ExpressionEvaluator.Factory toEvaluator( Function toEvaluator ) { - Expression nonNullOrder = order == null ? ASC : order; - boolean ordering = nonNullOrder.foldable() && ((BytesRef) nonNullOrder.fold()).utf8ToString().equalsIgnoreCase("DESC") - ? false - : true; + boolean ordering = true; + if (isValidOrder() == false) { + throw new IllegalArgumentException( + LoggerMessageFormat.format( + null, + INVALID_ORDER_ERROR, + sourceText(), + ASC.value(), + DESC.value(), + ((BytesRef) order.fold()).utf8ToString() + ) + ); + } + if (order != null && order.foldable()) { + ordering = ((BytesRef) order.fold()).utf8ToString().equalsIgnoreCase((String) ASC.value()); + } + return switch (PlannerUtils.toElementType(field.dataType())) { case BOOLEAN -> new MvSort.EvaluatorFactory( toEvaluator.apply(field), @@ -219,8 +235,33 @@ public DataType dataType() { @Override public void validate(Failures failures) { + if (order == null) { + return; + } String operation = sourceText(); failures.add(isFoldable(order, operation, SECOND)); + if (isValidOrder() == false) { + failures.add( + Failure.fail(order, INVALID_ORDER_ERROR, sourceText(), ASC.value(), DESC.value(), ((BytesRef) order.fold()).utf8ToString()) + ); + } + } + + private boolean isValidOrder() { + boolean isValidOrder = true; + if (order != null && order.foldable()) { + Object obj = order.fold(); + String o = null; + if (obj instanceof BytesRef ob) { + o = ob.utf8ToString(); + } else if (obj instanceof String os) { + o = os; + } + if (o == null || o.equalsIgnoreCase((String) ASC.value()) == false && o.equalsIgnoreCase((String) DESC.value()) == false) { + isValidOrder = false; + } + } + return isValidOrder; } private record EvaluatorFactory( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZip.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZip.java index 15bd09a4089e6..fd3b9e7664dff 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZip.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZip.java @@ -18,14 +18,13 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.expression.Nullability; -import 
org.elasticsearch.xpack.esql.core.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput; import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; @@ -74,20 +73,18 @@ public MvZip( private MvZip(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), - ((PlanStreamInput) in).readExpression(), - ((PlanStreamInput) in).readExpression(), - // TODO readOptionalNamedWriteable - in.readOptionalWriteable(i -> ((PlanStreamInput) i).readExpression()) + in.readNamedWriteable(Expression.class), + in.readNamedWriteable(Expression.class), + in.readOptionalNamedWriteable(Expression.class) ); } @Override public void writeTo(StreamOutput out) throws IOException { Source.EMPTY.writeTo(out); - ((PlanStreamOutput) out).writeExpression(mvLeft); - ((PlanStreamOutput) out).writeExpression(mvRight); - // TODO writeOptionalNamedWriteable - out.writeOptionalWriteable(delim == null ? null : o -> ((PlanStreamOutput) o).writeExpression(delim)); + out.writeNamedWriteable(mvLeft); + out.writeNamedWriteable(mvRight); + out.writeOptionalNamedWriteable(delim); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java index 6a02eb4b94f12..30c6abc5398e3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java @@ -22,16 +22,15 @@ import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.Nullability; import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; -import org.elasticsearch.xpack.esql.core.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; -import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import org.elasticsearch.xpack.esql.planner.PlannerUtils; import java.io.IOException; @@ -41,8 +40,6 @@ import java.util.stream.Stream; import static org.elasticsearch.xpack.esql.core.type.DataType.NULL; -import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanReader.readerFromPlanReader; -import static 
org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanWriter.writerFromPlanWriter; /** * Function returning the first non-null value. @@ -113,16 +110,16 @@ public Coalesce( private Coalesce(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), - ((PlanStreamInput) in).readExpression(), - in.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readExpression)) + in.readNamedWriteable(Expression.class), + in.readNamedWriteableCollectionAsList(Expression.class) ); } @Override public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); - ((PlanStreamOutput) out).writeExpression(children().get(0)); - out.writeCollection(children().subList(1, children().size()), writerFromPlanWriter(PlanStreamOutput::writeExpression)); + out.writeNamedWriteable(children().get(0)); + out.writeNamedWriteableCollection(children().subList(1, children().size())); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/package-info.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/package-info.java index 2e40ee1634d1b..4f9219247d5c2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/package-info.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/package-info.java @@ -107,20 +107,14 @@ * This links it into the language and {@code META FUNCTIONS}. *
  *     </li>
  *     <li>
- *         Register your function for serialization. We're in the process of migrating this serialization
- *         from an older way to the more common, {@link org.elasticsearch.common.io.stream.NamedWriteable}.
- *         <ul>
- *             <li>
- *                 All subclasses of {@link org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction},
- *                 {@link org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison},
- *                 and {@link org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation}
- *                 are migrated and should include a "getWriteableName", "writeTo", and a deserializing constructor.
- *                 They should also include a {@link org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry}
- *                 and it should be linked in {@link org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction}.
- *             </li>
- *             <li>
- *                 Other functions serialized in {@link org.elasticsearch.xpack.esql.io.stream.PlanNamedTypes}
- *                 and you should copy what's done there.
- *             </li>
- *         </ul>
+ *         Implement serialization for your function by implementing
+ *         {@link org.elasticsearch.common.io.stream.NamedWriteable#getWriteableName},
+ *         {@link org.elasticsearch.common.io.stream.NamedWriteable#writeTo},
+ *         and a deserializing constructor. Then add an {@link org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry}
+ *         constant and register it. To register it, look for a method like
+ *         {@link org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction#getNamedWriteables()}
+ *         in your function's class hierarchy. Keep going up until you hit a function with that name.
+ *         Then add your new "ENTRY" constant to the list it returns.
  *     </li>
  *     <li>
  *         Rerun the {@code CsvTests}. They should find your function and maybe even pass. Add a
@@ -133,7 +127,7 @@
  *     </li>
  *     <li>
  *         Now it's time to make a unit test! The infrastructure for these is under some flux at
- *         the moment, but it's good to extend from {@code AbstractFunctionTestCase}. All of
+ *         the moment, but it's good to extend from {@code AbstractScalarFunctionTestCase}. All of
  *         these tests are parameterized and expect to spend some time finding good parameters.
  *     </li>
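
The registration pattern described in the updated javadoc above reads more easily next to a concrete shape. The sketch below is illustrative only and is not part of this change: MyFunction and its single field argument are hypothetical stand-ins, and the other abstract members of EsqlScalarFunction (dataType, replaceChildren, info, toEvaluator) are elided. The real conversions later in this diff, such as EndsWith, Left, and Locate, follow this same shape.

import java.io.IOException;
import java.util.List;

import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.xpack.esql.core.expression.Expression;
import org.elasticsearch.xpack.esql.core.tree.Source;
import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction;
import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput;

// Hypothetical single-argument function used only to illustrate the serialization members.
public class MyFunction extends EsqlScalarFunction {
    // Registry entry; add this constant to the list returned by the nearest getNamedWriteables()
    // in the class hierarchy (here EsqlScalarFunction#getNamedWriteables()).
    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
        Expression.class,
        "MyFunction",
        MyFunction::new
    );

    private final Expression field;

    public MyFunction(Source source, Expression field) {
        super(source, List.of(field));
        this.field = field;
    }

    // Deserializing constructor: read the Source first, then each child in the same order writeTo wrote it.
    private MyFunction(StreamInput in) throws IOException {
        this(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class));
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        source().writeTo(out);
        out.writeNamedWriteable(field);
    }

    @Override
    public String getWriteableName() {
        return ENTRY.name;
    }

    // dataType(), replaceChildren(), info(), and toEvaluator() are omitted; they are unrelated to serialization.
}

An optional trailing argument is handled the same way with writeOptionalNamedWriteable and readOptionalNamedWriteable, as the MvSort, MvZip, and Locate changes in this diff show.
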
    • diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunction.java new file mode 100644 index 0000000000000..1beef40ce0c42 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunction.java @@ -0,0 +1,240 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.geometry.Geometry; +import org.elasticsearch.lucene.spatial.CoordinateEncoder; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; +import org.elasticsearch.xpack.esql.core.expression.function.scalar.BinaryScalarFunction; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes; +import org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType; +import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_POINT; +import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_SHAPE; +import static org.elasticsearch.xpack.esql.core.type.DataType.isNull; + +/** + * Spatial functions that take two arguments that must both be spatial types can inherit from this class. + * This provides common support for type resolution and validation. Ensuring that both arguments are spatial types + * and of compatible CRS. For example geo_point and geo_shape can be compared, but not geo_point and cartesian_point. 
+ */ +public abstract class BinarySpatialFunction extends BinaryScalarFunction implements SpatialEvaluatorFactory.SpatialSourceResolution { + public static List getNamedWriteables() { + return List.of(SpatialContains.ENTRY, SpatialDisjoint.ENTRY, SpatialIntersects.ENTRY, SpatialWithin.ENTRY, StDistance.ENTRY); + } + + private final SpatialTypeResolver spatialTypeResolver; + protected SpatialCrsType crsType; + protected final boolean leftDocValues; + protected final boolean rightDocValues; + + protected BinarySpatialFunction( + Source source, + Expression left, + Expression right, + boolean leftDocValues, + boolean rightDocValues, + boolean pointsOnly + ) { + super(source, left, right); + this.leftDocValues = leftDocValues; + this.rightDocValues = rightDocValues; + this.spatialTypeResolver = new SpatialTypeResolver(this, pointsOnly); + } + + protected BinarySpatialFunction(StreamInput in, boolean leftDocValues, boolean rightDocValues, boolean pointsOnly) throws IOException { + this( + Source.EMPTY, + in.readNamedWriteable(Expression.class), + in.readNamedWriteable(Expression.class), + leftDocValues, + rightDocValues, + pointsOnly + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeNamedWriteable(left()); + out.writeNamedWriteable(right()); + } + + @Override + protected TypeResolution resolveType() { + return spatialTypeResolver.resolveType(); + } + + static class SpatialTypeResolver { + private final SpatialEvaluatorFactory.SpatialSourceResolution supplier; + private final boolean pointsOnly; + + SpatialTypeResolver(SpatialEvaluatorFactory.SpatialSourceResolution supplier, boolean pointsOnly) { + this.supplier = supplier; + this.pointsOnly = pointsOnly; + } + + public Expression left() { + return supplier.left(); + } + + public Expression right() { + return supplier.right(); + } + + public String sourceText() { + return supplier.source().text(); + } + + protected TypeResolution resolveType() { + if (left().foldable() && right().foldable() == false || isNull(left().dataType())) { + // Left is literal, but right is not, check the left field's type against the right field + return resolveType(right(), left(), SECOND, FIRST); + } else { + // All other cases check the right against the left + return resolveType(left(), right(), FIRST, SECOND); + } + } + + protected Expression.TypeResolution isSpatial(Expression e, TypeResolutions.ParamOrdinal paramOrd) { + return pointsOnly + ? 
EsqlTypeResolutions.isSpatialPoint(e, sourceText(), paramOrd) + : EsqlTypeResolutions.isSpatial(e, sourceText(), paramOrd); + } + + private TypeResolution resolveType( + Expression leftExpression, + Expression rightExpression, + TypeResolutions.ParamOrdinal leftOrdinal, + TypeResolutions.ParamOrdinal rightOrdinal + ) { + TypeResolution leftResolution = isSpatial(leftExpression, leftOrdinal); + TypeResolution rightResolution = isSpatial(rightExpression, rightOrdinal); + if (leftResolution.resolved()) { + return resolveType(leftExpression, rightExpression, rightOrdinal); + } else if (rightResolution.resolved()) { + return resolveType(rightExpression, leftExpression, leftOrdinal); + } else { + return leftResolution; + } + } + + protected TypeResolution resolveType( + Expression spatialExpression, + Expression otherExpression, + TypeResolutions.ParamOrdinal otherParamOrdinal + ) { + if (isNull(spatialExpression.dataType())) { + return isSpatial(otherExpression, otherParamOrdinal); + } + TypeResolution resolution = isSameSpatialType(spatialExpression.dataType(), otherExpression, sourceText(), otherParamOrdinal); + if (resolution.unresolved()) { + return resolution; + } + supplier.setCrsType(spatialExpression.dataType()); + return TypeResolution.TYPE_RESOLVED; + } + + protected TypeResolution isSameSpatialType( + DataType spatialDataType, + Expression expression, + String operationName, + TypeResolutions.ParamOrdinal paramOrd + ) { + return pointsOnly + ? isType(expression, dt -> dt == spatialDataType, operationName, paramOrd, compatibleTypeNames(spatialDataType)) + : isType( + expression, + dt -> EsqlDataTypes.isSpatial(dt) && spatialCRSCompatible(spatialDataType, dt), + operationName, + paramOrd, + compatibleTypeNames(spatialDataType) + ); + } + } + + @Override + public void setCrsType(DataType dataType) { + crsType = SpatialCrsType.fromDataType(dataType); + } + + private static final String[] GEO_TYPE_NAMES = new String[] { GEO_POINT.typeName(), GEO_SHAPE.typeName() }; + private static final String[] CARTESIAN_TYPE_NAMES = new String[] { GEO_POINT.typeName(), GEO_SHAPE.typeName() }; + + protected static boolean spatialCRSCompatible(DataType spatialDataType, DataType otherDataType) { + return EsqlDataTypes.isSpatialGeo(spatialDataType) && EsqlDataTypes.isSpatialGeo(otherDataType) + || EsqlDataTypes.isSpatialGeo(spatialDataType) == false && EsqlDataTypes.isSpatialGeo(otherDataType) == false; + } + + static String[] compatibleTypeNames(DataType spatialDataType) { + return EsqlDataTypes.isSpatialGeo(spatialDataType) ? GEO_TYPE_NAMES : CARTESIAN_TYPE_NAMES; + } + + @Override + public SpatialCrsType crsType() { + if (crsType == null) { + resolveType(); + } + return crsType; + } + + public boolean leftDocValues() { + return leftDocValues; + } + + public boolean rightDocValues() { + return rightDocValues; + } + + /** + * For most spatial functions we only need to know if the CRS is geo or cartesian, not whether the type is point or shape. + * This enum captures this knowledge. + */ + public enum SpatialCrsType { + GEO, + CARTESIAN, + UNSPECIFIED; + + public static SpatialCrsType fromDataType(DataType dataType) { + return EsqlDataTypes.isSpatialGeo(dataType) ? SpatialCrsType.GEO + : EsqlDataTypes.isSpatial(dataType) ? 
SpatialCrsType.CARTESIAN + : SpatialCrsType.UNSPECIFIED; + } + } + + protected abstract static class BinarySpatialComparator { + protected final SpatialCoordinateTypes spatialCoordinateType; + protected final CoordinateEncoder coordinateEncoder; + protected final SpatialCrsType crsType; + + protected BinarySpatialComparator(SpatialCoordinateTypes spatialCoordinateType, CoordinateEncoder encoder) { + this.spatialCoordinateType = spatialCoordinateType; + this.coordinateEncoder = encoder; + this.crsType = spatialCoordinateType.equals(SpatialCoordinateTypes.GEO) ? SpatialCrsType.GEO : SpatialCrsType.CARTESIAN; + } + + protected Geometry fromBytesRef(BytesRef bytesRef) { + return SpatialCoordinateTypes.UNSPECIFIED.wkbToGeometry(bytesRef); + } + + protected abstract T compare(BytesRef left, BytesRef right) throws IOException; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java index 63fdb9b5bc774..6c2d11ab0ad16 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java @@ -11,6 +11,8 @@ import org.apache.lucene.geo.Component2D; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.geo.Orientation; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.ann.Fixed; import org.elasticsearch.geometry.Geometry; @@ -51,6 +53,12 @@ * Here we simply wire the rules together specific to ST_CONTAINS and QueryRelation.CONTAINS. */ public class SpatialContains extends SpatialRelatesFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "SpatialContains", + SpatialContains::new + ); + // public for test access with reflection public static final SpatialRelationsContains GEO = new SpatialRelationsContains( SpatialCoordinateTypes.GEO, @@ -118,20 +126,14 @@ private boolean pointRelatesGeometries(long encoded, Component2D[] rightComponen ) public SpatialContains( Source source, - @Param( - name = "geomA", - type = { "geo_point", "cartesian_point", "geo_shape", "cartesian_shape" }, - description = "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. " - + "If `null`, the function returns `null`." - ) Expression left, - @Param( - name = "geomB", - type = { "geo_point", "cartesian_point", "geo_shape", "cartesian_shape" }, - description = "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. " - + "If `null`, the function returns `null`.\n" - + "The second parameter must also have the same coordinate system as the first.\n" - + "This means it is not possible to combine `geo_*` and `cartesian_*` parameters." - ) Expression right + @Param(name = "geomA", type = { "geo_point", "cartesian_point", "geo_shape", "cartesian_shape" }, description = """ + Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. 
+ If `null`, the function returns `null`.""") Expression left, + @Param(name = "geomB", type = { "geo_point", "cartesian_point", "geo_shape", "cartesian_shape" }, description = """ + Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. + If `null`, the function returns `null`. + The second parameter must also have the same coordinate system as the first. + This means it is not possible to combine `geo_*` and `cartesian_*` parameters.""") Expression right ) { this(source, left, right, false, false); } @@ -140,6 +142,15 @@ public SpatialContains( super(source, left, right, leftDocValues, rightDocValues); } + private SpatialContains(StreamInput in) throws IOException { + super(in, false, false); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public ShapeField.QueryRelation queryRelation() { return ShapeField.QueryRelation.CONTAINS; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java index 26d48831fdd81..e5520079e1b10 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java @@ -11,6 +11,8 @@ import org.apache.lucene.geo.Component2D; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.geo.Orientation; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.ann.Fixed; import org.elasticsearch.geometry.Geometry; @@ -48,6 +50,12 @@ * Here we simply wire the rules together specific to ST_DISJOINT and QueryRelation.DISJOINT. */ public class SpatialDisjoint extends SpatialRelatesFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "SpatialDisjoint", + SpatialDisjoint::new + ); + // public for test access with reflection public static final SpatialRelations GEO = new SpatialRelations( ShapeField.QueryRelation.DISJOINT, @@ -73,20 +81,14 @@ public class SpatialDisjoint extends SpatialRelatesFunction { ) public SpatialDisjoint( Source source, - @Param( - name = "geomA", - type = { "geo_point", "cartesian_point", "geo_shape", "cartesian_shape" }, - description = "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. " - + "If `null`, the function returns `null`." - ) Expression left, - @Param( - name = "geomB", - type = { "geo_point", "cartesian_point", "geo_shape", "cartesian_shape" }, - description = "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. " - + "If `null`, the function returns `null`.\n" - + "The second parameter must also have the same coordinate system as the first.\n" - + "This means it is not possible to combine `geo_*` and `cartesian_*` parameters." - ) Expression right + @Param(name = "geomA", type = { "geo_point", "cartesian_point", "geo_shape", "cartesian_shape" }, description = """ + Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. 
+ If `null`, the function returns `null`.""") Expression left, + @Param(name = "geomB", type = { "geo_point", "cartesian_point", "geo_shape", "cartesian_shape" }, description = """ + Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. + If `null`, the function returns `null`. + The second parameter must also have the same coordinate system as the first. + This means it is not possible to combine `geo_*` and `cartesian_*` parameters.""") Expression right ) { this(source, left, right, false, false); } @@ -95,6 +97,15 @@ private SpatialDisjoint(Source source, Expression left, Expression right, boolea super(source, left, right, leftDocValues, rightDocValues); } + private SpatialDisjoint(StreamInput in) throws IOException { + super(in, false, false); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public ShapeField.QueryRelation queryRelation() { return ShapeField.QueryRelation.DISJOINT; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialEvaluatorFactory.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialEvaluatorFactory.java index 14e743c5be460..6fd4f79125a21 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialEvaluatorFactory.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialEvaluatorFactory.java @@ -79,13 +79,20 @@ interface SpatialSourceSupplier { Expression right(); - SpatialRelatesFunction.SpatialCrsType crsType(); + BinarySpatialFunction.SpatialCrsType crsType(); boolean leftDocValues(); boolean rightDocValues(); } + /** + * When performing type resolution we need also write access to the SpatialSourceSupplier for setting the CRS + */ + interface SpatialSourceResolution extends SpatialSourceSupplier { + void setCrsType(DataType dataType); + } + protected static class SwappedSpatialSourceSupplier implements SpatialSourceSupplier { private final SpatialSourceSupplier delegate; @@ -99,7 +106,7 @@ public Source source() { } @Override - public SpatialRelatesFunction.SpatialCrsType crsType() { + public BinarySpatialFunction.SpatialCrsType crsType() { return delegate.crsType(); } @@ -209,13 +216,13 @@ public EvalOperator.ExpressionEvaluator.Factory get( protected record SpatialEvaluatorFieldKey(DataType dataType, boolean isConstant) {} record SpatialEvaluatorKey( - SpatialRelatesFunction.SpatialCrsType crsType, + BinarySpatialFunction.SpatialCrsType crsType, boolean leftDocValues, boolean rightDocValues, SpatialEvaluatorFieldKey left, SpatialEvaluatorFieldKey right ) { - SpatialEvaluatorKey(SpatialRelatesFunction.SpatialCrsType crsType, SpatialEvaluatorFieldKey left, SpatialEvaluatorFieldKey right) { + SpatialEvaluatorKey(BinarySpatialFunction.SpatialCrsType crsType, SpatialEvaluatorFieldKey left, SpatialEvaluatorFieldKey right) { this(crsType, false, false, left, right); } @@ -229,7 +236,7 @@ SpatialEvaluatorKey swapSides() { static SpatialEvaluatorKey fromSourceAndConstant(DataType left, DataType right) { return new SpatialEvaluatorKey( - SpatialRelatesFunction.SpatialCrsType.fromDataType(left), + BinarySpatialFunction.SpatialCrsType.fromDataType(left), new SpatialEvaluatorFieldKey(left, false), new SpatialEvaluatorFieldKey(right, true) ); @@ -237,7 +244,7 @@ static SpatialEvaluatorKey fromSourceAndConstant(DataType left, DataType right) static SpatialEvaluatorKey 
fromSources(DataType left, DataType right) { return new SpatialEvaluatorKey( - SpatialRelatesFunction.SpatialCrsType.fromDataType(left), + BinarySpatialFunction.SpatialCrsType.fromDataType(left), new SpatialEvaluatorFieldKey(left, false), new SpatialEvaluatorFieldKey(right, false) ); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java index c0794f59dcf81..045690340f6ac 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java @@ -11,6 +11,8 @@ import org.apache.lucene.geo.Component2D; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.geo.Orientation; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.ann.Fixed; import org.elasticsearch.geometry.Geometry; @@ -48,6 +50,12 @@ * Here we simply wire the rules together specific to ST_INTERSECTS and QueryRelation.INTERSECTS. */ public class SpatialIntersects extends SpatialRelatesFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "SpatialIntersects", + SpatialIntersects::new + ); + // public for test access with reflection public static final SpatialRelations GEO = new SpatialRelations( ShapeField.QueryRelation.INTERSECTS, @@ -71,20 +79,14 @@ public class SpatialIntersects extends SpatialRelatesFunction { In mathematical terms: ST_Intersects(A, B) ⇔ A ⋂ B ≠ ∅""", examples = @Example(file = "spatial", tag = "st_intersects-airports")) public SpatialIntersects( Source source, - @Param( - name = "geomA", - type = { "geo_point", "cartesian_point", "geo_shape", "cartesian_shape" }, - description = "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. " - + "If `null`, the function returns `null`." - ) Expression left, - @Param( - name = "geomB", - type = { "geo_point", "cartesian_point", "geo_shape", "cartesian_shape" }, - description = "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. " - + "If `null`, the function returns `null`.\n" - + "The second parameter must also have the same coordinate system as the first.\n" - + "This means it is not possible to combine `geo_*` and `cartesian_*` parameters." - ) Expression right + @Param(name = "geomA", type = { "geo_point", "cartesian_point", "geo_shape", "cartesian_shape" }, description = """ + Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. + If `null`, the function returns `null`.""") Expression left, + @Param(name = "geomB", type = { "geo_point", "cartesian_point", "geo_shape", "cartesian_shape" }, description = """ + Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. + If `null`, the function returns `null`. + The second parameter must also have the same coordinate system as the first. 
+ This means it is not possible to combine `geo_*` and `cartesian_*` parameters.""") Expression right ) { this(source, left, right, false, false); } @@ -93,6 +95,15 @@ private SpatialIntersects(Source source, Expression left, Expression right, bool super(source, left, right, leftDocValues, rightDocValues); } + private SpatialIntersects(StreamInput in) throws IOException { + super(in, false, false); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public ShapeField.QueryRelation queryRelation() { return ShapeField.QueryRelation.INTERSECTS; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialRelatesFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialRelatesFunction.java index 064df31e35cb2..68005ecbfed47 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialRelatesFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialRelatesFunction.java @@ -10,6 +10,7 @@ import org.apache.lucene.document.ShapeField; import org.apache.lucene.geo.Component2D; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.Point; @@ -19,8 +20,6 @@ import org.elasticsearch.lucene.spatial.GeometryDocValueReader; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; -import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; -import org.elasticsearch.xpack.esql.core.expression.function.scalar.BinaryScalarFunction; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes; @@ -36,28 +35,20 @@ import static org.apache.lucene.document.ShapeField.QueryRelation.CONTAINS; import static org.apache.lucene.document.ShapeField.QueryRelation.DISJOINT; -import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; -import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; -import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType; -import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_POINT; -import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_SHAPE; -import static org.elasticsearch.xpack.esql.core.type.DataType.isNull; -import static org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions.isSpatial; import static org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesUtils.asGeometryDocValueReader; import static org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesUtils.asLuceneComponent2D; -public abstract class SpatialRelatesFunction extends BinaryScalarFunction +public abstract class SpatialRelatesFunction extends BinarySpatialFunction implements EvaluatorMapper, SpatialEvaluatorFactory.SpatialSourceSupplier { - protected SpatialCrsType crsType; - protected final boolean leftDocValues; - protected final boolean rightDocValues; protected SpatialRelatesFunction(Source source, Expression left, Expression right, boolean leftDocValues, boolean rightDocValues) { - super(source, left, 
right); - this.leftDocValues = leftDocValues; - this.rightDocValues = rightDocValues; + super(source, left, right, leftDocValues, rightDocValues, false); + } + + protected SpatialRelatesFunction(StreamInput in, boolean leftDocValues, boolean rightDocValues) throws IOException { + super(in, leftDocValues, rightDocValues, false); } public abstract ShapeField.QueryRelation queryRelation(); @@ -67,94 +58,6 @@ public DataType dataType() { return DataType.BOOLEAN; } - @Override - public SpatialCrsType crsType() { - if (crsType == null) { - resolveType(); - } - return crsType; - } - - @Override - protected TypeResolution resolveType() { - if (left().foldable() && right().foldable() == false || isNull(left().dataType())) { - // Left is literal, but right is not, check the left field's type against the right field - return resolveType(right(), left(), SECOND, FIRST); - } else { - // All other cases check the right against the left - return resolveType(left(), right(), FIRST, SECOND); - } - } - - private TypeResolution resolveType( - Expression leftExpression, - Expression rightExpression, - TypeResolutions.ParamOrdinal leftOrdinal, - TypeResolutions.ParamOrdinal rightOrdinal - ) { - TypeResolution leftResolution = isSpatial(leftExpression, sourceText(), leftOrdinal); - TypeResolution rightResolution = isSpatial(rightExpression, sourceText(), rightOrdinal); - if (leftResolution.resolved()) { - return resolveType(leftExpression, rightExpression, rightOrdinal); - } else if (rightResolution.resolved()) { - return resolveType(rightExpression, leftExpression, leftOrdinal); - } else { - return leftResolution; - } - } - - protected TypeResolution resolveType( - Expression spatialExpression, - Expression otherExpression, - TypeResolutions.ParamOrdinal otherParamOrdinal - ) { - if (isNull(spatialExpression.dataType())) { - return isSpatial(otherExpression, sourceText(), otherParamOrdinal); - } - TypeResolution resolution = isSameSpatialType(spatialExpression.dataType(), otherExpression, sourceText(), otherParamOrdinal); - if (resolution.unresolved()) { - return resolution; - } - setCrsType(spatialExpression.dataType()); - return TypeResolution.TYPE_RESOLVED; - } - - protected void setCrsType(DataType dataType) { - crsType = SpatialCrsType.fromDataType(dataType); - } - - public static TypeResolution isSameSpatialType( - DataType spatialDataType, - Expression expression, - String operationName, - TypeResolutions.ParamOrdinal paramOrd - ) { - return isType( - expression, - dt -> EsqlDataTypes.isSpatial(dt) && spatialCRSCompatible(spatialDataType, dt), - operationName, - paramOrd, - compatibleTypeNames(spatialDataType) - ); - } - - private static final String[] GEO_TYPE_NAMES = new String[] { GEO_POINT.typeName(), GEO_SHAPE.typeName() }; - private static final String[] CARTESIAN_TYPE_NAMES = new String[] { GEO_POINT.typeName(), GEO_SHAPE.typeName() }; - - private static boolean spatialCRSCompatible(DataType spatialDataType, DataType otherDataType) { - return EsqlDataTypes.isSpatialGeo(spatialDataType) && EsqlDataTypes.isSpatialGeo(otherDataType) - || EsqlDataTypes.isSpatialGeo(spatialDataType) == false && EsqlDataTypes.isSpatialGeo(otherDataType) == false; - } - - static String[] compatibleTypeNames(DataType spatialDataType) { - return EsqlDataTypes.isSpatialGeo(spatialDataType) ? GEO_TYPE_NAMES : CARTESIAN_TYPE_NAMES; - } - - @Override - public boolean foldable() { - return left().foldable() && right().foldable(); - } - /** * Mark the function as expecting the specified fields to arrive as doc-values. 
*/ @@ -196,14 +99,6 @@ public boolean equals(Object obj) { return false; } - public boolean leftDocValues() { - return leftDocValues; - } - - public boolean rightDocValues() { - return rightDocValues; - } - /** * Produce a map of rules defining combinations of incoming types to the evaluator factory that should be used. */ @@ -236,24 +131,9 @@ protected boolean foundField(Expression expression, Set foundAtt return expression instanceof FieldAttribute field && foundAttributes.contains(field); } - protected enum SpatialCrsType { - GEO, - CARTESIAN, - UNSPECIFIED; - - public static SpatialCrsType fromDataType(DataType dataType) { - return EsqlDataTypes.isSpatialGeo(dataType) ? SpatialCrsType.GEO - : EsqlDataTypes.isSpatial(dataType) ? SpatialCrsType.CARTESIAN - : SpatialCrsType.UNSPECIFIED; - } - } - - protected static class SpatialRelations { + protected static class SpatialRelations extends BinarySpatialComparator { protected final ShapeField.QueryRelation queryRelation; - protected final SpatialCoordinateTypes spatialCoordinateType; - protected final CoordinateEncoder coordinateEncoder; protected final ShapeIndexer shapeIndexer; - protected final SpatialCrsType crsType; protected SpatialRelations( ShapeField.QueryRelation queryRelation, @@ -261,11 +141,14 @@ protected SpatialRelations( CoordinateEncoder encoder, ShapeIndexer shapeIndexer ) { + super(spatialCoordinateType, encoder); this.queryRelation = queryRelation; - this.spatialCoordinateType = spatialCoordinateType; - this.coordinateEncoder = encoder; this.shapeIndexer = shapeIndexer; - this.crsType = spatialCoordinateType.equals(SpatialCoordinateTypes.GEO) ? SpatialCrsType.GEO : SpatialCrsType.CARTESIAN; + } + + @Override + protected Boolean compare(BytesRef left, BytesRef right) throws IOException { + return geometryRelatesGeometry(left, right); } protected boolean geometryRelatesGeometry(BytesRef left, BytesRef right) throws IOException { @@ -273,10 +156,6 @@ protected boolean geometryRelatesGeometry(BytesRef left, BytesRef right) throws return geometryRelatesGeometry(left, rightComponent2D); } - protected Geometry fromBytesRef(BytesRef bytesRef) { - return SpatialCoordinateTypes.UNSPECIFIED.wkbToGeometry(bytesRef); - } - protected boolean geometryRelatesGeometry(BytesRef left, Component2D rightComponent2D) throws IOException { Geometry leftGeom = fromBytesRef(left); // We already have a Component2D for the right geometry, so we need to convert the left geometry to a doc-values byte array diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialRelatesUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialRelatesUtils.java index 3278eaac43d0d..600c3529acd13 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialRelatesUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialRelatesUtils.java @@ -36,12 +36,12 @@ public class SpatialRelatesUtils { * This function is used to convert a spatial constant to a lucene Component2D. * When both left and right sides are constants, we convert the left to a doc-values byte array and the right to a Component2D. 
*/ - static Component2D asLuceneComponent2D(SpatialRelatesFunction.SpatialCrsType crsType, Expression expression) { + static Component2D asLuceneComponent2D(BinarySpatialFunction.SpatialCrsType crsType, Expression expression) { return asLuceneComponent2D(crsType, makeGeometryFromLiteral(expression)); } - static Component2D asLuceneComponent2D(SpatialRelatesFunction.SpatialCrsType crsType, Geometry geometry) { - if (crsType == SpatialRelatesFunction.SpatialCrsType.GEO) { + static Component2D asLuceneComponent2D(BinarySpatialFunction.SpatialCrsType crsType, Geometry geometry) { + if (crsType == BinarySpatialFunction.SpatialCrsType.GEO) { var luceneGeometries = LuceneGeometriesUtils.toLatLonGeometry(geometry, true, t -> {}); return LatLonGeometry.create(luceneGeometries); } else { @@ -55,12 +55,12 @@ static Component2D asLuceneComponent2D(SpatialRelatesFunction.SpatialCrsType crs * When both left and right sides are constants, we convert the left to a doc-values byte array and the right to a Component2D[]. * The reason for generating an array instead of a single component is for multi-shape support with ST_CONTAINS. */ - static Component2D[] asLuceneComponent2Ds(SpatialRelatesFunction.SpatialCrsType crsType, Expression expression) { + static Component2D[] asLuceneComponent2Ds(BinarySpatialFunction.SpatialCrsType crsType, Expression expression) { return asLuceneComponent2Ds(crsType, makeGeometryFromLiteral(expression)); } - static Component2D[] asLuceneComponent2Ds(SpatialRelatesFunction.SpatialCrsType crsType, Geometry geometry) { - if (crsType == SpatialRelatesFunction.SpatialCrsType.GEO) { + static Component2D[] asLuceneComponent2Ds(BinarySpatialFunction.SpatialCrsType crsType, Geometry geometry) { + if (crsType == BinarySpatialFunction.SpatialCrsType.GEO) { var luceneGeometries = LuceneGeometriesUtils.toLatLonGeometry(geometry, true, t -> {}); return LuceneComponent2DUtils.createLatLonComponents(luceneGeometries); } else { @@ -73,10 +73,10 @@ static Component2D[] asLuceneComponent2Ds(SpatialRelatesFunction.SpatialCrsType * This function is used to convert a spatial constant to a doc-values byte array. * When both left and right sides are constants, we convert the left to a doc-values byte array and the right to a Component2D. 
*/ - static GeometryDocValueReader asGeometryDocValueReader(SpatialRelatesFunction.SpatialCrsType crsType, Expression expression) + static GeometryDocValueReader asGeometryDocValueReader(BinarySpatialFunction.SpatialCrsType crsType, Expression expression) throws IOException { Geometry geometry = makeGeometryFromLiteral(expression); - if (crsType == SpatialRelatesFunction.SpatialCrsType.GEO) { + if (crsType == BinarySpatialFunction.SpatialCrsType.GEO) { return asGeometryDocValueReader( CoordinateEncoder.GEO, new GeoShapeIndexer(Orientation.CCW, "SpatialRelatesFunction"), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java index 6568fd42d44c7..f72571a4b5250 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java @@ -11,6 +11,8 @@ import org.apache.lucene.geo.Component2D; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.geo.Orientation; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.ann.Fixed; import org.elasticsearch.geometry.Geometry; @@ -49,6 +51,12 @@ * Here we simply wire the rules together specific to ST_WITHIN and QueryRelation.WITHIN. */ public class SpatialWithin extends SpatialRelatesFunction implements SurrogateExpression { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "SpatialWithin", + SpatialWithin::new + ); + // public for test access with reflection public static final SpatialRelations GEO = new SpatialRelations( ShapeField.QueryRelation.WITHIN, @@ -73,20 +81,14 @@ public class SpatialWithin extends SpatialRelatesFunction implements SurrogateEx ) public SpatialWithin( Source source, - @Param( - name = "geomA", - type = { "geo_point", "cartesian_point", "geo_shape", "cartesian_shape" }, - description = "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. " - + "If `null`, the function returns `null`." - ) Expression left, - @Param( - name = "geomB", - type = { "geo_point", "cartesian_point", "geo_shape", "cartesian_shape" }, - description = "Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. " - + "If `null`, the function returns `null`.\n" - + "The second parameter must also have the same coordinate system as the first.\n" - + "This means it is not possible to combine `geo_*` and `cartesian_*` parameters." - ) Expression right + @Param(name = "geomA", type = { "geo_point", "cartesian_point", "geo_shape", "cartesian_shape" }, description = """ + Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. + If `null`, the function returns `null`.""") Expression left, + @Param(name = "geomB", type = { "geo_point", "cartesian_point", "geo_shape", "cartesian_shape" }, description = """ + Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. + If `null`, the function returns `null`. + The second parameter must also have the same coordinate system as the first. 
+ This means it is not possible to combine `geo_*` and `cartesian_*` parameters.""") Expression right ) { this(source, left, right, false, false); } @@ -95,6 +97,15 @@ public SpatialWithin( super(source, left, right, leftDocValues, rightDocValues); } + private SpatialWithin(StreamInput in) throws IOException { + super(in, false, false); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public ShapeField.QueryRelation queryRelation() { return ShapeField.QueryRelation.WITHIN; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistance.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistance.java new file mode 100644 index 0000000000000..1fdd4241aa222 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistance.java @@ -0,0 +1,288 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.SloppyMath; +import org.elasticsearch.common.geo.GeoUtils; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.compute.ann.Evaluator; +import org.elasticsearch.compute.ann.Fixed; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.geometry.Geometry; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.lucene.spatial.CoordinateEncoder; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes; +import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; +import org.elasticsearch.xpack.esql.expression.function.Example; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; + +import java.io.IOException; +import java.util.function.Function; + +import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; +import static org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesUtils.makeGeometryFromLiteral; + +/** + * Computes the distance between two points. + * For cartesian geometries, this is the pythagorean distance in the same units as the original coordinates. + * For geographic geometries, this is the circular distance along the great circle in meters. + * The function `st_distance` is defined in the OGC Simple Feature Access standard. + * Alternatively it is described in PostGIS documentation at PostGIS:ST_Distance. 
+ */ +public class StDistance extends BinarySpatialFunction implements EvaluatorMapper { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "StDistance", + StDistance::new + ); + + // public for test access with reflection + public static final DistanceCalculator GEO = new GeoDistanceCalculator(); + // public for test access with reflection + public static final DistanceCalculator CARTESIAN = new CartesianDistanceCalculator(); + + protected static class GeoDistanceCalculator extends DistanceCalculator { + protected GeoDistanceCalculator() { + super(SpatialCoordinateTypes.GEO, CoordinateEncoder.GEO); + } + + @Override + protected double distance(Point left, Point right) { + return SloppyMath.haversinMeters( + GeoUtils.quantizeLat(left.getY()), + GeoUtils.quantizeLon(left.getX()), + GeoUtils.quantizeLat(right.getY()), + GeoUtils.quantizeLon(right.getX()) + ); + } + } + + protected static class CartesianDistanceCalculator extends DistanceCalculator { + + protected CartesianDistanceCalculator() { + super(SpatialCoordinateTypes.CARTESIAN, CoordinateEncoder.CARTESIAN); + } + + @Override + protected double distance(Point left, Point right) { + final double diffX = left.getX() - right.getX(); + final double diffY = left.getY() - right.getY(); + return Math.sqrt(diffX * diffX + diffY * diffY); + } + } + + /** + * This class is a CRS specific interface for generalizing distance calculations for the various possible ways + * that the geometries can be provided, from source, from evals, from literals and from doc values. + */ + public abstract static class DistanceCalculator extends BinarySpatialComparator { + + protected DistanceCalculator(SpatialCoordinateTypes spatialCoordinateType, CoordinateEncoder encoder) { + super(spatialCoordinateType, encoder); + } + + @Override + protected Double compare(BytesRef left, BytesRef right) throws IOException { + return distance(left, right); + } + + protected abstract double distance(Point left, Point right); + + protected double distance(long encoded, Geometry right) { + Point point = spatialCoordinateType.longAsPoint(encoded); + return distance(point, (Point) right); + } + + protected double distance(Geometry left, Geometry right) { + return distance((Point) left, (Point) right); + } + + public double distance(BytesRef left, BytesRef right) { + return distance(this.fromBytesRef(left), this.fromBytesRef(right)); + } + + public double distance(BytesRef left, Point right) { + return distance(this.fromBytesRef(left), right); + } + } + + @FunctionInfo( + returnType = "double", + description = """ + Computes the distance between two points. + For cartesian geometries, this is the pythagorean distance in the same units as the original coordinates. + For geographic geometries, this is the circular distance along the great circle in meters.""", + examples = @Example(file = "spatial", tag = "st_distance-airports") + ) + public StDistance( + Source source, + @Param(name = "geomA", type = { "geo_point", "cartesian_point" }, description = """ + Expression of type `geo_point` or `cartesian_point`. + If `null`, the function returns `null`.""") Expression left, + @Param(name = "geomB", type = { "geo_point", "cartesian_point" }, description = """ + Expression of type `geo_point` or `cartesian_point`. + If `null`, the function returns `null`. + The second parameter must also have the same coordinate system as the first. 
+ This means it is not possible to combine `geo_point` and `cartesian_point` parameters.""") Expression right + ) { + super(source, left, right, false, false, true); + } + + protected StDistance(Source source, Expression left, Expression right, boolean leftDocValues, boolean rightDocValues) { + super(source, left, right, leftDocValues, rightDocValues, true); + } + + private StDistance(StreamInput in) throws IOException { + super(in, false, false, true); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + public DataType dataType() { + return DOUBLE; + } + + @Override + protected StDistance replaceChildren(Expression newLeft, Expression newRight) { + return new StDistance(source(), newLeft, newRight, leftDocValues, rightDocValues); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StDistance::new, left(), right()); + } + + @Override + public Object fold() { + var leftGeom = makeGeometryFromLiteral(left()); + var rightGeom = makeGeometryFromLiteral(right()); + return (crsType == SpatialCrsType.GEO) ? GEO.distance(leftGeom, rightGeom) : CARTESIAN.distance(leftGeom, rightGeom); + } + + @Override + public EvalOperator.ExpressionEvaluator.Factory toEvaluator( + Function toEvaluator + ) { + if (right().foldable()) { + return toEvaluator(toEvaluator, left(), makeGeometryFromLiteral(right()), leftDocValues); + } else if (left().foldable()) { + return toEvaluator(toEvaluator, right(), makeGeometryFromLiteral(left()), rightDocValues); + } else { + EvalOperator.ExpressionEvaluator.Factory leftE = toEvaluator.apply(left()); + EvalOperator.ExpressionEvaluator.Factory rightE = toEvaluator.apply(right()); + if (crsType() == SpatialCrsType.GEO) { + if (leftDocValues) { + return new StDistanceGeoPointDocValuesAndSourceEvaluator.Factory(source(), leftE, rightE); + } else if (rightDocValues) { + return new StDistanceGeoPointDocValuesAndSourceEvaluator.Factory(source(), rightE, leftE); + } else { + return new StDistanceGeoSourceAndSourceEvaluator.Factory(source(), leftE, rightE); + } + } else if (crsType() == SpatialCrsType.CARTESIAN) { + if (leftDocValues) { + return new StDistanceCartesianPointDocValuesAndSourceEvaluator.Factory(source(), leftE, rightE); + } else if (rightDocValues) { + return new StDistanceCartesianPointDocValuesAndSourceEvaluator.Factory(source(), rightE, leftE); + } else { + return new StDistanceCartesianSourceAndSourceEvaluator.Factory(source(), leftE, rightE); + } + } + } + throw EsqlIllegalArgumentException.illegalDataType(crsType().name()); + } + + private EvalOperator.ExpressionEvaluator.Factory toEvaluator( + Function toEvaluator, + Expression field, + Geometry geometry, + boolean docValues + ) { + if (geometry instanceof Point point) { + return toEvaluator(toEvaluator, field, point, docValues); + } else { + throw new IllegalArgumentException("Unsupported geometry type for ST_DISTANCE: " + geometry.type().name()); + } + } + + private EvalOperator.ExpressionEvaluator.Factory toEvaluator( + Function toEvaluator, + Expression field, + Point point, + boolean docValues + ) { + EvalOperator.ExpressionEvaluator.Factory fieldEvaluator = toEvaluator.apply(field); + if (crsType() == SpatialCrsType.GEO) { + if (docValues) { + return new StDistanceGeoPointDocValuesAndConstantEvaluator.Factory(source(), fieldEvaluator, point); + } else { + return new StDistanceGeoSourceAndConstantEvaluator.Factory(source(), fieldEvaluator, point); + } + } else if (crsType() == SpatialCrsType.CARTESIAN) { + if (docValues) { + return new 
StDistanceCartesianPointDocValuesAndConstantEvaluator.Factory(source(), fieldEvaluator, point); + } else { + return new StDistanceCartesianSourceAndConstantEvaluator.Factory(source(), fieldEvaluator, point); + } + } + throw EsqlIllegalArgumentException.illegalDataType(crsType().name()); + } + + @Evaluator(extraName = "GeoSourceAndConstant", warnExceptions = { IllegalArgumentException.class, IOException.class }) + static double processGeoSourceAndConstant(BytesRef leftValue, @Fixed Point rightValue) throws IOException { + return GEO.distance(leftValue, rightValue); + } + + @Evaluator(extraName = "GeoSourceAndSource", warnExceptions = { IllegalArgumentException.class, IOException.class }) + static double processGeoSourceAndSource(BytesRef leftValue, BytesRef rightValue) throws IOException { + return GEO.distance(leftValue, rightValue); + } + + @Evaluator(extraName = "GeoPointDocValuesAndConstant", warnExceptions = { IllegalArgumentException.class }) + static double processGeoPointDocValuesAndConstant(long leftValue, @Fixed Point rightValue) { + return GEO.distance(leftValue, rightValue); + } + + @Evaluator(extraName = "GeoPointDocValuesAndSource", warnExceptions = { IllegalArgumentException.class }) + static double processGeoPointDocValuesAndSource(long leftValue, BytesRef rightValue) { + Geometry geometry = SpatialCoordinateTypes.UNSPECIFIED.wkbToGeometry(rightValue); + return GEO.distance(leftValue, geometry); + } + + @Evaluator(extraName = "CartesianSourceAndConstant", warnExceptions = { IllegalArgumentException.class, IOException.class }) + static double processCartesianSourceAndConstant(BytesRef leftValue, @Fixed Point rightValue) throws IOException { + return CARTESIAN.distance(leftValue, rightValue); + } + + @Evaluator(extraName = "CartesianSourceAndSource", warnExceptions = { IllegalArgumentException.class, IOException.class }) + static double processCartesianSourceAndSource(BytesRef leftValue, BytesRef rightValue) throws IOException { + return CARTESIAN.distance(leftValue, rightValue); + } + + @Evaluator(extraName = "CartesianPointDocValuesAndConstant", warnExceptions = { IllegalArgumentException.class }) + static double processCartesianPointDocValuesAndConstant(long leftValue, @Fixed Point rightValue) { + return CARTESIAN.distance(leftValue, rightValue); + } + + @Evaluator(extraName = "CartesianPointDocValuesAndSource") + static double processCartesianPointDocValuesAndSource(long leftValue, BytesRef rightValue) { + Geometry geometry = SpatialCoordinateTypes.UNSPECIFIED.wkbToGeometry(rightValue); + return CARTESIAN.distance(leftValue, geometry); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java index 69464787f9288..23ee942bcf53a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java @@ -26,7 +26,6 @@ import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; -import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import java.io.IOException; import java.util.List; @@ -36,8 +35,6 @@ import static org.elasticsearch.common.unit.ByteSizeUnit.MB; import static 
org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; -import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanReader.readerFromPlanReader; -import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanWriter.writerFromPlanWriter; /** * Join strings. @@ -63,16 +60,16 @@ public Concat( private Concat(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), - ((PlanStreamInput) in).readExpression(), - in.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readExpression)) + in.readNamedWriteable(Expression.class), + in.readNamedWriteableCollectionAsList(Expression.class) ); } @Override public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); - ((PlanStreamOutput) out).writeExpression(children().get(0)); - out.writeCollection(children().subList(1, children().size()), writerFromPlanWriter(PlanStreamOutput::writeExpression)); + out.writeNamedWriteable(children().get(0)); + out.writeNamedWriteableCollection(children().subList(1, children().size())); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/EndsWith.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/EndsWith.java index 767563ed4112a..1d2b743fe5a7a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/EndsWith.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/EndsWith.java @@ -8,6 +8,9 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.string; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.core.expression.Expression; @@ -18,7 +21,9 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import java.io.IOException; import java.util.Arrays; import java.util.List; import java.util.function.Function; @@ -28,6 +33,7 @@ import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; public class EndsWith extends EsqlScalarFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "EndsWith", EndsWith::new); private final Expression str; private final Expression suffix; @@ -55,6 +61,22 @@ public EndsWith( this.suffix = suffix; } + private EndsWith(StreamInput in) throws IOException { + this(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class), in.readNamedWriteable(Expression.class)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + out.writeNamedWriteable(str); + out.writeNamedWriteable(suffix); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public DataType dataType() { return DataType.BOOLEAN; @@ -107,4 +129,12 @@ protected NodeInfo info() { 
public ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { return new EndsWithEvaluator.Factory(source(), toEvaluator.apply(str), toEvaluator.apply(suffix)); } + + Expression str() { + return str; + } + + Expression suffix() { + return suffix; + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Left.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Left.java index 384874e173658..b0e5b41f971e1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Left.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Left.java @@ -9,6 +9,9 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.UnicodeUtil; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.ann.Fixed; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; @@ -21,7 +24,9 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import java.io.IOException; import java.util.Arrays; import java.util.List; import java.util.function.Function; @@ -35,6 +40,8 @@ * {code left(foo, len)} is an alias to {code substring(foo, 0, len)} */ public class Left extends EsqlScalarFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Left", Left::new); + private final Expression str; private final Expression length; @@ -53,6 +60,22 @@ public Left( this.length = length; } + private Left(StreamInput in) throws IOException { + this(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class), in.readNamedWriteable(Expression.class)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + out.writeNamedWriteable(str); + out.writeNamedWriteable(length); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Evaluator static BytesRef process( @Fixed(includeInToString = false, build = true) BytesRef out, @@ -120,4 +143,12 @@ protected TypeResolution resolveType() { public boolean foldable() { return str.foldable() && length.foldable(); } + + Expression str() { + return str; + } + + Expression length() { + return length; + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Locate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Locate.java index 1669a64ec83d2..ea088bdc412e8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Locate.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Locate.java @@ -9,18 +9,23 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.UnicodeUtil; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import 
org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import java.io.IOException; import java.util.Arrays; import java.util.List; import java.util.function.Function; @@ -35,6 +40,7 @@ * Locate function, given a string 'a' and a substring 'b', it returns the index of the first occurrence of the substring 'b' in 'a'. */ public class Locate extends EsqlScalarFunction implements OptionalArgument { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Locate", Locate::new); private final Expression str; private final Expression substr; @@ -42,7 +48,7 @@ public class Locate extends EsqlScalarFunction implements OptionalArgument { @FunctionInfo( returnType = "integer", - description = "Returns an integer that indicates the position of a keyword substring within another string", + description = "Returns an integer that indicates the position of a keyword substring within another string.", examples = @Example(file = "string", tag = "locate") ) public Locate( @@ -61,6 +67,28 @@ public Locate( this.start = start; } + private Locate(StreamInput in) throws IOException { + this( + Source.readFrom((PlanStreamInput) in), + in.readNamedWriteable(Expression.class), + in.readNamedWriteable(Expression.class), + in.readOptionalNamedWriteable(Expression.class) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + out.writeNamedWriteable(str); + out.writeNamedWriteable(substr); + out.writeOptionalNamedWriteable(start); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public DataType dataType() { return DataType.INTEGER; @@ -142,4 +170,16 @@ public ExpressionEvaluator.Factory toEvaluator(Function info() { return NodeInfo.create(this, RLike::new, field(), pattern(), caseInsensitive()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Repeat.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Repeat.java index e8ad0a83829fe..3ff28e08f4ce1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Repeat.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Repeat.java @@ -8,20 +8,25 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.string; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.ann.Fixed; import 
org.elasticsearch.compute.operator.BreakingBytesRefBuilder; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import java.io.IOException; import java.util.Arrays; import java.util.List; import java.util.function.Function; @@ -33,6 +38,7 @@ import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType; public class Repeat extends EsqlScalarFunction implements OptionalArgument { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Repeat", Repeat::new); static final long MAX_REPEATED_LENGTH = MB.toBytes(1); @@ -54,6 +60,22 @@ public Repeat( this.number = number; } + private Repeat(StreamInput in) throws IOException { + this(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class), in.readNamedWriteable(Expression.class)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + out.writeNamedWriteable(str); + out.writeNamedWriteable(number); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public DataType dataType() { return DataType.KEYWORD; @@ -145,4 +167,12 @@ public ExpressionEvaluator.Factory toEvaluator(Function info() { public ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { return new StartsWithEvaluator.Factory(source(), toEvaluator.apply(str), toEvaluator.apply(prefix)); } + + Expression str() { + return str; + } + + Expression prefix() { + return prefix; + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Substring.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Substring.java index 94b9f06b63b5d..7c2ecd0c60e49 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Substring.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Substring.java @@ -9,19 +9,24 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.UnicodeUtil; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; -import org.elasticsearch.xpack.esql.core.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; 
import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.OptionalArgument; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import java.io.IOException; import java.util.Arrays; import java.util.List; import java.util.function.Function; @@ -33,12 +38,17 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; public class Substring extends EsqlScalarFunction implements OptionalArgument { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "Substring", + Substring::new + ); private final Expression str, start, length; @FunctionInfo( returnType = "keyword", - description = "Returns a substring of a string, specified by a start position and an optional length", + description = "Returns a substring of a string, specified by a start position and an optional length.", examples = { @Example(file = "docs", tag = "substring", description = "This example returns the first three characters of every last name:"), @Example(file = "docs", tag = "substringEnd", description = """ @@ -69,6 +79,28 @@ public Substring( this.length = length; } + private Substring(StreamInput in) throws IOException { + this( + Source.readFrom((PlanStreamInput) in), + in.readNamedWriteable(Expression.class), + in.readNamedWriteable(Expression.class), + in.readOptionalNamedWriteable(Expression.class) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + out.writeNamedWriteable(str); + out.writeNamedWriteable(start); + out.writeOptionalNamedWriteable(length); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override public DataType dataType() { return DataType.KEYWORD; @@ -157,4 +189,16 @@ public ExpressionEvaluator.Factory toEvaluator(Function info() { return NodeInfo.create(this, WildcardLike::new, field(), pattern()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java index 7ab6d96181f53..647071c44cfd3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java @@ -116,8 +116,8 @@ public interface BinaryEvaluator { ) throws IOException { this( Source.readFrom((PlanStreamInput) in), - ((PlanStreamInput) in).readExpression(), - ((PlanStreamInput) in).readExpression(), + in.readNamedWriteable(Expression.class), + in.readNamedWriteable(Expression.class), op, ints, longs, diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EsqlBinaryComparison.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EsqlBinaryComparison.java index a4559e10eaf3a..52d4c111b2eae 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EsqlBinaryComparison.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EsqlBinaryComparison.java @@ -16,14 +16,12 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparison; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonProcessor; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Cast; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; -import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import org.elasticsearch.xpack.esql.type.EsqlDataTypeRegistry; import java.io.IOException; @@ -51,24 +49,44 @@ public interface BinaryOperatorConstructor { public enum BinaryComparisonOperation implements Writeable { - EQ(0, "==", BinaryComparisonProcessor.BinaryComparisonOperation.EQ, Equals::new), + EQ(0, "==", org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonOperation.EQ, Equals::new), // id 1 reserved for NullEquals - NEQ(2, "!=", BinaryComparisonProcessor.BinaryComparisonOperation.NEQ, NotEquals::new), - GT(3, ">", BinaryComparisonProcessor.BinaryComparisonOperation.GT, GreaterThan::new), - GTE(4, ">=", BinaryComparisonProcessor.BinaryComparisonOperation.GTE, GreaterThanOrEqual::new), - LT(5, "<", BinaryComparisonProcessor.BinaryComparisonOperation.LT, LessThan::new), - LTE(6, "<=", BinaryComparisonProcessor.BinaryComparisonOperation.LTE, LessThanOrEqual::new); + NEQ( + 2, + "!=", + org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonOperation.NEQ, + NotEquals::new + ), + GT( + 3, + ">", + org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonOperation.GT, + GreaterThan::new + ), + GTE( + 4, + ">=", + org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonOperation.GTE, + GreaterThanOrEqual::new + ), + LT(5, "<", org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonOperation.LT, LessThan::new), + LTE( + 6, + "<=", + org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonOperation.LTE, + LessThanOrEqual::new + ); private final int id; private final String symbol; // Temporary mapping to the old enum, to satisfy the superclass constructor signature. - private final BinaryComparisonProcessor.BinaryComparisonOperation shim; + private final org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonOperation shim; private final BinaryOperatorConstructor constructor; BinaryComparisonOperation( int id, String symbol, - BinaryComparisonProcessor.BinaryComparisonOperation shim, + org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonOperation shim, BinaryOperatorConstructor constructor ) { this.id = id; @@ -129,8 +147,8 @@ public static EsqlBinaryComparison readFrom(StreamInput in) throws IOException { // TODO this uses a constructor on the operation *and* a name which is confusing. It only needs one. Everything else uses a name. 
var source = Source.readFrom((PlanStreamInput) in); EsqlBinaryComparison.BinaryComparisonOperation operation = EsqlBinaryComparison.BinaryComparisonOperation.readFromStream(in); - var left = ((PlanStreamInput) in).readExpression(); - var right = ((PlanStreamInput) in).readExpression(); + var left = in.readNamedWriteable(Expression.class); + var right = in.readNamedWriteable(Expression.class); // TODO: Remove zoneId entirely var zoneId = in.readOptionalZoneId(); return operation.buildNewInstance(source, left, right); @@ -140,8 +158,8 @@ public static EsqlBinaryComparison readFrom(StreamInput in) throws IOException { public final void writeTo(StreamOutput out) throws IOException { source().writeTo(out); functionType.writeTo(out); - ((PlanStreamOutput) out).writeExpression(left()); - ((PlanStreamOutput) out).writeExpression(right()); + out.writeNamedWriteable(left()); + out.writeNamedWriteable(right()); out.writeOptionalZoneId(zoneId()); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/In.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/In.java index 17fca1e1cff88..b7ebf114501cd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/In.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/In.java @@ -7,17 +7,22 @@ package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.InProcessor; +import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.Comparisons; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions; import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import java.io.IOException; import java.util.List; import static org.elasticsearch.common.logging.LoggerMessageFormat.format; @@ -25,6 +30,8 @@ import static org.elasticsearch.xpack.esql.core.util.StringUtils.ordinal; public class In extends org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.In { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "In", In::new); + @FunctionInfo( returnType = "boolean", description = "The `IN` operator allows testing whether a field or expression equals an element in a list of literals, " @@ -35,6 +42,26 @@ public In(Source source, Expression value, List list) { super(source, value, list); } + private In(StreamInput in) throws IOException { + this( + Source.readFrom((PlanStreamInput) in), + in.readNamedWriteable(Expression.class), + in.readNamedWriteableCollectionAsList(Expression.class) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + 
source().writeTo(out); + out.writeNamedWriteable(value()); + out.writeNamedWriteableCollection(list()); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + @Override protected NodeInfo info() { return NodeInfo.create(this, In::new, value(), list()); @@ -60,7 +87,20 @@ public Boolean fold() { // QL's `In` fold() doesn't handle BytesRef and can't know if this is Keyword/Text, Version or IP anyway. // `In` allows comparisons of same type only (safe for numerics), so it's safe to apply InProcessor directly with no implicit // (non-numerical) conversions. - return InProcessor.apply(value().fold(), list().stream().map(Expression::fold).toList()); + return apply(value().fold(), list().stream().map(Expression::fold).toList()); + } + + private static Boolean apply(Object input, List values) { + Boolean result = Boolean.FALSE; + for (Object v : values) { + Boolean compResult = Comparisons.eq(input, v); + if (compResult == null) { + result = null; + } else if (compResult == Boolean.TRUE) { + return Boolean.TRUE; + } + } + return result; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java index 74e8661596e41..e4051523c7a5e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java @@ -9,9 +9,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; -import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.io.stream.NamedWriteable; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.BytesRefs; @@ -25,83 +23,24 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; -import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.expression.Order; -import org.elasticsearch.xpack.esql.core.expression.function.scalar.ScalarFunction; -import org.elasticsearch.xpack.esql.core.expression.predicate.fulltext.FullTextPredicate; -import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; -import org.elasticsearch.xpack.esql.core.expression.predicate.logical.BinaryLogic; -import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Not; -import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; -import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull; -import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNull; -import org.elasticsearch.xpack.esql.core.expression.predicate.regex.RLikePattern; -import org.elasticsearch.xpack.esql.core.expression.predicate.regex.RegexMatch; -import org.elasticsearch.xpack.esql.core.expression.predicate.regex.WildcardPattern; import org.elasticsearch.xpack.esql.core.index.EsIndex; -import org.elasticsearch.xpack.esql.core.plan.logical.Filter; -import org.elasticsearch.xpack.esql.core.plan.logical.Limit; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import 
org.elasticsearch.xpack.esql.core.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.EsField; -import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; -import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; -import org.elasticsearch.xpack.esql.expression.function.aggregate.Avg; -import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; -import org.elasticsearch.xpack.esql.expression.function.aggregate.CountDistinct; -import org.elasticsearch.xpack.esql.expression.function.aggregate.Max; -import org.elasticsearch.xpack.esql.expression.function.aggregate.Median; -import org.elasticsearch.xpack.esql.expression.function.aggregate.MedianAbsoluteDeviation; -import org.elasticsearch.xpack.esql.expression.function.aggregate.Min; -import org.elasticsearch.xpack.esql.expression.function.aggregate.Percentile; -import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroid; -import org.elasticsearch.xpack.esql.expression.function.aggregate.Sum; -import org.elasticsearch.xpack.esql.expression.function.aggregate.TopList; -import org.elasticsearch.xpack.esql.expression.function.aggregate.Values; -import org.elasticsearch.xpack.esql.expression.function.grouping.Bucket; -import org.elasticsearch.xpack.esql.expression.function.grouping.GroupingFunction; -import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; -import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; -import org.elasticsearch.xpack.esql.expression.function.scalar.ip.CIDRMatch; -import org.elasticsearch.xpack.esql.expression.function.scalar.ip.IpPrefix; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Atan2; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.E; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Log; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Pi; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Pow; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Round; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Tau; -import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.AbstractMultivalueFunction; -import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialContains; -import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialDisjoint; -import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialIntersects; -import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesFunction; -import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialWithin; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.EndsWith; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.Left; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.Locate; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.RLike; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.Repeat; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.Replace; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.Right; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.Split; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.StartsWith; -import 
org.elasticsearch.xpack.esql.expression.function.scalar.string.Substring; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.WildcardLike; -import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.logical.Dissect.Parser; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.Filter; import org.elasticsearch.xpack.esql.plan.logical.Grok; +import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Lookup; import org.elasticsearch.xpack.esql.plan.logical.MvExpand; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.TopN; import org.elasticsearch.xpack.esql.plan.logical.join.Join; @@ -132,14 +71,10 @@ import org.elasticsearch.xpack.esql.plan.physical.TopNExec; import java.io.IOException; -import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.function.BiFunction; -import java.util.function.Function; -import static java.util.Map.entry; import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.Entry.of; import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanReader.readerFromPlanReader; import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanWriter.writerFromPlanWriter; @@ -169,11 +104,6 @@ public static String name(Class cls) { return cls.getSimpleName(); } - static final Class QL_UNARY_SCLR_CLS = - org.elasticsearch.xpack.esql.core.expression.function.scalar.UnaryScalarFunction.class; - - static final Class ESQL_UNARY_SCLR_CLS = UnaryScalarFunction.class; - /** * List of named type entries that link concrete names to stream reader and writer implementations. * Entries have the form: category, name, serializer method, deserializer method. 
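
The hunks that follow drop the hand-written reader and writer entries from namedTypeEntries(), since the expressions above now describe themselves through a NamedWriteableRegistry.Entry and are resolved by name at read time. Below is a minimal, self-contained sketch of that round trip; MyExpr is a hypothetical stand-in for an ESQL expression and uses plain stream wrappers, whereas the real classes also serialize a Source header and are read through PlanStreamInput.

// Hypothetical illustration of NamedWriteable round-tripping; MyExpr is not a real ESQL class.
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.List;

record MyExpr(String value) implements NamedWriteable {
    static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(MyExpr.class, "MyExpr", MyExpr::readFrom);

    static MyExpr readFrom(StreamInput in) throws IOException {
        return new MyExpr(in.readString());
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(value);
    }

    @Override
    public String getWriteableName() {
        return ENTRY.name;
    }
}

class MyExprRoundTrip {
    static MyExpr roundTrip(MyExpr expr) throws IOException {
        // The registry maps the writeable name back to the reader supplied in ENTRY.
        NamedWriteableRegistry registry = new NamedWriteableRegistry(List.of(MyExpr.ENTRY));
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            out.writeNamedWriteable(expr); // writes getWriteableName(), then the body
            try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry)) {
                return in.readNamedWriteable(MyExpr.class); // name -> ENTRY.reader -> new MyExpr
            }
        }
    }
}
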
@@ -209,7 +139,7 @@ public static List namedTypeEntries() { of(PhysicalPlan.class, ShowExec.class, PlanNamedTypes::writeShowExec, PlanNamedTypes::readShowExec), of(PhysicalPlan.class, TopNExec.class, PlanNamedTypes::writeTopNExec, PlanNamedTypes::readTopNExec), // Logical Plan Nodes - a subset of plans that end up being actually serialized - of(LogicalPlan.class, Aggregate.class, PlanNamedTypes::writeAggregate, PlanNamedTypes::readAggregate), + of(LogicalPlan.class, Aggregate.class, Aggregate::writeAggregate, Aggregate::new), of(LogicalPlan.class, Dissect.class, PlanNamedTypes::writeDissect, PlanNamedTypes::readDissect), of(LogicalPlan.class, EsRelation.class, PlanNamedTypes::writeEsRelation, PlanNamedTypes::readEsRelation), of(LogicalPlan.class, Eval.class, PlanNamedTypes::writeEval, PlanNamedTypes::readEval), @@ -224,73 +154,9 @@ public static List namedTypeEntries() { of(LogicalPlan.class, MvExpand.class, PlanNamedTypes::writeMvExpand, PlanNamedTypes::readMvExpand), of(LogicalPlan.class, OrderBy.class, PlanNamedTypes::writeOrderBy, PlanNamedTypes::readOrderBy), of(LogicalPlan.class, Project.class, PlanNamedTypes::writeProject, PlanNamedTypes::readProject), - of(LogicalPlan.class, TopN.class, PlanNamedTypes::writeTopN, PlanNamedTypes::readTopN), - // InComparison - of(ScalarFunction.class, In.class, PlanNamedTypes::writeInComparison, PlanNamedTypes::readInComparison), - // RegexMatch - of(RegexMatch.class, WildcardLike.class, PlanNamedTypes::writeWildcardLike, PlanNamedTypes::readWildcardLike), - of(RegexMatch.class, RLike.class, PlanNamedTypes::writeRLike, PlanNamedTypes::readRLike), - // BinaryLogic - of(BinaryLogic.class, And.class, PlanNamedTypes::writeBinaryLogic, PlanNamedTypes::readBinaryLogic), - of(BinaryLogic.class, Or.class, PlanNamedTypes::writeBinaryLogic, PlanNamedTypes::readBinaryLogic), - // ScalarFunction - of(ScalarFunction.class, Atan2.class, PlanNamedTypes::writeAtan2, PlanNamedTypes::readAtan2), - of(ScalarFunction.class, CIDRMatch.class, PlanNamedTypes::writeCIDRMatch, PlanNamedTypes::readCIDRMatch), - of(ScalarFunction.class, E.class, PlanNamedTypes::writeNoArgScalar, PlanNamedTypes::readNoArgScalar), - of(ScalarFunction.class, IpPrefix.class, (out, prefix) -> prefix.writeTo(out), IpPrefix::readFrom), - of(ScalarFunction.class, Log.class, PlanNamedTypes::writeLog, PlanNamedTypes::readLog), - of(ScalarFunction.class, Pi.class, PlanNamedTypes::writeNoArgScalar, PlanNamedTypes::readNoArgScalar), - of(ScalarFunction.class, Round.class, PlanNamedTypes::writeRound, PlanNamedTypes::readRound), - of(ScalarFunction.class, Pow.class, PlanNamedTypes::writePow, PlanNamedTypes::readPow), - of(ScalarFunction.class, StartsWith.class, PlanNamedTypes::writeStartsWith, PlanNamedTypes::readStartsWith), - of(ScalarFunction.class, EndsWith.class, PlanNamedTypes::writeEndsWith, PlanNamedTypes::readEndsWith), - of(ScalarFunction.class, SpatialIntersects.class, PlanNamedTypes::writeSpatialRelatesFunction, PlanNamedTypes::readIntersects), - of(ScalarFunction.class, SpatialDisjoint.class, PlanNamedTypes::writeSpatialRelatesFunction, PlanNamedTypes::readDisjoint), - of(ScalarFunction.class, SpatialContains.class, PlanNamedTypes::writeSpatialRelatesFunction, PlanNamedTypes::readContains), - of(ScalarFunction.class, SpatialWithin.class, PlanNamedTypes::writeSpatialRelatesFunction, PlanNamedTypes::readWithin), - of(ScalarFunction.class, Substring.class, PlanNamedTypes::writeSubstring, PlanNamedTypes::readSubstring), - of(ScalarFunction.class, Locate.class, PlanNamedTypes::writeLocate, 
PlanNamedTypes::readLocate), - of(ScalarFunction.class, Left.class, PlanNamedTypes::writeLeft, PlanNamedTypes::readLeft), - of(ScalarFunction.class, Repeat.class, PlanNamedTypes::writeRepeat, PlanNamedTypes::readRepeat), - of(ScalarFunction.class, Right.class, PlanNamedTypes::writeRight, PlanNamedTypes::readRight), - of(ScalarFunction.class, Split.class, PlanNamedTypes::writeSplit, PlanNamedTypes::readSplit), - of(ScalarFunction.class, Tau.class, PlanNamedTypes::writeNoArgScalar, PlanNamedTypes::readNoArgScalar), - of(ScalarFunction.class, Replace.class, PlanNamedTypes::writeReplace, PlanNamedTypes::readReplace), - // GroupingFunctions - of(GroupingFunction.class, Bucket.class, PlanNamedTypes::writeBucket, PlanNamedTypes::readBucket), - // AggregateFunctions - of(AggregateFunction.class, Avg.class, PlanNamedTypes::writeAggFunction, PlanNamedTypes::readAggFunction), - of(AggregateFunction.class, Count.class, PlanNamedTypes::writeAggFunction, PlanNamedTypes::readAggFunction), - of(AggregateFunction.class, CountDistinct.class, PlanNamedTypes::writeCountDistinct, PlanNamedTypes::readCountDistinct), - of(AggregateFunction.class, Min.class, PlanNamedTypes::writeAggFunction, PlanNamedTypes::readAggFunction), - of(AggregateFunction.class, Max.class, PlanNamedTypes::writeAggFunction, PlanNamedTypes::readAggFunction), - of(AggregateFunction.class, Median.class, PlanNamedTypes::writeAggFunction, PlanNamedTypes::readAggFunction), - of(AggregateFunction.class, MedianAbsoluteDeviation.class, PlanNamedTypes::writeAggFunction, PlanNamedTypes::readAggFunction), - of(AggregateFunction.class, Percentile.class, PlanNamedTypes::writePercentile, PlanNamedTypes::readPercentile), - of(AggregateFunction.class, SpatialCentroid.class, PlanNamedTypes::writeAggFunction, PlanNamedTypes::readAggFunction), - of(AggregateFunction.class, Sum.class, PlanNamedTypes::writeAggFunction, PlanNamedTypes::readAggFunction), - of(AggregateFunction.class, TopList.class, (out, prefix) -> prefix.writeTo(out), TopList::readFrom), - of(AggregateFunction.class, Values.class, PlanNamedTypes::writeAggFunction, PlanNamedTypes::readAggFunction) + of(LogicalPlan.class, TopN.class, PlanNamedTypes::writeTopN, PlanNamedTypes::readTopN) ); - List entries = new ArrayList<>(declared); - - // From NamedWriteables - for (List ee : List.of( - AbstractMultivalueFunction.getNamedWriteables(), - EsqlArithmeticOperation.getNamedWriteables(), - EsqlBinaryComparison.getNamedWriteables(), - EsqlScalarFunction.getNamedWriteables(), - FullTextPredicate.getNamedWriteables(), - NamedExpression.getNamedWriteables(), - UnaryScalarFunction.getNamedWriteables(), - List.of(UnsupportedAttribute.ENTRY, Literal.ENTRY, org.elasticsearch.xpack.esql.expression.Order.ENTRY) - )) { - for (NamedWriteableRegistry.Entry e : ee) { - entries.add(of(Expression.class, e)); - } - } - - return entries; + return declared; } // -- physical plan nodes @@ -298,7 +164,7 @@ static AggregateExec readAggregateExec(PlanStreamInput in) throws IOException { return new AggregateExec( Source.readFrom(in), in.readPhysicalPlanNode(), - in.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readExpression)), + in.readNamedWriteableCollectionAsList(Expression.class), in.readNamedWriteableCollectionAsList(NamedExpression.class), in.readEnum(AggregateExec.Mode.class), in.readOptionalVInt() @@ -308,7 +174,7 @@ static AggregateExec readAggregateExec(PlanStreamInput in) throws IOException { static void writeAggregateExec(PlanStreamOutput out, AggregateExec aggregateExec) throws IOException { 
Source.EMPTY.writeTo(out); out.writePhysicalPlanNode(aggregateExec.child()); - out.writeCollection(aggregateExec.groupings(), writerFromPlanWriter(PlanStreamOutput::writeExpression)); + out.writeNamedWriteableCollection(aggregateExec.groupings()); out.writeNamedWriteableCollection(aggregateExec.aggregates()); out.writeEnum(aggregateExec.getMode()); out.writeOptionalVInt(aggregateExec.estimatedRowSize()); @@ -318,7 +184,7 @@ static DissectExec readDissectExec(PlanStreamInput in) throws IOException { return new DissectExec( Source.readFrom(in), in.readPhysicalPlanNode(), - in.readExpression(), + in.readNamedWriteable(Expression.class), readDissectParser(in), in.readNamedWriteableCollectionAsList(Attribute.class) ); @@ -327,7 +193,7 @@ static DissectExec readDissectExec(PlanStreamInput in) throws IOException { static void writeDissectExec(PlanStreamOutput out, DissectExec dissectExec) throws IOException { Source.EMPTY.writeTo(out); out.writePhysicalPlanNode(dissectExec.child()); - out.writeExpression(dissectExec.inputExpression()); + out.writeNamedWriteable(dissectExec.inputExpression()); writeDissectParser(out, dissectExec.parser()); out.writeNamedWriteableCollection(dissectExec.extractedFields()); } @@ -352,7 +218,7 @@ static void writeEsQueryExec(PlanStreamOutput out, EsQueryExec esQueryExec) thro writeIndexMode(out, esQueryExec.indexMode()); out.writeNamedWriteableCollection(esQueryExec.output()); out.writeOptionalNamedWriteable(esQueryExec.query()); - out.writeOptionalExpression(esQueryExec.limit()); + out.writeOptionalNamedWriteable(esQueryExec.limit()); out.writeOptionalCollection(esQueryExec.sorts(), writerFromPlanWriter(PlanNamedTypes::writeFieldSort)); out.writeOptionalInt(esQueryExec.estimatedRowSize()); } @@ -511,13 +377,13 @@ static void writeFieldExtractExec(PlanStreamOutput out, FieldExtractExec fieldEx } static FilterExec readFilterExec(PlanStreamInput in) throws IOException { - return new FilterExec(Source.readFrom(in), in.readPhysicalPlanNode(), in.readExpression()); + return new FilterExec(Source.readFrom(in), in.readPhysicalPlanNode(), in.readNamedWriteable(Expression.class)); } static void writeFilterExec(PlanStreamOutput out, FilterExec filterExec) throws IOException { Source.EMPTY.writeTo(out); out.writePhysicalPlanNode(filterExec.child()); - out.writeExpression(filterExec.condition()); + out.writeNamedWriteable(filterExec.condition()); } static FragmentExec readFragmentExec(PlanStreamInput in) throws IOException { @@ -545,7 +411,7 @@ static GrokExec readGrokExec(PlanStreamInput in) throws IOException { return new GrokExec( source = Source.readFrom(in), in.readPhysicalPlanNode(), - in.readExpression(), + in.readNamedWriteable(Expression.class), Grok.pattern(source, in.readString()), in.readNamedWriteableCollectionAsList(Attribute.class) ); @@ -554,19 +420,19 @@ static GrokExec readGrokExec(PlanStreamInput in) throws IOException { static void writeGrokExec(PlanStreamOutput out, GrokExec grokExec) throws IOException { Source.EMPTY.writeTo(out); out.writePhysicalPlanNode(grokExec.child()); - out.writeExpression(grokExec.inputExpression()); + out.writeNamedWriteable(grokExec.inputExpression()); out.writeString(grokExec.pattern().pattern()); out.writeNamedWriteableCollection(grokExec.extractedFields()); } static LimitExec readLimitExec(PlanStreamInput in) throws IOException { - return new LimitExec(Source.readFrom(in), in.readPhysicalPlanNode(), in.readNamed(Expression.class)); + return new LimitExec(Source.readFrom(in), in.readPhysicalPlanNode(), 
in.readNamedWriteable(Expression.class)); } static void writeLimitExec(PlanStreamOutput out, LimitExec limitExec) throws IOException { Source.EMPTY.writeTo(out); out.writePhysicalPlanNode(limitExec.child()); - out.writeExpression(limitExec.limit()); + out.writeNamedWriteable(limitExec.limit()); } static MvExpandExec readMvExpandExec(PlanStreamInput in) throws IOException { @@ -643,7 +509,7 @@ static TopNExec readTopNExec(PlanStreamInput in) throws IOException { Source.readFrom(in), in.readPhysicalPlanNode(), in.readCollectionAsList(org.elasticsearch.xpack.esql.expression.Order::new), - in.readNamed(Expression.class), + in.readNamedWriteable(Expression.class), in.readOptionalVInt() ); } @@ -652,32 +518,15 @@ static void writeTopNExec(PlanStreamOutput out, TopNExec topNExec) throws IOExce Source.EMPTY.writeTo(out); out.writePhysicalPlanNode(topNExec.child()); out.writeCollection(topNExec.order()); - out.writeExpression(topNExec.limit()); + out.writeNamedWriteable(topNExec.limit()); out.writeOptionalVInt(topNExec.estimatedRowSize()); } - // -- Logical plan nodes - static Aggregate readAggregate(PlanStreamInput in) throws IOException { - return new Aggregate( - Source.readFrom(in), - in.readLogicalPlanNode(), - in.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readExpression)), - in.readNamedWriteableCollectionAsList(NamedExpression.class) - ); - } - - static void writeAggregate(PlanStreamOutput out, Aggregate aggregate) throws IOException { - Source.EMPTY.writeTo(out); - out.writeLogicalPlanNode(aggregate.child()); - out.writeCollection(aggregate.groupings(), writerFromPlanWriter(PlanStreamOutput::writeExpression)); - out.writeNamedWriteableCollection(aggregate.aggregates()); - } - static Dissect readDissect(PlanStreamInput in) throws IOException { return new Dissect( Source.readFrom(in), in.readLogicalPlanNode(), - in.readExpression(), + in.readNamedWriteable(Expression.class), readDissectParser(in), in.readNamedWriteableCollectionAsList(Attribute.class) ); @@ -686,7 +535,7 @@ static Dissect readDissect(PlanStreamInput in) throws IOException { static void writeDissect(PlanStreamOutput out, Dissect dissect) throws IOException { Source.EMPTY.writeTo(out); out.writeLogicalPlanNode(dissect.child()); - out.writeExpression(dissect.input()); + out.writeNamedWriteable(dissect.input()); writeDissectParser(out, dissect.parser()); out.writeNamedWriteableCollection(dissect.extractedFields()); } @@ -755,7 +604,7 @@ static Enrich readEnrich(PlanStreamInput in) throws IOException { } final Source source = Source.readFrom(in); final LogicalPlan child = in.readLogicalPlanNode(); - final Expression policyName = in.readExpression(); + final Expression policyName = in.readNamedWriteable(Expression.class); final NamedExpression matchField = in.readNamedWriteable(NamedExpression.class); if (in.getTransportVersion().before(TransportVersions.V_8_13_0)) { in.readString(); // discard the old policy name @@ -790,7 +639,7 @@ static void writeEnrich(PlanStreamOutput out, Enrich enrich) throws IOException Source.EMPTY.writeTo(out); out.writeLogicalPlanNode(enrich.child()); - out.writeExpression(enrich.policyName()); + out.writeNamedWriteable(enrich.policyName()); out.writeNamedWriteable(enrich.matchField()); if (out.getTransportVersion().before(TransportVersions.V_8_13_0)) { out.writeString(BytesRefs.toString(enrich.policyName().fold())); // old policy name @@ -822,13 +671,13 @@ static void writeEsqlProject(PlanStreamOutput out, EsqlProject project) throws I } static Filter readFilter(PlanStreamInput in) 
throws IOException { - return new Filter(Source.readFrom(in), in.readLogicalPlanNode(), in.readExpression()); + return new Filter(Source.readFrom(in), in.readLogicalPlanNode(), in.readNamedWriteable(Expression.class)); } static void writeFilter(PlanStreamOutput out, Filter filter) throws IOException { Source.EMPTY.writeTo(out); out.writeLogicalPlanNode(filter.child()); - out.writeExpression(filter.condition()); + out.writeNamedWriteable(filter.condition()); } static Grok readGrok(PlanStreamInput in) throws IOException { @@ -836,7 +685,7 @@ static Grok readGrok(PlanStreamInput in) throws IOException { return new Grok( source = Source.readFrom(in), in.readLogicalPlanNode(), - in.readExpression(), + in.readNamedWriteable(Expression.class), Grok.pattern(source, in.readString()), in.readNamedWriteableCollectionAsList(Attribute.class) ); @@ -845,18 +694,18 @@ static Grok readGrok(PlanStreamInput in) throws IOException { static void writeGrok(PlanStreamOutput out, Grok grok) throws IOException { Source.EMPTY.writeTo(out); out.writeLogicalPlanNode(grok.child()); - out.writeExpression(grok.input()); + out.writeNamedWriteable(grok.input()); out.writeString(grok.parser().pattern()); out.writeNamedWriteableCollection(grok.extractedFields()); } static Limit readLimit(PlanStreamInput in) throws IOException { - return new Limit(Source.readFrom(in), in.readNamed(Expression.class), in.readLogicalPlanNode()); + return new Limit(Source.readFrom(in), in.readNamedWriteable(Expression.class), in.readLogicalPlanNode()); } static void writeLimit(PlanStreamOutput out, Limit limit) throws IOException { Source.EMPTY.writeTo(out); - out.writeExpression(limit.limit()); + out.writeNamedWriteable(limit.limit()); out.writeLogicalPlanNode(limit.child()); } @@ -905,7 +754,7 @@ static TopN readTopN(PlanStreamInput in) throws IOException { Source.readFrom(in), in.readLogicalPlanNode(), in.readCollectionAsList(org.elasticsearch.xpack.esql.expression.Order::new), - in.readExpression() + in.readNamedWriteable(Expression.class) ); } @@ -913,354 +762,7 @@ static void writeTopN(PlanStreamOutput out, TopN topN) throws IOException { Source.EMPTY.writeTo(out); out.writeLogicalPlanNode(topN.child()); out.writeCollection(topN.order()); - out.writeExpression(topN.limit()); - } - - // -- InComparison - - static In readInComparison(PlanStreamInput in) throws IOException { - return new In( - Source.readFrom(in), - in.readExpression(), - in.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readExpression)) - ); - } - - static void writeInComparison(PlanStreamOutput out, In in) throws IOException { - in.source().writeTo(out); - out.writeExpression(in.value()); - out.writeCollection(in.list(), writerFromPlanWriter(PlanStreamOutput::writeExpression)); - } - - // -- RegexMatch - - static WildcardLike readWildcardLike(PlanStreamInput in, String name) throws IOException { - return new WildcardLike(Source.readFrom(in), in.readExpression(), new WildcardPattern(in.readString())); - } - - static void writeWildcardLike(PlanStreamOutput out, WildcardLike like) throws IOException { - like.source().writeTo(out); - out.writeExpression(like.field()); - out.writeString(like.pattern().pattern()); - } - - static RLike readRLike(PlanStreamInput in, String name) throws IOException { - return new RLike(Source.readFrom(in), in.readExpression(), new RLikePattern(in.readString())); - } - - static void writeRLike(PlanStreamOutput out, RLike like) throws IOException { - like.source().writeTo(out); - out.writeExpression(like.field()); - 
out.writeString(like.pattern().asJavaRegex()); - } - - // -- BinaryLogic - - static final Map> BINARY_LOGIC_CTRS = Map.ofEntries( - entry(name(And.class), And::new), - entry(name(Or.class), Or::new) - ); - - static BinaryLogic readBinaryLogic(PlanStreamInput in, String name) throws IOException { - var source = Source.readFrom(in); - var left = in.readExpression(); - var right = in.readExpression(); - return BINARY_LOGIC_CTRS.get(name).apply(source, left, right); - } - - static void writeBinaryLogic(PlanStreamOutput out, BinaryLogic binaryLogic) throws IOException { - Source.EMPTY.writeTo(out); - out.writeExpression(binaryLogic.left()); - out.writeExpression(binaryLogic.right()); - } - - static final Map> NO_ARG_SCALAR_CTRS = Map.ofEntries( - entry(name(E.class), E::new), - entry(name(Pi.class), Pi::new), - entry(name(Tau.class), Tau::new) - ); - - static ScalarFunction readNoArgScalar(PlanStreamInput in, String name) throws IOException { - var ctr = NO_ARG_SCALAR_CTRS.get(name); - if (ctr == null) { - throw new IOException("Constructor not found:" + name); - } - return ctr.apply(Source.readFrom(in)); - } - - static void writeNoArgScalar(PlanStreamOutput out, ScalarFunction function) throws IOException { - Source.EMPTY.writeTo(out); - } - - static final Map< - String, - BiFunction< - Source, - Expression, - org.elasticsearch.xpack.esql.core.expression.function.scalar.UnaryScalarFunction>> QL_UNARY_SCALAR_CTRS = Map.ofEntries( - entry(name(IsNotNull.class), IsNotNull::new), - entry(name(IsNull.class), IsNull::new), - entry(name(Not.class), Not::new) - ); - - static org.elasticsearch.xpack.esql.core.expression.function.scalar.UnaryScalarFunction readQLUnaryScalar( - PlanStreamInput in, - String name - ) throws IOException { - var ctr = QL_UNARY_SCALAR_CTRS.get(name); - if (ctr == null) { - throw new IOException("Constructor for QLUnaryScalar not found for name:" + name); - } - return ctr.apply(Source.readFrom(in), in.readExpression()); - } - - static void writeQLUnaryScalar( - PlanStreamOutput out, - org.elasticsearch.xpack.esql.core.expression.function.scalar.UnaryScalarFunction function - ) throws IOException { - function.source().writeTo(out); - out.writeExpression(function.field()); - } - - // -- ScalarFunction - - static Atan2 readAtan2(PlanStreamInput in) throws IOException { - return new Atan2(Source.readFrom(in), in.readExpression(), in.readExpression()); - } - - static void writeAtan2(PlanStreamOutput out, Atan2 atan2) throws IOException { - atan2.source().writeTo(out); - out.writeExpression(atan2.y()); - out.writeExpression(atan2.x()); - } - - static Bucket readBucket(PlanStreamInput in) throws IOException { - return new Bucket( - Source.readFrom(in), - in.readExpression(), - in.readExpression(), - in.readOptionalNamed(Expression.class), - in.readOptionalNamed(Expression.class) - ); - } - - static void writeBucket(PlanStreamOutput out, Bucket bucket) throws IOException { - bucket.source().writeTo(out); - out.writeExpression(bucket.field()); - out.writeExpression(bucket.buckets()); - out.writeOptionalExpression(bucket.from()); - out.writeOptionalExpression(bucket.to()); - } - - static CountDistinct readCountDistinct(PlanStreamInput in) throws IOException { - return new CountDistinct(Source.readFrom(in), in.readExpression(), in.readOptionalNamed(Expression.class)); - } - - static void writeCountDistinct(PlanStreamOutput out, CountDistinct countDistinct) throws IOException { - List fields = countDistinct.children(); - assert fields.size() == 1 || fields.size() == 2; - 
Source.EMPTY.writeTo(out); - out.writeExpression(fields.get(0)); - out.writeOptionalWriteable(fields.size() == 2 ? o -> out.writeExpression(fields.get(1)) : null); - } - - static SpatialIntersects readIntersects(PlanStreamInput in) throws IOException { - return new SpatialIntersects(Source.EMPTY, in.readExpression(), in.readExpression()); - } - - static SpatialDisjoint readDisjoint(PlanStreamInput in) throws IOException { - return new SpatialDisjoint(Source.EMPTY, in.readExpression(), in.readExpression()); - } - - static SpatialContains readContains(PlanStreamInput in) throws IOException { - return new SpatialContains(Source.EMPTY, in.readExpression(), in.readExpression()); - } - - static SpatialWithin readWithin(PlanStreamInput in) throws IOException { - return new SpatialWithin(Source.EMPTY, in.readExpression(), in.readExpression()); - } - - static void writeSpatialRelatesFunction(PlanStreamOutput out, SpatialRelatesFunction spatialRelatesFunction) throws IOException { - out.writeExpression(spatialRelatesFunction.left()); - out.writeExpression(spatialRelatesFunction.right()); - } - - static Round readRound(PlanStreamInput in) throws IOException { - return new Round(Source.readFrom(in), in.readExpression(), in.readOptionalNamed(Expression.class)); - } - - static void writeRound(PlanStreamOutput out, Round round) throws IOException { - round.source().writeTo(out); - out.writeExpression(round.field()); - out.writeOptionalExpression(round.decimals()); - } - - static Pow readPow(PlanStreamInput in) throws IOException { - return new Pow(Source.readFrom(in), in.readExpression(), in.readExpression()); - } - - static void writePow(PlanStreamOutput out, Pow pow) throws IOException { - pow.source().writeTo(out); - out.writeExpression(pow.base()); - out.writeExpression(pow.exponent()); - } - - static Percentile readPercentile(PlanStreamInput in) throws IOException { - return new Percentile(Source.readFrom(in), in.readExpression(), in.readExpression()); - } - - static void writePercentile(PlanStreamOutput out, Percentile percentile) throws IOException { - List fields = percentile.children(); - assert fields.size() == 2 : "percentile() aggregation must have two arguments"; - Source.EMPTY.writeTo(out); - out.writeExpression(fields.get(0)); - out.writeExpression(fields.get(1)); - } - - static StartsWith readStartsWith(PlanStreamInput in) throws IOException { - return new StartsWith(Source.readFrom(in), in.readExpression(), in.readExpression()); - } - - static void writeStartsWith(PlanStreamOutput out, StartsWith startsWith) throws IOException { - startsWith.source().writeTo(out); - List fields = startsWith.children(); - assert fields.size() == 2; - out.writeExpression(fields.get(0)); - out.writeExpression(fields.get(1)); - } - - static EndsWith readEndsWith(PlanStreamInput in) throws IOException { - return new EndsWith(Source.readFrom(in), in.readExpression(), in.readExpression()); - } - - static void writeEndsWith(PlanStreamOutput out, EndsWith endsWith) throws IOException { - List fields = endsWith.children(); - assert fields.size() == 2; - Source.EMPTY.writeTo(out); - out.writeExpression(fields.get(0)); - out.writeExpression(fields.get(1)); - } - - static Substring readSubstring(PlanStreamInput in) throws IOException { - return new Substring(Source.readFrom(in), in.readExpression(), in.readExpression(), in.readOptionalNamed(Expression.class)); - } - - static void writeSubstring(PlanStreamOutput out, Substring substring) throws IOException { - substring.source().writeTo(out); - List fields = 
substring.children(); - assert fields.size() == 2 || fields.size() == 3; - out.writeExpression(fields.get(0)); - out.writeExpression(fields.get(1)); - out.writeOptionalWriteable(fields.size() == 3 ? o -> out.writeExpression(fields.get(2)) : null); - } - - static Locate readLocate(PlanStreamInput in) throws IOException { - return new Locate(Source.readFrom(in), in.readExpression(), in.readExpression(), in.readOptionalNamed(Expression.class)); - } - - static void writeLocate(PlanStreamOutput out, Locate locate) throws IOException { - locate.source().writeTo(out); - List fields = locate.children(); - assert fields.size() == 2 || fields.size() == 3; - out.writeExpression(fields.get(0)); - out.writeExpression(fields.get(1)); - out.writeOptionalWriteable(fields.size() == 3 ? o -> out.writeExpression(fields.get(2)) : null); - } - - static Replace readReplace(PlanStreamInput in) throws IOException { - return new Replace(Source.EMPTY, in.readExpression(), in.readExpression(), in.readExpression()); - } - - static void writeReplace(PlanStreamOutput out, Replace replace) throws IOException { - List fields = replace.children(); - assert fields.size() == 3; - out.writeExpression(fields.get(0)); - out.writeExpression(fields.get(1)); - out.writeExpression(fields.get(2)); - } - - static Left readLeft(PlanStreamInput in) throws IOException { - return new Left(Source.readFrom(in), in.readExpression(), in.readExpression()); - } - - static void writeLeft(PlanStreamOutput out, Left left) throws IOException { - left.source().writeTo(out); - List fields = left.children(); - assert fields.size() == 2; - out.writeExpression(fields.get(0)); - out.writeExpression(fields.get(1)); - } - - static Repeat readRepeat(PlanStreamInput in) throws IOException { - return new Repeat(Source.readFrom(in), in.readExpression(), in.readExpression()); - } - - static void writeRepeat(PlanStreamOutput out, Repeat repeat) throws IOException { - repeat.source().writeTo(out); - List fields = repeat.children(); - assert fields.size() == 2; - out.writeExpression(fields.get(0)); - out.writeExpression(fields.get(1)); - } - - static Right readRight(PlanStreamInput in) throws IOException { - return new Right(Source.readFrom(in), in.readExpression(), in.readExpression()); - } - - static void writeRight(PlanStreamOutput out, Right right) throws IOException { - right.source().writeTo(out); - List fields = right.children(); - assert fields.size() == 2; - out.writeExpression(fields.get(0)); - out.writeExpression(fields.get(1)); - } - - static Split readSplit(PlanStreamInput in) throws IOException { - return new Split(Source.readFrom(in), in.readExpression(), in.readExpression()); - } - - static void writeSplit(PlanStreamOutput out, Split split) throws IOException { - split.source().writeTo(out); - out.writeExpression(split.left()); - out.writeExpression(split.right()); - } - - static CIDRMatch readCIDRMatch(PlanStreamInput in) throws IOException { - return new CIDRMatch( - Source.readFrom(in), - in.readExpression(), - in.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readExpression)) - ); - } - - static void writeCIDRMatch(PlanStreamOutput out, CIDRMatch cidrMatch) throws IOException { - cidrMatch.source().writeTo(out); - List children = cidrMatch.children(); - assert children.size() > 1; - out.writeExpression(children.get(0)); - out.writeCollection(children.subList(1, children.size()), writerFromPlanWriter(PlanStreamOutput::writeExpression)); - } - - // -- Aggregations - static final Map> AGG_CTRS = Map.ofEntries( - 
entry(name(Avg.class), Avg::new), - entry(name(Count.class), Count::new), - entry(name(Sum.class), Sum::new), - entry(name(Min.class), Min::new), - entry(name(Max.class), Max::new), - entry(name(Median.class), Median::new), - entry(name(MedianAbsoluteDeviation.class), MedianAbsoluteDeviation::new), - entry(name(SpatialCentroid.class), SpatialCentroid::new), - entry(name(Values.class), Values::new) - ); - - static AggregateFunction readAggFunction(PlanStreamInput in, String name) throws IOException { - return AGG_CTRS.get(name).apply(Source.readFrom(in), in.readExpression()); - } - - static void writeAggFunction(PlanStreamOutput out, AggregateFunction aggregateFunction) throws IOException { - Source.EMPTY.writeTo(out); - out.writeExpression(aggregateFunction.field()); + out.writeNamedWriteable(topN.limit()); } // -- ancillary supporting classes of plan nodes, etc @@ -1304,16 +806,4 @@ static void writeDissectParser(PlanStreamOutput out, Parser dissectParser) throw out.writeString(dissectParser.pattern()); out.writeString(dissectParser.appendSeparator()); } - - static Log readLog(PlanStreamInput in) throws IOException { - return new Log(Source.readFrom(in), in.readExpression(), in.readOptionalNamed(Expression.class)); - } - - static void writeLog(PlanStreamOutput out, Log log) throws IOException { - log.source().writeTo(out); - List fields = log.children(); - assert fields.size() == 1 || fields.size() == 2; - out.writeExpression(fields.get(0)); - out.writeOptionalWriteable(fields.size() == 2 ? o -> out.writeExpression(fields.get(1)) : null); - } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java index 0b671d6b90c7e..0633595a5796d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java @@ -23,11 +23,10 @@ import org.elasticsearch.compute.data.LongBigArrayBlock; import org.elasticsearch.core.Releasables; import org.elasticsearch.xpack.esql.Column; -import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NameId; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanNamedReader; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanReader; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; @@ -92,11 +91,6 @@ public PhysicalPlan readOptionalPhysicalPlanNode() throws IOException { return readOptionalNamed(PhysicalPlan.class); } - @Override - public Expression readExpression() throws IOException { - return readNamed(Expression.class); - } - public T readNamed(Class type) throws IOException { String name = readString(); @SuppressWarnings("unchecked") @@ -120,18 +114,6 @@ public T readOptionalNamed(Class type) throws IOException { } } - public T readOptionalWithReader(PlanReader reader) throws IOException { - if (readBoolean()) { - T t = reader.read(this); - if (t == null) { - throwOnNullOptionalRead(reader); - } - return t; - } else { - return null; - } - } - public EsqlConfiguration configuration() throws IOException { return configuration; } @@ -220,12 +202,6 @@ static void throwOnNullOptionalRead(Class type) 
throws IOException { throw e; } - static void throwOnNullOptionalRead(PlanReader reader) throws IOException { - final IOException e = new IOException("read optional named returned null which is not allowed, reader:" + reader); - assert false : e; - throw e; - } - @Override public NameId mapNameId(long l) { return nameIdFunction.apply(l); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java index 45662d13e2618..674476ec4f736 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java @@ -19,9 +19,8 @@ import org.elasticsearch.compute.data.LongBigArrayBlock; import org.elasticsearch.core.Nullable; import org.elasticsearch.xpack.esql.Column; -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanWriter; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.join.Join; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; @@ -35,7 +34,7 @@ * A customized stream output used to serialize ESQL physical plan fragments. Complements stream * output with methods that write plan nodes, Attributes, Expressions, etc. */ -public final class PlanStreamOutput extends StreamOutput implements org.elasticsearch.xpack.esql.core.util.PlanStreamOutput { +public final class PlanStreamOutput extends StreamOutput { /** * Cache of written blocks. 
We use an {@link IdentityHashMap} for this @@ -94,20 +93,6 @@ public void writeOptionalPhysicalPlanNode(PhysicalPlan physicalPlan) throws IOEx } } - @Override - public void writeExpression(Expression expression) throws IOException { - writeNamed(Expression.class, expression); - } - - public void writeOptionalExpression(Expression expression) throws IOException { - if (expression == null) { - writeBoolean(false); - } else { - writeBoolean(true); - writeExpression(expression); - } - } - public void writeNamed(Class type, T value) throws IOException { String name = nameSupplier.apply(value.getClass()); @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java index 384d3a8cea840..9a2ae742c2feb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java @@ -21,11 +21,6 @@ import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.expression.predicate.Predicates; import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.Filter; -import org.elasticsearch.xpack.esql.core.plan.logical.Limit; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.core.rule.ParameterizedRule; import org.elasticsearch.xpack.esql.core.rule.ParameterizedRuleExecutor; import org.elasticsearch.xpack.esql.core.rule.Rule; @@ -34,11 +29,17 @@ import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; +import org.elasticsearch.xpack.esql.optimizer.rules.OptimizerRules; import org.elasticsearch.xpack.esql.optimizer.rules.PropagateEmptyRelation; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.Filter; +import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.plan.logical.Project; +import org.elasticsearch.xpack.esql.plan.logical.RegexExtract; import org.elasticsearch.xpack.esql.plan.logical.TopN; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.planner.AbstractPhysicalOperationProviders; @@ -53,9 +54,9 @@ import static java.util.Arrays.asList; import static java.util.Collections.emptySet; -import static org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.TransformDirection.UP; import static org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer.cleanup; import static org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer.operators; +import static org.elasticsearch.xpack.esql.optimizer.rules.OptimizerRules.TransformDirection.UP; public class LocalLogicalPlanOptimizer extends 
ParameterizedRuleExecutor { @@ -162,14 +163,16 @@ else if (plan instanceof Project project) { plan = new Eval(project.source(), project.child(), new ArrayList<>(nullLiteral.values())); plan = new Project(project.source(), plan, newProjections); } - } - // otherwise transform fields in place - else { - plan = plan.transformExpressionsOnlyUp( - FieldAttribute.class, - f -> stats.exists(f.qualifiedName()) ? f : Literal.of(f, null) - ); - } + } else if (plan instanceof Eval + || plan instanceof Filter + || plan instanceof OrderBy + || plan instanceof RegexExtract + || plan instanceof TopN) { + plan = plan.transformExpressionsOnlyUp( + FieldAttribute.class, + f -> stats.exists(f.qualifiedName()) ? f : Literal.of(f, null) + ); + } return plan; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java index 5eb024d410992..c03dc46216621 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java @@ -7,23 +7,31 @@ package org.elasticsearch.xpack.esql.optimizer; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.core.Tuple; +import org.elasticsearch.geometry.Circle; +import org.elasticsearch.geometry.Geometry; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.utils.WellKnownBinary; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.xpack.esql.VerificationException; -import org.elasticsearch.xpack.esql.core.common.Failure; +import org.elasticsearch.xpack.esql.common.Failure; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.AttributeMap; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; +import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.expression.Order; import org.elasticsearch.xpack.esql.core.expression.TypedAttribute; import org.elasticsearch.xpack.esql.core.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.esql.core.expression.predicate.Predicates; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.BinaryLogic; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Not; import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull; @@ -34,6 +42,7 @@ import org.elasticsearch.xpack.esql.core.querydsl.query.Query; import org.elasticsearch.xpack.esql.core.rule.ParameterizedRuleExecutor; import org.elasticsearch.xpack.esql.core.rule.Rule; +import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.Queries; import org.elasticsearch.xpack.esql.core.util.Queries.Clause; @@ -41,8 +50,13 @@ import 
org.elasticsearch.xpack.esql.expression.function.aggregate.Count; import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialAggregateFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.ip.CIDRMatch; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialDisjoint; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialIntersects; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesFunction; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesUtils; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StDistance; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.InsensitiveBinaryComparison; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals; @@ -64,6 +78,7 @@ import org.elasticsearch.xpack.esql.planner.EsqlTranslatorHandler; import org.elasticsearch.xpack.esql.stats.SearchStats; +import java.nio.ByteOrder; import java.util.ArrayList; import java.util.Collection; import java.util.HashSet; @@ -77,7 +92,7 @@ import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static org.elasticsearch.xpack.esql.core.expression.predicate.Predicates.splitAnd; -import static org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.TransformDirection.UP; +import static org.elasticsearch.xpack.esql.optimizer.rules.OptimizerRules.TransformDirection.UP; import static org.elasticsearch.xpack.esql.plan.physical.EsStatsQueryExec.StatsType.COUNT; public class LocalPhysicalPlanOptimizer extends ParameterizedRuleExecutor { @@ -103,13 +118,14 @@ PhysicalPlan verify(PhysicalPlan plan) { protected List> rules(boolean optimizeForEsSource) { List> esSourceRules = new ArrayList<>(4); - esSourceRules.add(new ReplaceAttributeSourceWithDocId()); + esSourceRules.add(new ReplaceSourceAttributes()); if (optimizeForEsSource) { esSourceRules.add(new PushTopNToSource()); esSourceRules.add(new PushLimitToSource()); esSourceRules.add(new PushFiltersToSource()); esSourceRules.add(new PushStatsToSource()); + esSourceRules.add(new EnableSpatialDistancePushdown()); } // execute the rules multiple times to improve the chances of things being pushed down @@ -126,15 +142,32 @@ protected List> batches() { return rules(true); } - private static class ReplaceAttributeSourceWithDocId extends OptimizerRule { + private static class ReplaceSourceAttributes extends OptimizerRule { - ReplaceAttributeSourceWithDocId() { + ReplaceSourceAttributes() { super(UP); } @Override protected PhysicalPlan rule(EsSourceExec plan) { - return new EsQueryExec(plan.source(), plan.index(), plan.indexMode(), plan.query()); + var docId = new FieldAttribute(plan.source(), EsQueryExec.DOC_ID_FIELD.getName(), EsQueryExec.DOC_ID_FIELD); + if (plan.indexMode() == IndexMode.TIME_SERIES) { + Attribute tsid = null, timestamp = null; + for (Attribute attr : plan.output()) { + String name = attr.name(); + if (name.equals(MetadataAttribute.TSID_FIELD)) { + tsid = attr; + } else if (name.equals(MetadataAttribute.TIMESTAMP_FIELD)) { + timestamp = attr; + } + } + if (tsid == null || timestamp == null) { + throw new IllegalStateException("_tsid or 
@timestamp are missing from the time-series source"); + } + return new EsQueryExec(plan.source(), plan.index(), plan.indexMode(), List.of(docId, tsid, timestamp), plan.query()); + } else { + return new EsQueryExec(plan.source(), plan.index(), plan.indexMode(), List.of(docId), plan.query()); + } } } @@ -551,4 +584,133 @@ private boolean allowedForDocValues(FieldAttribute fieldAttribute, AggregateExec return spatialRelatesAttributes.size() < 2; } } + + /** + * When a spatial distance predicate can be pushed down to lucene, this is done by capturing the distance within the same function. + * In principle this is like re-writing the predicate: + *
      WHERE ST_DISTANCE(field, TO_GEOPOINT("POINT(0 0)")) <= 10000
      + * as: + *
      WHERE ST_INTERSECTS(field, TO_GEOSHAPE("CIRCLE(0,0,10000)"))
      + */ + public static class EnableSpatialDistancePushdown extends PhysicalOptimizerRules.ParameterizedOptimizerRule< + FilterExec, + LocalPhysicalOptimizerContext> { + + @Override + protected PhysicalPlan rule(FilterExec filterExec, LocalPhysicalOptimizerContext ctx) { + PhysicalPlan plan = filterExec; + if (filterExec.child() instanceof EsQueryExec) { + // Find and rewrite any binary comparisons that involve a distance function and a literal + var rewritten = filterExec.condition().transformDown(EsqlBinaryComparison.class, comparison -> { + ComparisonType comparisonType = ComparisonType.from(comparison.getFunctionType()); + if (comparison.left() instanceof StDistance dist && comparison.right().foldable()) { + return rewriteComparison(comparison, dist, comparison.right(), comparisonType); + } else if (comparison.right() instanceof StDistance dist && comparison.left().foldable()) { + return rewriteComparison(comparison, dist, comparison.left(), ComparisonType.invert(comparisonType)); + } + return comparison; + }); + if (rewritten.equals(filterExec.condition()) == false) { + plan = new FilterExec(filterExec.source(), filterExec.child(), rewritten); + } + } + + return plan; + } + + private Expression rewriteComparison( + EsqlBinaryComparison comparison, + StDistance dist, + Expression literal, + ComparisonType comparisonType + ) { + Object value = literal.fold(); + if (value instanceof Number number) { + if (dist.right().foldable()) { + return rewriteDistanceFilter(comparison, dist.left(), dist.right(), number, comparisonType); + } else if (dist.left().foldable()) { + return rewriteDistanceFilter(comparison, dist.right(), dist.left(), number, comparisonType); + } + } + return comparison; + } + + private Expression rewriteDistanceFilter( + EsqlBinaryComparison comparison, + Expression spatialExp, + Expression literalExp, + Number number, + ComparisonType comparisonType + ) { + Geometry geometry = SpatialRelatesUtils.makeGeometryFromLiteral(literalExp); + if (geometry instanceof Point point) { + double distance = number.doubleValue(); + Source source = comparison.source(); + if (comparisonType.lt) { + distance = comparisonType.eq ? distance : Math.nextDown(distance); + return new SpatialIntersects(source, spatialExp, makeCircleLiteral(point, distance, literalExp)); + } else if (comparisonType.gt) { + distance = comparisonType.eq ? distance : Math.nextUp(distance); + return new SpatialDisjoint(source, spatialExp, makeCircleLiteral(point, distance, literalExp)); + } else if (comparisonType.eq) { + return new And( + source, + new SpatialIntersects(source, spatialExp, makeCircleLiteral(point, distance, literalExp)), + new SpatialDisjoint(source, spatialExp, makeCircleLiteral(point, Math.nextDown(distance), literalExp)) + ); + } + } + return comparison; + } + + private Literal makeCircleLiteral(Point point, double distance, Expression literalExpression) { + var circle = new Circle(point.getX(), point.getY(), distance); + var wkb = WellKnownBinary.toWKB(circle, ByteOrder.LITTLE_ENDIAN); + return new Literal(literalExpression.source(), new BytesRef(wkb), DataType.GEO_SHAPE); + } + + /** + * This enum captures the key differences between various inequalities as perceived from the spatial distance function. + * In particular, we need to know which direction the inequality points, with lt=true meaning the left is expected to be smaller + * than the right, and eq=true meaning we expect equality as well. We currently don't support Equals and NotEquals, so the third + * field disables those. 
+ */ + enum ComparisonType { + LTE(true, false, true), + LT(true, false, false), + GTE(false, true, true), + GT(false, true, false), + EQ(false, false, true); + + private final boolean lt; + private final boolean gt; + private final boolean eq; + + ComparisonType(boolean lt, boolean gt, boolean eq) { + this.lt = lt; + this.gt = gt; + this.eq = eq; + } + + static ComparisonType from(EsqlBinaryComparison.BinaryComparisonOperation op) { + return switch (op) { + case LT -> LT; + case LTE -> LTE; + case GT -> GT; + case GTE -> GTE; + default -> EQ; + }; + } + + static ComparisonType invert(ComparisonType comparisonType) { + return switch (comparisonType) { + case LT -> GT; + case LTE -> GTE; + case GT -> LT; + case GTE -> LTE; + default -> EQ; + }; + } + } + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index aaf9f8e63d795..50819b8ee7480 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -9,7 +9,7 @@ import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.VerificationException; -import org.elasticsearch.xpack.esql.core.common.Failures; +import org.elasticsearch.xpack.esql.common.Failures; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.AttributeMap; @@ -17,9 +17,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.Order; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.OrderBy; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.rule.ParameterizedRule; import org.elasticsearch.xpack.esql.core.rule.ParameterizedRuleExecutor; import org.elasticsearch.xpack.esql.optimizer.rules.AddDefaultTopN; @@ -66,8 +63,12 @@ import org.elasticsearch.xpack.esql.optimizer.rules.SplitInWithFoldableValue; import org.elasticsearch.xpack.esql.optimizer.rules.SubstituteSpatialSurrogates; import org.elasticsearch.xpack.esql.optimizer.rules.SubstituteSurrogates; +import org.elasticsearch.xpack.esql.optimizer.rules.TranslateMetricsAggregate; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.plan.logical.Project; +import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; @@ -115,6 +116,9 @@ protected static Batch substitutions() { new ReplaceStatsAggExpressionWithEval(), // lastly replace surrogate functions new SubstituteSurrogates(), + // translate metric aggregates after surrogate substitution and replace nested expressions with eval (again) + new TranslateMetricsAggregate(), + new ReplaceStatsNestedExpressionWithEval(), new ReplaceRegexMatch(), new ReplaceTrivialTypeConversions(), new 
ReplaceAliasingEvalWithProject(), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java index 2387a4a210de3..cd61b4eb8892c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java @@ -8,9 +8,9 @@ package org.elasticsearch.xpack.esql.optimizer; import org.elasticsearch.xpack.esql.capabilities.Validatable; -import org.elasticsearch.xpack.esql.core.common.Failures; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.common.Failures; import org.elasticsearch.xpack.esql.optimizer.OptimizerRules.LogicalPlanDependencyCheck; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; public final class LogicalVerifier { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java index 4c5d9efb449f7..bff76fb1a706e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java @@ -7,15 +7,15 @@ package org.elasticsearch.xpack.esql.optimizer; -import org.elasticsearch.xpack.esql.core.common.Failures; +import org.elasticsearch.xpack.esql.common.Failures; import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.plan.QueryPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.MvExpand; import org.elasticsearch.xpack.esql.plan.logical.RegexExtract; import org.elasticsearch.xpack.esql.plan.logical.Row; @@ -36,7 +36,7 @@ import org.elasticsearch.xpack.esql.plan.physical.RowExec; import org.elasticsearch.xpack.esql.plan.physical.ShowExec; -import static org.elasticsearch.xpack.esql.core.common.Failure.fail; +import static org.elasticsearch.xpack.esql.common.Failure.fail; class OptimizerRules { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalOptimizerRules.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalOptimizerRules.java index 1def5a4133a3f..c669853d3357e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalOptimizerRules.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalOptimizerRules.java @@ -8,10 +8,10 @@ package org.elasticsearch.xpack.esql.optimizer; import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.TransformDirection; import org.elasticsearch.xpack.esql.core.rule.ParameterizedRule; import org.elasticsearch.xpack.esql.core.rule.Rule; import org.elasticsearch.xpack.esql.core.util.ReflectionUtils; +import 
org.elasticsearch.xpack.esql.optimizer.rules.OptimizerRules.TransformDirection; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; public class PhysicalOptimizerRules { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java index a0a3874a2c2de..e9fd6a713945c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java @@ -8,8 +8,9 @@ package org.elasticsearch.xpack.esql.optimizer; import org.elasticsearch.xpack.esql.VerificationException; -import org.elasticsearch.xpack.esql.core.common.Failure; +import org.elasticsearch.xpack.esql.common.Failure; import org.elasticsearch.xpack.esql.core.expression.Alias; +import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.AttributeMap; import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.Expression; @@ -21,7 +22,6 @@ import org.elasticsearch.xpack.esql.core.rule.RuleExecutor; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.util.Holder; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.Project; @@ -121,8 +121,8 @@ public PhysicalPlan apply(PhysicalPlan plan) { } if (p instanceof HashJoinExec join) { attributes.removeAll(join.addedFields()); - for (Equals cond : join.conditions()) { - attributes.remove(cond.right()); + for (Attribute rhs : join.rightFields()) { + attributes.remove(rhs); } } if (p instanceof EnrichExec ee) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java index 77c8e7da5d895..7843464650e37 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalVerifier.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.esql.optimizer; -import org.elasticsearch.xpack.esql.core.common.Failure; +import org.elasticsearch.xpack.esql.common.Failure; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.optimizer.OptimizerRules.PhysicalPlanDependencyCheck; @@ -18,7 +18,7 @@ import java.util.LinkedHashSet; import java.util.Set; -import static org.elasticsearch.xpack.esql.core.common.Failure.fail; +import static org.elasticsearch.xpack.esql.common.Failure.fail; /** Physical plan verifier. 
*/ public final class PhysicalVerifier { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/AddDefaultTopN.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/AddDefaultTopN.java index 28a7ba4bf7084..9208eba740100 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/AddDefaultTopN.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/AddDefaultTopN.java @@ -8,14 +8,14 @@ package org.elasticsearch.xpack.esql.optimizer.rules; import org.elasticsearch.xpack.esql.core.expression.Literal; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.OrderBy; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.optimizer.LogicalOptimizerContext; import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.plan.logical.TopN; +import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; /** * This adds an explicit TopN node to a plan that only has an OrderBy right before Lucene. diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanFunctionEqualsElimination.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanFunctionEqualsElimination.java index cf62f9219f3c8..1cdc2c02c8469 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanFunctionEqualsElimination.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanFunctionEqualsElimination.java @@ -21,11 +21,10 @@ * This rule must always be placed after {@link LiteralsOnTheRight} * since it looks at TRUE/FALSE literals' existence on the right hand-side of the {@link Equals}/{@link NotEquals} expressions. 
*/ -public final class BooleanFunctionEqualsElimination extends - org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.OptimizerExpressionRule { +public final class BooleanFunctionEqualsElimination extends OptimizerRules.OptimizerExpressionRule { public BooleanFunctionEqualsElimination() { - super(org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.TransformDirection.UP); + super(OptimizerRules.TransformDirection.UP); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanSimplification.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanSimplification.java index b01525cc447fc..2a3f7fb9d1244 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanSimplification.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanSimplification.java @@ -9,7 +9,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; -public final class BooleanSimplification extends org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.BooleanSimplification { +public final class BooleanSimplification extends OptimizerRules.BooleanSimplification { public BooleanSimplification() { super(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineDisjunctionsToIn.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineDisjunctionsToIn.java index c34252300350c..2dc2f0e504303 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineDisjunctionsToIn.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineDisjunctionsToIn.java @@ -35,9 +35,9 @@ * This rule does NOT check for type compatibility as that phase has been * already be verified in the analyzer. */ -public final class CombineDisjunctionsToIn extends org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.OptimizerExpressionRule { +public final class CombineDisjunctionsToIn extends OptimizerRules.OptimizerExpressionRule { public CombineDisjunctionsToIn() { - super(org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.TransformDirection.UP); + super(OptimizerRules.TransformDirection.UP); } protected In createIn(Expression key, List values, ZoneId zoneId) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineEvals.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineEvals.java index 40e9836d0afa1..f8210d06e4439 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineEvals.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineEvals.java @@ -7,10 +7,9 @@ package org.elasticsearch.xpack.esql.optimizer.rules; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.util.CollectionUtils; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; /** * Combine multiple Evals into one in order to reduce the number of nodes in a plan. 
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineProjections.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineProjections.java index 940c08ffb97f1..3c0ac9056c8c5 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineProjections.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineProjections.java @@ -15,11 +15,10 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Project; +import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; import java.util.ArrayList; import java.util.List; @@ -53,7 +52,7 @@ protected LogicalPlan rule(UnaryPlan plan) { // project can be fully removed if (newAggs != null) { var newGroups = replacePrunedAliasesUsedInGroupBy(a.groupings(), aggs, newAggs); - plan = new Aggregate(a.source(), a.child(), newGroups, newAggs); + plan = new Aggregate(a.source(), a.child(), a.aggregateType(), newGroups, newAggs); } } return plan; @@ -75,6 +74,7 @@ protected LogicalPlan rule(UnaryPlan plan) { plan = new Aggregate( a.source(), p.child(), + a.aggregateType(), combineUpperGroupingsAndLowerProjections(groupingAttrs, p.projections()), combineProjections(a.aggregates(), p.projections()) ); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ConstantFolding.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ConstantFolding.java index f2638333c9601..2178013c42148 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ConstantFolding.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ConstantFolding.java @@ -9,7 +9,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Literal; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; public final class ConstantFolding extends OptimizerRules.OptimizerExpressionRule { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ConvertStringToByteRef.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ConvertStringToByteRef.java index 384f56d96de73..a1969df3f898a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ConvertStringToByteRef.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ConvertStringToByteRef.java @@ -10,7 +10,6 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Literal; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; import java.util.ArrayList; import java.util.List; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/DuplicateLimitAfterMvExpand.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/DuplicateLimitAfterMvExpand.java index 6b944bf7adf4f..ab1dc407a7a4a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/DuplicateLimitAfterMvExpand.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/DuplicateLimitAfterMvExpand.java @@ -9,18 +9,17 @@ import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.Filter; -import org.elasticsearch.xpack.esql.core.plan.logical.Limit; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.OrderBy; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.Filter; +import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.MvExpand; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.RegexExtract; +import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; public final class DuplicateLimitAfterMvExpand extends OptimizerRules.OptimizerRule { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/FoldNull.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/FoldNull.java index 25ad5e3966f21..6e01811b8527c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/FoldNull.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/FoldNull.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.esql.optimizer.rules; import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; public class FoldNull extends OptimizerRules.FoldNull { @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/LiteralsOnTheRight.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/LiteralsOnTheRight.java index 528fe65766972..36d39e0ee1c73 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/LiteralsOnTheRight.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/LiteralsOnTheRight.java @@ -9,7 +9,6 @@ import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.expression.predicate.BinaryOperator; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; public final class LiteralsOnTheRight extends OptimizerRules.OptimizerExpressionRule> { diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/optimizer/OptimizerRules.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/OptimizerRules.java similarity index 63% rename from x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/optimizer/OptimizerRules.java rename to 
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/OptimizerRules.java index ba19a73f91c06..6f6260fd0de27 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/optimizer/OptimizerRules.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/OptimizerRules.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. */ -package org.elasticsearch.xpack.esql.core.optimizer; +package org.elasticsearch.xpack.esql.optimizer.rules; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.xpack.esql.core.expression.Alias; @@ -12,36 +12,24 @@ import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.expression.Nullability; -import org.elasticsearch.xpack.esql.core.expression.function.Function; import org.elasticsearch.xpack.esql.core.expression.function.scalar.ScalarFunction; -import org.elasticsearch.xpack.esql.core.expression.function.scalar.SurrogateFunction; import org.elasticsearch.xpack.esql.core.expression.predicate.BinaryPredicate; import org.elasticsearch.xpack.esql.core.expression.predicate.Negatable; import org.elasticsearch.xpack.esql.core.expression.predicate.Predicates; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; -import org.elasticsearch.xpack.esql.core.expression.predicate.logical.BinaryLogic; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Not; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull; import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNull; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparison; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.In; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.NotEquals; -import org.elasticsearch.xpack.esql.core.plan.logical.Filter; -import org.elasticsearch.xpack.esql.core.plan.logical.Limit; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.rule.Rule; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.ReflectionUtils; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; -import java.time.ZoneId; -import java.util.ArrayList; -import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.LinkedList; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.function.BiFunction; @@ -56,34 +44,6 @@ import static org.elasticsearch.xpack.esql.core.util.CollectionUtils.combine; public final class OptimizerRules { - - /** - * This rule must always be placed after LiteralsOnTheRight, since it looks at TRUE/FALSE literals' existence - * on the right hand-side of the {@link Equals}/{@link NotEquals} expressions. 
- */ - public static final class BooleanFunctionEqualsElimination extends OptimizerExpressionRule { - - public BooleanFunctionEqualsElimination() { - super(TransformDirection.UP); - } - - @Override - protected Expression rule(BinaryComparison bc) { - if ((bc instanceof Equals || bc instanceof NotEquals) && bc.left() instanceof Function) { - // for expression "==" or "!=" TRUE/FALSE, return the expression itself or its negated variant - - if (TRUE.equals(bc.right())) { - return bc instanceof Equals ? bc.left() : new Not(bc.left().source(), bc.left()); - } - if (FALSE.equals(bc.right())) { - return bc instanceof Equals ? new Not(bc.left().source(), bc.left()) : bc.left(); - } - } - - return bc; - } - } - public static class BooleanSimplification extends OptimizerExpressionRule { public BooleanSimplification() { @@ -220,178 +180,6 @@ protected Expression maybeSimplifyNegatable(Expression e) { } } - /** - * Combine disjunctions on the same field into an In expression. - * This rule looks for both simple equalities: - * 1. a == 1 OR a == 2 becomes a IN (1, 2) - * and combinations of In - * 2. a == 1 OR a IN (2) becomes a IN (1, 2) - * 3. a IN (1) OR a IN (2) becomes a IN (1, 2) - * - * This rule does NOT check for type compatibility as that phase has been - * already be verified in the analyzer. - */ - public static class CombineDisjunctionsToIn extends OptimizerExpressionRule { - public CombineDisjunctionsToIn() { - super(TransformDirection.UP); - } - - @Override - protected Expression rule(Or or) { - Expression e = or; - // look only at equals and In - List exps = splitOr(e); - - Map> found = new LinkedHashMap<>(); - ZoneId zoneId = null; - List ors = new LinkedList<>(); - - for (Expression exp : exps) { - if (exp instanceof Equals eq) { - // consider only equals against foldables - if (eq.right().foldable()) { - found.computeIfAbsent(eq.left(), k -> new LinkedHashSet<>()).add(eq.right()); - } else { - ors.add(exp); - } - if (zoneId == null) { - zoneId = eq.zoneId(); - } - } else if (exp instanceof In in) { - found.computeIfAbsent(in.value(), k -> new LinkedHashSet<>()).addAll(in.list()); - if (zoneId == null) { - zoneId = in.zoneId(); - } - } else { - ors.add(exp); - } - } - - if (found.isEmpty() == false) { - // combine equals alongside the existing ors - final ZoneId finalZoneId = zoneId; - found.forEach( - (k, v) -> { ors.add(v.size() == 1 ? createEquals(k, v, finalZoneId) : createIn(k, new ArrayList<>(v), finalZoneId)); } - ); - - Expression combineOr = combineOr(ors); - // check the result semantically since the result might different in order - // but be actually the same which can trigger a loop - // e.g. 
a == 1 OR a == 2 OR null --> null OR a in (1,2) --> literalsOnTheRight --> cycle - if (e.semanticEquals(combineOr) == false) { - e = combineOr; - } - } - - return e; - } - - protected Equals createEquals(Expression k, Set v, ZoneId finalZoneId) { - return new Equals(k.source(), k, v.iterator().next(), finalZoneId); - } - - protected In createIn(Expression key, List values, ZoneId zoneId) { - return new In(key.source(), key, values, zoneId); - } - } - - public static class ReplaceSurrogateFunction extends OptimizerExpressionRule { - - public ReplaceSurrogateFunction() { - super(TransformDirection.DOWN); - } - - @Override - protected Expression rule(Expression e) { - if (e instanceof SurrogateFunction) { - e = ((SurrogateFunction) e).substitute(); - } - return e; - } - } - - public abstract static class PruneFilters extends OptimizerRule { - - @Override - protected LogicalPlan rule(Filter filter) { - Expression condition = filter.condition().transformUp(BinaryLogic.class, PruneFilters::foldBinaryLogic); - - if (condition instanceof Literal) { - if (TRUE.equals(condition)) { - return filter.child(); - } - if (FALSE.equals(condition) || Expressions.isNull(condition)) { - return skipPlan(filter); - } - } - - if (condition.equals(filter.condition()) == false) { - return new Filter(filter.source(), filter.child(), condition); - } - return filter; - } - - protected abstract LogicalPlan skipPlan(Filter filter); - - private static Expression foldBinaryLogic(BinaryLogic binaryLogic) { - if (binaryLogic instanceof Or or) { - boolean nullLeft = Expressions.isNull(or.left()); - boolean nullRight = Expressions.isNull(or.right()); - if (nullLeft && nullRight) { - return new Literal(binaryLogic.source(), null, DataType.NULL); - } - if (nullLeft) { - return or.right(); - } - if (nullRight) { - return or.left(); - } - } - if (binaryLogic instanceof And and) { - if (Expressions.isNull(and.left()) || Expressions.isNull(and.right())) { - return new Literal(binaryLogic.source(), null, DataType.NULL); - } - } - return binaryLogic; - } - } - - // NB: it is important to start replacing casts from the bottom to properly replace aliases - public abstract static class PruneCast extends Rule { - - private final Class castType; - - public PruneCast(Class castType) { - this.castType = castType; - } - - @Override - public final LogicalPlan apply(LogicalPlan plan) { - return rule(plan); - } - - protected final LogicalPlan rule(LogicalPlan plan) { - // eliminate redundant casts - return plan.transformExpressionsUp(castType, this::maybePruneCast); - } - - protected abstract Expression maybePruneCast(C cast); - } - - public abstract static class SkipQueryOnLimitZero extends OptimizerRule { - @Override - protected LogicalPlan rule(Limit limit) { - if (limit.limit().foldable()) { - if (Integer.valueOf(0).equals((limit.limit().fold()))) { - return skipPlan(limit); - } - } - return limit; - } - - protected abstract LogicalPlan skipPlan(Limit limit); - } - public static class FoldNull extends OptimizerExpressionRule { public FoldNull() { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PartiallyFoldCase.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PartiallyFoldCase.java index 6b900d91eb061..78435f852982e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PartiallyFoldCase.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PartiallyFoldCase.java @@ -8,10 +8,9 @@ package 
org.elasticsearch.xpack.esql.optimizer.rules; import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Case; -import static org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.TransformDirection.DOWN; +import static org.elasticsearch.xpack.esql.optimizer.rules.OptimizerRules.TransformDirection.DOWN; /** * Fold the arms of {@code CASE} statements. diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEmptyRelation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEmptyRelation.java index 8a3281dd7df81..c57e490423ce8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEmptyRelation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEmptyRelation.java @@ -13,13 +13,12 @@ import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; import org.elasticsearch.xpack.esql.planner.PlannerUtils; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEquals.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEquals.java index 5f08363abdbaf..8e5d203942c7a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEquals.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEquals.java @@ -35,10 +35,10 @@ * When encountering a different Equals, non-containing {@link Range} or {@link BinaryComparison}, the conjunction becomes false. * When encountering a containing {@link Range}, {@link BinaryComparison} or {@link NotEquals}, these get eliminated by the equality. 
*/ -public final class PropagateEquals extends org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.OptimizerExpressionRule { +public final class PropagateEquals extends OptimizerRules.OptimizerExpressionRule { public PropagateEquals() { - super(org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.TransformDirection.DOWN); + super(OptimizerRules.TransformDirection.DOWN); } public Expression rule(BinaryLogic e) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEvalFoldables.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEvalFoldables.java index 872bff80926d6..9231105c9b663 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEvalFoldables.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEvalFoldables.java @@ -12,10 +12,10 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; -import org.elasticsearch.xpack.esql.core.plan.logical.Filter; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.rule.Rule; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.Filter; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; /** * Replace any reference attribute with its source, if it does not affect the result. diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateNullable.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateNullable.java index 73ea21f9c8191..08c560c326e81 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateNullable.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateNullable.java @@ -9,7 +9,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Literal; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; import java.util.ArrayList; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneColumns.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneColumns.java index cb0224c9c834d..baeabb534aa3c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneColumns.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneColumns.java @@ -13,12 +13,12 @@ import org.elasticsearch.xpack.esql.core.expression.EmptyAttribute; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.plan.logical.Limit; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.rule.Rule; import org.elasticsearch.xpack.esql.core.util.Holder; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import 
org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; @@ -69,10 +69,22 @@ public LogicalPlan apply(LogicalPlan plan) { } else { // Aggs cannot produce pages with 0 columns, so retain one grouping. remaining = List.of(Expressions.attribute(aggregate.groupings().get(0))); - p = new Aggregate(aggregate.source(), aggregate.child(), aggregate.groupings(), remaining); + p = new Aggregate( + aggregate.source(), + aggregate.child(), + aggregate.aggregateType(), + aggregate.groupings(), + remaining + ); } } else { - p = new Aggregate(aggregate.source(), aggregate.child(), aggregate.groupings(), remaining); + p = new Aggregate( + aggregate.source(), + aggregate.child(), + aggregate.aggregateType(), + aggregate.groupings(), + remaining + ); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneEmptyPlans.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneEmptyPlans.java index 5c9ef44207366..739d59d8b0df6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneEmptyPlans.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneEmptyPlans.java @@ -7,10 +7,9 @@ package org.elasticsearch.xpack.esql.optimizer.rules; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; public final class PruneEmptyPlans extends OptimizerRules.OptimizerRule { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneFilters.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneFilters.java index 72df4261663e5..7e9ff7c5f5f02 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneFilters.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneFilters.java @@ -7,15 +7,60 @@ package org.elasticsearch.xpack.esql.optimizer.rules; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.Filter; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.Expressions; +import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.BinaryLogic; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; +import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; +import org.elasticsearch.xpack.esql.plan.logical.Filter; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; -public final class PruneFilters extends OptimizerRules.PruneFilters { +import static org.elasticsearch.xpack.esql.core.expression.Literal.FALSE; +import static org.elasticsearch.xpack.esql.core.expression.Literal.TRUE; +public final class PruneFilters extends 
OptimizerRules.OptimizerRule { @Override - protected LogicalPlan skipPlan(Filter filter) { - return LogicalPlanOptimizer.skipPlan(filter); + protected LogicalPlan rule(Filter filter) { + Expression condition = filter.condition().transformUp(BinaryLogic.class, PruneFilters::foldBinaryLogic); + + if (condition instanceof Literal) { + if (TRUE.equals(condition)) { + return filter.child(); + } + if (FALSE.equals(condition) || Expressions.isNull(condition)) { + return LogicalPlanOptimizer.skipPlan(filter); + } + } + + if (condition.equals(filter.condition()) == false) { + return new Filter(filter.source(), filter.child(), condition); + } + return filter; } + + private static Expression foldBinaryLogic(BinaryLogic binaryLogic) { + if (binaryLogic instanceof Or or) { + boolean nullLeft = Expressions.isNull(or.left()); + boolean nullRight = Expressions.isNull(or.right()); + if (nullLeft && nullRight) { + return new Literal(binaryLogic.source(), null, DataType.NULL); + } + if (nullLeft) { + return or.right(); + } + if (nullRight) { + return or.left(); + } + } + if (binaryLogic instanceof And and) { + if (Expressions.isNull(and.left()) || Expressions.isNull(and.right())) { + return new Literal(binaryLogic.source(), null, DataType.NULL); + } + } + return binaryLogic; + } + } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneLiteralsInOrderBy.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneLiteralsInOrderBy.java index 591cfe043c00d..1fe67c2c435c2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneLiteralsInOrderBy.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneLiteralsInOrderBy.java @@ -8,9 +8,8 @@ package org.elasticsearch.xpack.esql.optimizer.rules; import org.elasticsearch.xpack.esql.core.expression.Order; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.OrderBy; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; import java.util.ArrayList; import java.util.List; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneOrderByBeforeStats.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneOrderByBeforeStats.java index 690bc92b1c338..f2ef524f2c91e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneOrderByBeforeStats.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneOrderByBeforeStats.java @@ -7,16 +7,15 @@ package org.elasticsearch.xpack.esql.optimizer.rules; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.Filter; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.OrderBy; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.Filter; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; import 
org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.RegexExtract; +import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; public final class PruneOrderByBeforeStats extends OptimizerRules.OptimizerRule { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneRedundantSortClauses.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneRedundantSortClauses.java index 3a9421ee7f159..dc68ae5981429 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneRedundantSortClauses.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PruneRedundantSortClauses.java @@ -9,9 +9,8 @@ import org.elasticsearch.xpack.esql.core.expression.ExpressionSet; import org.elasticsearch.xpack.esql.core.expression.Order; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.OrderBy; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; import java.util.ArrayList; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownAndCombineFilters.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownAndCombineFilters.java index 647c5c3730157..48013e113fe43 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownAndCombineFilters.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownAndCombineFilters.java @@ -12,18 +12,17 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.predicate.Predicates; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.Filter; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.OrderBy; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.Filter; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.RegexExtract; +import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; import java.util.ArrayList; import java.util.List; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownAndCombineLimits.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownAndCombineLimits.java index 46fb654d03760..62ecf9ccd09be 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownAndCombineLimits.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownAndCombineLimits.java @@ 
-8,16 +8,15 @@ package org.elasticsearch.xpack.esql.optimizer.rules; import org.elasticsearch.xpack.esql.core.expression.Literal; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.Limit; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.MvExpand; import org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.RegexExtract; +import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.plan.logical.join.Join; import org.elasticsearch.xpack.esql.plan.logical.join.JoinType; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownAndCombineOrderBy.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownAndCombineOrderBy.java index f01616953427d..286695abda25b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownAndCombineOrderBy.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownAndCombineOrderBy.java @@ -7,10 +7,9 @@ package org.elasticsearch.xpack.esql.optimizer.rules; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.plan.logical.Project; public final class PushDownAndCombineOrderBy extends OptimizerRules.OptimizerRule { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownEnrich.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownEnrich.java index f6a0154108f2d..7185f63964c34 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownEnrich.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownEnrich.java @@ -7,10 +7,9 @@ package org.elasticsearch.xpack.esql.optimizer.rules; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.Enrich; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import static org.elasticsearch.xpack.esql.core.expression.Expressions.asAttributes; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownEval.java index b936e5569c950..92c25a60bba77 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownEval.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownEval.java @@ -7,10 +7,9 @@ package org.elasticsearch.xpack.esql.optimizer.rules; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import static org.elasticsearch.xpack.esql.core.expression.Expressions.asAttributes; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownRegexExtract.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownRegexExtract.java index f247d0a631b29..d24a61f89dd7f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownRegexExtract.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/PushDownRegexExtract.java @@ -7,9 +7,8 @@ package org.elasticsearch.xpack.esql.optimizer.rules; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.RegexExtract; public final class PushDownRegexExtract extends OptimizerRules.OptimizerRule { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/RemoveStatsOverride.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/RemoveStatsOverride.java index cf04637e456a5..5592a04e2f813 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/RemoveStatsOverride.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/RemoveStatsOverride.java @@ -8,11 +8,11 @@ package org.elasticsearch.xpack.esql.optimizer.rules; import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.xpack.esql.core.analyzer.AnalyzerRules; +import org.elasticsearch.xpack.esql.analysis.AnalyzerRules; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import java.util.ArrayList; import java.util.List; @@ -50,7 +50,7 @@ private static Aggregate removeAggDuplicates(Aggregate agg) { aggregates = removeDuplicateNames(aggregates); // replace EsqlAggregate with Aggregate - return new Aggregate(agg.source(), agg.child(), groupings, aggregates); + return new Aggregate(agg.source(), agg.child(), agg.aggregateType(), groupings, aggregates); } private static List removeDuplicateNames(List list) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceAliasingEvalWithProject.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceAliasingEvalWithProject.java index 2bbfeaac965ef..34b75cd89f68c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceAliasingEvalWithProject.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceAliasingEvalWithProject.java 
@@ -12,11 +12,11 @@ import org.elasticsearch.xpack.esql.core.expression.AttributeMap; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.rule.Rule; import org.elasticsearch.xpack.esql.core.util.Holder; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Project; import java.util.ArrayList; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceLimitAndSortAsTopN.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceLimitAndSortAsTopN.java index ec912735f8451..6394d11bb68c8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceLimitAndSortAsTopN.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceLimitAndSortAsTopN.java @@ -7,10 +7,9 @@ package org.elasticsearch.xpack.esql.optimizer.rules; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.Limit; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.OrderBy; +import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.plan.logical.TopN; public final class ReplaceLimitAndSortAsTopN extends OptimizerRules.OptimizerRule { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceLookupWithJoin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceLookupWithJoin.java index f6c8f4a59a70c..f258ea97bfa33 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceLookupWithJoin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceLookupWithJoin.java @@ -7,8 +7,7 @@ package org.elasticsearch.xpack.esql.optimizer.rules; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Lookup; import org.elasticsearch.xpack.esql.plan.logical.join.Join; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceOrderByExpressionWithEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceOrderByExpressionWithEval.java index 476da7476f7fb..02fc98428f14a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceOrderByExpressionWithEval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceOrderByExpressionWithEval.java @@ -10,10 +10,9 @@ import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Order; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import 
org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.plan.logical.Project; import java.util.ArrayList; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceRegexMatch.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceRegexMatch.java index 5cba7349debfd..cc18940e68924 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceRegexMatch.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceRegexMatch.java @@ -15,11 +15,10 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; -public final class ReplaceRegexMatch extends org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.OptimizerExpressionRule< - RegexMatch> { +public final class ReplaceRegexMatch extends OptimizerRules.OptimizerExpressionRule> { public ReplaceRegexMatch() { - super(org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.TransformDirection.DOWN); + super(OptimizerRules.TransformDirection.DOWN); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsAggExpressionWithEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsAggExpressionWithEval.java index 9a24926953947..31b543cd115df 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsAggExpressionWithEval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsAggExpressionWithEval.java @@ -12,14 +12,13 @@ import org.elasticsearch.xpack.esql.core.expression.AttributeMap; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.util.CollectionUtils; import org.elasticsearch.xpack.esql.core.util.Holder; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Project; import java.util.ArrayList; @@ -138,7 +137,7 @@ protected LogicalPlan rule(Aggregate aggregate) { LogicalPlan plan = aggregate; if (changed.get()) { Source source = aggregate.source(); - plan = new Aggregate(source, aggregate.child(), aggregate.groupings(), newAggs); + plan = new Aggregate(source, aggregate.child(), aggregate.aggregateType(), aggregate.groupings(), newAggs); if (newEvals.size() > 0) { plan = new Eval(source, plan, newEvals); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsNestedExpressionWithEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsNestedExpressionWithEval.java index 
dc7686f57f2f4..0979b745a6607 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsNestedExpressionWithEval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsNestedExpressionWithEval.java @@ -11,13 +11,12 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.util.Holder; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.expression.function.grouping.GroupingFunction; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import java.util.ArrayList; import java.util.HashMap; @@ -73,6 +72,15 @@ protected LogicalPlan rule(Aggregate aggregate) { // if the child is a nested expression Expression child = as.child(); + // do not replace nested aggregates + if (child instanceof AggregateFunction af) { + Holder foundNestedAggs = new Holder<>(Boolean.FALSE); + af.children().forEach(e -> e.forEachDown(AggregateFunction.class, unused -> foundNestedAggs.set(Boolean.TRUE))); + if (foundNestedAggs.get()) { + return as; + } + } + // shortcut for common scenario if (child instanceof AggregateFunction af && af.field() instanceof Attribute) { return as; @@ -125,7 +133,7 @@ protected LogicalPlan rule(Aggregate aggregate) { var aggregates = aggsChanged.get() ? newAggs : aggregate.aggregates(); var newEval = new Eval(aggregate.source(), aggregate.child(), evals); - aggregate = new Aggregate(aggregate.source(), newEval, groupings, aggregates); + aggregate = new Aggregate(aggregate.source(), newEval, aggregate.aggregateType(), groupings, aggregates); } return aggregate; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceTrivialTypeConversions.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceTrivialTypeConversions.java index 2763c71c4bcb6..dc877a99010f8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceTrivialTypeConversions.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceTrivialTypeConversions.java @@ -9,10 +9,9 @@ import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; /** * Replace type converting eval with aliasing eval when type change does not occur. 
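Editor's note: the nested-aggregate guard added to ReplaceStatsNestedExpressionWithEval above amounts to a depth-first scan of an aggregate's arguments: if any argument itself contains another aggregate function, the alias is returned unchanged instead of being rewritten. Below is a minimal, self-contained sketch of that check using hypothetical Expr/Field/Agg types, not the real ES|QL classes.

    import java.util.List;

    class NestedAggCheck {
        public static void main(String[] args) {
            Agg plain = new Agg("avg", new Field("price"));                   // avg(price)      -> rewrite
            Agg nested = new Agg("max", new Agg("avg", new Field("price")));  // max(avg(price)) -> leave as-is
            System.out.println(shouldRewrite(plain));   // true
            System.out.println(shouldRewrite(nested));  // false
        }

        // Rewrite only when none of the aggregate's arguments contain another aggregate.
        static boolean shouldRewrite(Agg agg) {
            return agg.children().stream().noneMatch(NestedAggCheck::containsAgg);
        }

        // Depth-first scan; stand-in for walking the expression tree top-down looking for aggregate nodes.
        static boolean containsAgg(Expr expr) {
            return expr instanceof Agg || expr.children().stream().anyMatch(NestedAggCheck::containsAgg);
        }
    }

    interface Expr {
        List<Expr> children();
    }

    record Field(String name) implements Expr {
        @Override
        public List<Expr> children() {
            return List.of();
        }
    }

    record Agg(String name, Expr arg) implements Expr {
        @Override
        public List<Expr> children() {
            return List.of(arg);
        }
    }

The actual rule performs the same walk with a Holder flag and forEachDown(AggregateFunction.class, ...) over the aggregate's children, as shown in the hunk above.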
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SetAsOptimized.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SetAsOptimized.java index 168270b68db2d..89d2e7613d2c7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SetAsOptimized.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SetAsOptimized.java @@ -7,8 +7,8 @@ package org.elasticsearch.xpack.esql.optimizer.rules; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.rule.Rule; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; public final class SetAsOptimized extends Rule { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SimplifyComparisonsArithmetics.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SimplifyComparisonsArithmetics.java index 0d3aaaa3a9d47..4ef069ea16d04 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SimplifyComparisonsArithmetics.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SimplifyComparisonsArithmetics.java @@ -32,12 +32,11 @@ /** * Simplifies arithmetic expressions with BinaryComparisons and fixed point fields, such as: (int + 2) / 3 > 4 => int > 10 */ -public final class SimplifyComparisonsArithmetics extends - org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.OptimizerExpressionRule { +public final class SimplifyComparisonsArithmetics extends OptimizerRules.OptimizerExpressionRule { BiFunction typesCompatible; public SimplifyComparisonsArithmetics(BiFunction typesCompatible) { - super(org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.TransformDirection.UP); + super(OptimizerRules.TransformDirection.UP); this.typesCompatible = typesCompatible; } @@ -132,7 +131,7 @@ final boolean isUnsafe(BiFunction typesCompatible) // x + 1e18 > 1e18::long will yield different results with a field value in [-2^6, 2^6], optimised vs original; // x * (1 + 1e-15d) > 1 : same with a field value of (1 - 1e-15d) // so consequently, int fields optimisation requiring FP arithmetic isn't possible either: (x - 1e-15) * (1 + 1e-15) > 1. - if (opLiteral.dataType().isRational() || bcLiteral.dataType().isRational()) { + if (opLiteral.dataType().isRationalNumber() || bcLiteral.dataType().isRationalNumber()) { return true; } @@ -146,7 +145,7 @@ final boolean isUnsafe(BiFunction typesCompatible) final Expression apply() { // force float point folding for FlP field - Literal bcl = operation.dataType().isRational() + Literal bcl = operation.dataType().isRationalNumber() ? 
new Literal(bcLiteral.source(), ((Number) bcLiteral.value()).doubleValue(), DataType.DOUBLE) : bcLiteral; @@ -177,7 +176,7 @@ private static class AddSubSimplifier extends OperationSimplifier { @Override boolean isOpUnsafe() { // no ADD/SUB with floating fields - if (operation.dataType().isRational()) { + if (operation.dataType().isRationalNumber()) { return true; } @@ -204,12 +203,12 @@ private static class MulDivSimplifier extends OperationSimplifier { @Override boolean isOpUnsafe() { // Integer divisions are not safe to optimise: x / 5 > 1 <=/=> x > 5 for x in [6, 9]; same for the `==` comp - if (operation.dataType().isInteger() && isDiv) { + if (operation.dataType().isWholeNumber() && isDiv) { return true; } // If current operation is a multiplication, it's inverse will be a division: safe only if outcome is still integral. - if (isDiv == false && opLeft.dataType().isInteger()) { + if (isDiv == false && opLeft.dataType().isWholeNumber()) { long opLiteralValue = ((Number) opLiteral.value()).longValue(); return opLiteralValue == 0 || ((Number) bcLiteral.value()).longValue() % opLiteralValue != 0; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SkipQueryOnEmptyMappings.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SkipQueryOnEmptyMappings.java index 7ec215db65626..99efacd4ea39a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SkipQueryOnEmptyMappings.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SkipQueryOnEmptyMappings.java @@ -7,9 +7,8 @@ package org.elasticsearch.xpack.esql.optimizer.rules; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SkipQueryOnLimitZero.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SkipQueryOnLimitZero.java index 7cb4f2926045d..199520d648a26 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SkipQueryOnLimitZero.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SkipQueryOnLimitZero.java @@ -7,15 +7,18 @@ package org.elasticsearch.xpack.esql.optimizer.rules; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.Limit; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; +import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; -public final class SkipQueryOnLimitZero extends OptimizerRules.SkipQueryOnLimitZero { - +public final class SkipQueryOnLimitZero extends OptimizerRules.OptimizerRule { @Override - protected LogicalPlan skipPlan(Limit limit) { - return LogicalPlanOptimizer.skipPlan(limit); + protected LogicalPlan rule(Limit limit) { + if (limit.limit().foldable()) { + if (Integer.valueOf(0).equals((limit.limit().fold()))) { + return LogicalPlanOptimizer.skipPlan(limit); + } + } + return limit; } } diff --git 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SplitInWithFoldableValue.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SplitInWithFoldableValue.java index c762f396a6f43..1d4e90fe0d5ca 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SplitInWithFoldableValue.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SplitInWithFoldableValue.java @@ -10,7 +10,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; import java.util.ArrayList; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SubstituteSpatialSurrogates.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SubstituteSpatialSurrogates.java index c5293785bf1ba..e6501452eeb65 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SubstituteSpatialSurrogates.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SubstituteSpatialSurrogates.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.esql.optimizer.rules; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesFunction; /** diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SubstituteSurrogates.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SubstituteSurrogates.java index 39617b443a286..2307f6324e942 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SubstituteSurrogates.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/SubstituteSurrogates.java @@ -15,12 +15,12 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.expression.SurrogateExpression; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.esql.expression.function.aggregate.Rate; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; @@ -70,6 +70,10 @@ protected LogicalPlan rule(Aggregate aggregate) { if (s instanceof AggregateFunction == false) { // 1. collect all aggregate functions from the expression var surrogateWithRefs = s.transformUp(AggregateFunction.class, af -> { + // TODO: more generic than this? + if (af instanceof Rate) { + return af; + } // 2. check if they are already use otherwise add them to the Aggregate with some made-up aliases // 3. 
replace them inside the expression using the given alias var attr = aggFuncToAttr.get(af); @@ -103,7 +107,7 @@ protected LogicalPlan rule(Aggregate aggregate) { if (changed) { var source = aggregate.source(); if (newAggs.isEmpty() == false) { - plan = new Aggregate(source, aggregate.child(), aggregate.groupings(), newAggs); + plan = new Aggregate(source, aggregate.child(), aggregate.aggregateType(), aggregate.groupings(), newAggs); } else { // All aggs actually have been surrogates for (foldable) expressions, e.g. // \_Aggregate[[],[AVG([1, 2][INTEGER]) AS s]] diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/TranslateMetricsAggregate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/TranslateMetricsAggregate.java new file mode 100644 index 0000000000000..10c7a7325debc --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/TranslateMetricsAggregate.java @@ -0,0 +1,242 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.optimizer.rules; + +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.core.expression.Alias; +import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.Expressions; +import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; +import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.core.util.Holder; +import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.esql.expression.function.aggregate.FromPartial; +import org.elasticsearch.xpack.esql.expression.function.aggregate.Rate; +import org.elasticsearch.xpack.esql.expression.function.aggregate.ToPartial; +import org.elasticsearch.xpack.esql.expression.function.aggregate.Values; +import org.elasticsearch.xpack.esql.expression.function.grouping.Bucket; +import org.elasticsearch.xpack.esql.plan.logical.Aggregate; +import org.elasticsearch.xpack.esql.plan.logical.EsRelation; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Stream; + +/** + * Rate aggregation is special because it must be computed per time series, regardless of the grouping keys. + * The keys must be `_tsid` or a pair of `_tsid` and `time_bucket`. To support user-defined grouping keys, + * we first execute the rate aggregation using the time-series keys, then perform another aggregation with + * the resulting rate using the user-specific keys. + *

+ * This class translates the aggregates in the METRICS commands to standard aggregates.
+ * This approach helps avoid introducing new plans and operators for metrics aggregations specially.
+ *
+ * Examples:
+ *
+ * METRICS k8s max(rate(request))
+ *
+ * becomes
+ *
+ * METRICS k8s
+ * | STATS rate(request) BY _tsid
+ * | STATS max(`rate(request)`)
+ *
+ * METRICS k8s max(rate(request)) BY host
+ *
+ * becomes
+ *
+ * METRICS k8s
+ * | STATS rate(request), VALUES(host) BY _tsid
+ * | STATS max(`rate(request)`) BY host=`VALUES(host)`
+ *
+ * METRICS k8s avg(rate(request)) BY host
+ *
+ * becomes
+ *
+ * METRICS k8s
+ * | STATS rate(request), VALUES(host) BY _tsid
+ * | STATS sum=sum(`rate(request)`), count(`rate(request)`) BY host=`VALUES(host)`
+ * | EVAL `avg(rate(request))` = `sum(rate(request))` / `count(rate(request))`
+ * | KEEP `avg(rate(request))`, host
+ *
+ * METRICS k8s avg(rate(request)) BY host, bucket(@timestamp, 1minute)
+ *
+ * becomes
+ *
+ * METRICS k8s
+ * | EVAL `bucket(@timestamp, 1minute)`=datetrunc(@timestamp, 1minute)
+ * | STATS rate(request), VALUES(host) BY _tsid,`bucket(@timestamp, 1minute)`
+ * | STATS sum=sum(`rate(request)`), count(`rate(request)`) BY host=`VALUES(host)`, `bucket(@timestamp, 1minute)`
+ * | EVAL `avg(rate(request))` = `sum(rate(request))` / `count(rate(request))`
+ * | KEEP `avg(rate(request))`, host, `bucket(@timestamp, 1minute)`
+ *
+ * Non-rate aggregates will be rewritten as a pair of to_partial and from_partial aggregates, where the `to_partial`
+ * aggregates will be executed in the first pass and always produce an intermediate output regardless of the aggregate
+ * mode. The `from_partial` aggregates will be executed on the second pass and always receive intermediate output
+ * produced by `to_partial`. Examples:
+ *
+ * METRICS k8s max(rate(request)), max(memory_used) becomes:
+ *
+ * METRICS k8s
+ * | STATS rate(request), $p1=to_partial(max(memory_used)) BY _tsid
+ * | STATS max(`rate(request)`), `max(memory_used)` = from_partial($p1, max($_))
+ *
+ * METRICS k8s max(rate(request)) avg(memory_used) BY host
+ *
+ * becomes
+ *
+ * METRICS k8s
+ * | STATS rate(request), $p1=to_partial(sum(memory_used)), $p2=to_partial(count(memory_used)), VALUES(host) BY _tsid
+ * | STATS max(`rate(request)`), $sum=from_partial($p1, sum($_)), $count=from_partial($p2, count($_)) BY host=`VALUES(host)`
+ * | EVAL `avg(memory_used)` = $sum / $count
+ * | KEEP `max(rate(request))`, `avg(memory_used)`, host
+ *
+ * METRICS k8s min(memory_used) sum(rate(request)) BY pod, bucket(@timestamp, 5m)
+ *
+ * becomes
+ *
+ * METRICS k8s
+ * | EVAL `bucket(@timestamp, 5m)` = datetrunc(@timestamp, '5m')
+ * | STATS rate(request), $p1=to_partial(min(memory_used)), VALUES(pod) BY _tsid, `bucket(@timestamp, 5m)`
+ * | STATS sum(`rate(request)`), `min(memory_used)` = from_partial($p1, min($)) BY pod=`VALUES(pod)`, `bucket(@timestamp, 5m)`
+ * | KEEP `min(memory_used)`, `sum(rate(request))`, pod, `bucket(@timestamp, 5m)`
+ *
      + */ +public final class TranslateMetricsAggregate extends OptimizerRules.OptimizerRule { + + public TranslateMetricsAggregate() { + super(OptimizerRules.TransformDirection.UP); + } + + @Override + protected LogicalPlan rule(Aggregate aggregate) { + if (aggregate.aggregateType() == Aggregate.AggregateType.METRICS) { + return translate(aggregate); + } else { + return aggregate; + } + } + + LogicalPlan translate(Aggregate metrics) { + Map rateAggs = new HashMap<>(); + List firstPassAggs = new ArrayList<>(); + List secondPassAggs = new ArrayList<>(); + for (NamedExpression agg : metrics.aggregates()) { + if (agg instanceof Alias alias && alias.child() instanceof AggregateFunction af) { + Holder changed = new Holder<>(Boolean.FALSE); + Expression outerAgg = af.transformDown(Rate.class, rate -> { + changed.set(Boolean.TRUE); + Alias rateAgg = rateAggs.computeIfAbsent(rate, k -> { + Alias newRateAgg = new Alias(rate.source(), agg.name(), rate); + firstPassAggs.add(newRateAgg); + return newRateAgg; + }); + return rateAgg.toAttribute(); + }); + if (changed.get()) { + secondPassAggs.add(new Alias(alias.source(), alias.name(), null, outerAgg, agg.id())); + } else { + var toPartial = new Alias(agg.source(), alias.name(), new ToPartial(agg.source(), af.field(), af)); + var fromPartial = new FromPartial(agg.source(), toPartial.toAttribute(), af); + firstPassAggs.add(toPartial); + secondPassAggs.add(new Alias(alias.source(), alias.name(), null, fromPartial, alias.id())); + } + } + } + if (rateAggs.isEmpty()) { + return toStandardAggregate(metrics); + } + Holder tsid = new Holder<>(); + Holder timestamp = new Holder<>(); + metrics.forEachDown(EsRelation.class, r -> { + for (Attribute attr : r.output()) { + if (attr.name().equals(MetadataAttribute.TSID_FIELD)) { + tsid.set(attr); + } + if (attr.name().equals(MetadataAttribute.TIMESTAMP_FIELD)) { + timestamp.set(attr); + } + } + }); + if (tsid.get() == null || timestamp.get() == null) { + throw new IllegalArgumentException("_tsid or @timestamp field are missing from the metrics source"); + } + // metrics aggregates must be grouped by _tsid (and time-bucket) first and re-group by users key + List firstPassGroupings = new ArrayList<>(); + firstPassGroupings.add(tsid.get()); + List secondPassGroupings = new ArrayList<>(); + Holder timeBucketRef = new Holder<>(); + metrics.child().forEachExpressionUp(NamedExpression.class, e -> { + for (Expression child : e.children()) { + if (child instanceof Bucket bucket && bucket.field().equals(timestamp.get())) { + if (timeBucketRef.get() != null) { + throw new IllegalArgumentException("expected at most one time bucket"); + } + timeBucketRef.set(e); + } + } + }); + NamedExpression timeBucket = timeBucketRef.get(); + for (Expression group : metrics.groupings()) { + if (group instanceof Attribute == false) { + throw new EsqlIllegalArgumentException("expected named expression for grouping; got " + group); + } + final Attribute g = (Attribute) group; + final NamedExpression newFinalGroup; + if (timeBucket != null && g.id().equals(timeBucket.id())) { + newFinalGroup = timeBucket.toAttribute(); + firstPassGroupings.add(newFinalGroup); + } else { + newFinalGroup = new Alias(g.source(), g.name(), null, new Values(g.source(), g), g.id()); + firstPassAggs.add(newFinalGroup); + } + secondPassGroupings.add(new Alias(g.source(), g.name(), null, newFinalGroup.toAttribute(), g.id())); + } + return newAggregate( + newAggregate(metrics.child(), Aggregate.AggregateType.METRICS, firstPassAggs, firstPassGroupings), + 
Aggregate.AggregateType.STANDARD, + secondPassAggs, + secondPassGroupings + ); + } + + private static Aggregate toStandardAggregate(Aggregate metrics) { + final LogicalPlan child = metrics.child().transformDown(EsRelation.class, r -> { + var attributes = new ArrayList<>(new AttributeSet(metrics.inputSet())); + attributes.removeIf(a -> a.name().equals(MetadataAttribute.TSID_FIELD)); + if (attributes.stream().noneMatch(a -> a.name().equals(MetadataAttribute.TIMESTAMP_FIELD))) { + attributes.removeIf(a -> a.name().equals(MetadataAttribute.TIMESTAMP_FIELD)); + } + return new EsRelation(r.source(), r.index(), new ArrayList<>(attributes), IndexMode.STANDARD); + }); + return new Aggregate(metrics.source(), child, Aggregate.AggregateType.STANDARD, metrics.groupings(), metrics.aggregates()); + } + + private static Aggregate newAggregate( + LogicalPlan child, + Aggregate.AggregateType type, + List aggregates, + List groupings + ) { + return new Aggregate( + child.source(), + child, + type, + groupings, + Stream.concat(aggregates.stream(), groupings.stream().map(Expressions::attribute)).toList() + ); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/AbstractBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/AbstractBuilder.java index 0ec1d0b742726..ce8c743106411 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/AbstractBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/AbstractBuilder.java @@ -20,28 +20,31 @@ public Object visit(ParseTree tree) { } @Override - public Object visitTerminal(TerminalNode node) { + public Source visitTerminal(TerminalNode node) { return ParserUtils.source(node); } - static String unquoteString(Source source) { - String text = source.text(); - if (text == null) { + static String unquote(Source source) { + return unquote(source.text()); + } + + static String unquote(String string) { + if (string == null) { return null; } // unescaped strings can be interpreted directly - if (text.startsWith("\"\"\"")) { - return text.substring(3, text.length() - 3); + if (string.startsWith("\"\"\"")) { + return string.substring(3, string.length() - 3); } - text = text.substring(1, text.length() - 1); + string = string.substring(1, string.length() - 1); StringBuilder sb = new StringBuilder(); - for (int i = 0; i < text.length();) { - if (text.charAt(i) == '\\') { + for (int i = 0; i < string.length();) { + if (string.charAt(i) == '\\') { // ANTLR4 Grammar guarantees there is always a character after the `\` - switch (text.charAt(++i)) { + switch (string.charAt(++i)) { case 't' -> sb.append('\t'); case 'n' -> sb.append('\n'); case 'r' -> sb.append('\r'); @@ -51,11 +54,11 @@ static String unquoteString(Source source) { // will be interpreted as regex, so we have to escape it default -> // unknown escape sequence, pass through as-is, e.g: `...\w...` - sb.append('\\').append(text.charAt(i)); + sb.append('\\').append(string.charAt(i)); } i++; } else { - sb.append(text.charAt(i++)); + sb.append(string.charAt(i++)); } } return sb.toString(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.interp b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.interp index eb3689d0900d3..f5484f3e7070f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.interp +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.interp @@ -151,7 +151,7 @@ UNKNOWN_CMD LINE_COMMENT MULTILINE_COMMENT WS -INDEX_UNQUOTED_IDENTIFIER +UNQUOTED_SOURCE EXPLAIN_WS EXPLAIN_LINE_COMMENT EXPLAIN_MULTILINE_COMMENT @@ -277,8 +277,8 @@ UNKNOWN_CMD LINE_COMMENT MULTILINE_COMMENT WS -INDEX_UNQUOTED_IDENTIFIER_PART -INDEX_UNQUOTED_IDENTIFIER +UNQUOTED_SOURCE_PART +UNQUOTED_SOURCE EXPLAIN_OPENING_BRACKET EXPLAIN_PIPE EXPLAIN_WS @@ -345,11 +345,12 @@ EXPR_WS FROM_PIPE FROM_OPENING_BRACKET FROM_CLOSING_BRACKET +FROM_COLON FROM_COMMA FROM_ASSIGN -FROM_QUOTED_STRING METADATA -FROM_INDEX_UNQUOTED_IDENTIFIER +FROM_UNQUOTED_SOURCE +FROM_QUOTED_SOURCE FROM_LINE_COMMENT FROM_MULTILINE_COMMENT FROM_WS @@ -377,7 +378,6 @@ ON WITH ENRICH_POLICY_NAME_BODY ENRICH_POLICY_NAME -ENRICH_QUOTED_IDENTIFIER ENRICH_MODE_UNQUOTED_VALUE ENRICH_LINE_COMMENT ENRICH_MULTILINE_COMMENT @@ -393,10 +393,12 @@ ENRICH_FIELD_LINE_COMMENT ENRICH_FIELD_MULTILINE_COMMENT ENRICH_FIELD_WS LOOKUP_PIPE +LOOKUP_COLON LOOKUP_COMMA LOOKUP_DOT LOOKUP_ON -LOOKUP_INDEX_UNQUOTED_IDENTIFIER +LOOKUP_UNQUOTED_SOURCE +LOOKUP_QUOTED_SOURCE LOOKUP_LINE_COMMENT LOOKUP_MULTILINE_COMMENT LOOKUP_WS @@ -431,10 +433,12 @@ SETTING_LINE_COMMENT SETTTING_MULTILINE_COMMENT SETTING_WS METRICS_PIPE -METRICS_INDEX_UNQUOTED_IDENTIFIER +METRICS_UNQUOTED_SOURCE +METRICS_QUOTED_SOURCE METRICS_LINE_COMMENT METRICS_MULTILINE_COMMENT METRICS_WS +CLOSING_METRICS_COLON CLOSING_METRICS_COMMA CLOSING_METRICS_LINE_COMMENT CLOSING_METRICS_MULTILINE_COMMENT @@ -467,4 +471,4 @@ METRICS_MODE CLOSING_METRICS_MODE atn: -[4, 0, 124, 1422, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, 47, 7, 47, 2, 48, 7, 48, 2, 49, 7, 49, 2, 50, 7, 50, 2, 51, 7, 51, 2, 52, 7, 52, 2, 53, 7, 53, 2, 54, 7, 54, 2, 55, 7, 55, 2, 56, 7, 56, 2, 57, 7, 57, 2, 58, 7, 58, 2, 59, 7, 59, 2, 60, 7, 60, 2, 61, 7, 61, 2, 62, 7, 62, 2, 63, 7, 63, 2, 64, 7, 64, 2, 65, 7, 65, 2, 66, 7, 66, 2, 67, 7, 67, 2, 68, 7, 68, 2, 69, 7, 69, 2, 70, 7, 70, 2, 71, 7, 71, 2, 72, 7, 72, 2, 73, 7, 73, 2, 74, 7, 74, 2, 75, 7, 75, 2, 76, 7, 76, 2, 77, 7, 77, 2, 78, 7, 78, 2, 79, 7, 79, 2, 80, 7, 80, 2, 81, 7, 81, 2, 82, 7, 82, 2, 83, 7, 83, 2, 84, 7, 84, 2, 85, 7, 85, 2, 86, 7, 86, 2, 87, 7, 87, 2, 88, 7, 88, 2, 89, 7, 89, 2, 90, 7, 90, 2, 91, 7, 91, 2, 92, 7, 92, 2, 93, 7, 93, 2, 94, 7, 94, 2, 95, 7, 95, 2, 96, 7, 96, 2, 97, 7, 97, 2, 98, 7, 98, 2, 99, 7, 99, 2, 100, 7, 100, 2, 101, 7, 101, 2, 102, 7, 102, 2, 103, 7, 103, 2, 104, 7, 104, 2, 105, 7, 105, 2, 106, 7, 106, 2, 107, 7, 107, 2, 108, 7, 108, 2, 109, 7, 109, 2, 110, 7, 110, 2, 111, 7, 111, 2, 112, 7, 112, 2, 113, 7, 113, 2, 114, 7, 114, 2, 115, 7, 115, 2, 116, 7, 116, 2, 117, 7, 117, 2, 118, 7, 118, 2, 119, 7, 119, 2, 120, 7, 120, 2, 121, 7, 121, 2, 122, 7, 122, 2, 123, 7, 123, 2, 124, 7, 124, 2, 125, 7, 125, 2, 126, 7, 126, 2, 127, 
7, 127, 2, 128, 7, 128, 2, 129, 7, 129, 2, 130, 7, 130, 2, 131, 7, 131, 2, 132, 7, 132, 2, 133, 7, 133, 2, 134, 7, 134, 2, 135, 7, 135, 2, 136, 7, 136, 2, 137, 7, 137, 2, 138, 7, 138, 2, 139, 7, 139, 2, 140, 7, 140, 2, 141, 7, 141, 2, 142, 7, 142, 2, 143, 7, 143, 2, 144, 7, 144, 2, 145, 7, 145, 2, 146, 7, 146, 2, 147, 7, 147, 2, 148, 7, 148, 2, 149, 7, 149, 2, 150, 7, 150, 2, 151, 7, 151, 2, 152, 7, 152, 2, 153, 7, 153, 2, 154, 7, 154, 2, 155, 7, 155, 2, 156, 7, 156, 2, 157, 7, 157, 2, 158, 7, 158, 2, 159, 7, 159, 2, 160, 7, 160, 2, 161, 7, 161, 2, 162, 7, 162, 2, 163, 7, 163, 2, 164, 7, 164, 2, 165, 7, 165, 2, 166, 7, 166, 2, 167, 7, 167, 2, 168, 7, 168, 2, 169, 7, 169, 2, 170, 7, 170, 2, 171, 7, 171, 2, 172, 7, 172, 2, 173, 7, 173, 2, 174, 7, 174, 2, 175, 7, 175, 2, 176, 7, 176, 2, 177, 7, 177, 2, 178, 7, 178, 2, 179, 7, 179, 2, 180, 7, 180, 2, 181, 7, 181, 2, 182, 7, 182, 2, 183, 7, 183, 2, 184, 7, 184, 2, 185, 7, 185, 2, 186, 7, 186, 2, 187, 7, 187, 2, 188, 7, 188, 2, 189, 7, 189, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 20, 4, 20, 567, 8, 20, 11, 20, 12, 20, 568, 1, 20, 1, 20, 1, 21, 1, 21, 1, 21, 1, 21, 5, 21, 577, 8, 21, 10, 21, 12, 21, 580, 9, 21, 1, 21, 3, 21, 583, 8, 21, 1, 21, 3, 21, 586, 8, 21, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22, 1, 22, 1, 22, 5, 22, 595, 8, 22, 10, 22, 12, 22, 598, 9, 22, 1, 22, 1, 22, 1, 22, 1, 22, 1, 22, 1, 23, 4, 23, 606, 8, 23, 11, 23, 12, 23, 607, 1, 23, 1, 23, 1, 24, 1, 24, 1, 24, 3, 24, 615, 8, 24, 1, 25, 4, 25, 618, 8, 25, 11, 25, 12, 25, 619, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 28, 1, 28, 1, 29, 1, 29, 1, 29, 1, 29, 1, 30, 1, 30, 1, 30, 1, 30, 1, 31, 1, 31, 1, 31, 1, 31, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, 35, 1, 35, 1, 36, 1, 36, 3, 36, 659, 8, 36, 1, 36, 4, 36, 662, 8, 36, 11, 36, 12, 36, 663, 1, 37, 1, 37, 1, 38, 1, 38, 1, 39, 1, 39, 1, 39, 3, 39, 673, 8, 39, 1, 40, 1, 40, 1, 41, 1, 41, 1, 41, 3, 41, 680, 8, 41, 1, 42, 1, 42, 1, 42, 5, 42, 685, 8, 42, 10, 42, 12, 42, 688, 9, 42, 1, 42, 1, 42, 1, 42, 1, 42, 1, 42, 1, 42, 5, 42, 696, 8, 42, 10, 42, 12, 42, 699, 9, 42, 1, 42, 1, 42, 1, 42, 1, 42, 1, 42, 3, 42, 706, 8, 42, 1, 42, 3, 42, 709, 8, 42, 3, 42, 711, 8, 42, 1, 43, 4, 43, 714, 8, 43, 11, 43, 12, 43, 715, 1, 44, 4, 44, 719, 8, 44, 11, 44, 12, 44, 720, 1, 44, 1, 44, 5, 44, 725, 8, 44, 10, 44, 12, 44, 728, 9, 44, 1, 44, 1, 44, 4, 44, 732, 8, 44, 11, 44, 12, 44, 733, 1, 44, 4, 44, 737, 8, 44, 11, 44, 12, 44, 738, 1, 44, 1, 44, 5, 44, 743, 8, 
44, 10, 44, 12, 44, 746, 9, 44, 3, 44, 748, 8, 44, 1, 44, 1, 44, 1, 44, 1, 44, 4, 44, 754, 8, 44, 11, 44, 12, 44, 755, 1, 44, 1, 44, 3, 44, 760, 8, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1, 46, 1, 46, 1, 46, 1, 47, 1, 47, 1, 47, 1, 47, 1, 48, 1, 48, 1, 49, 1, 49, 1, 49, 1, 50, 1, 50, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 52, 1, 52, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 56, 1, 56, 1, 57, 1, 57, 1, 57, 1, 58, 1, 58, 1, 58, 1, 59, 1, 59, 1, 59, 1, 59, 1, 59, 1, 60, 1, 60, 1, 60, 1, 60, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 62, 1, 62, 1, 62, 1, 62, 1, 62, 1, 62, 1, 63, 1, 63, 1, 63, 1, 64, 1, 64, 1, 65, 1, 65, 1, 65, 1, 65, 1, 65, 1, 65, 1, 66, 1, 66, 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 1, 68, 1, 68, 1, 68, 1, 69, 1, 69, 1, 69, 1, 70, 1, 70, 1, 70, 1, 71, 1, 71, 1, 72, 1, 72, 1, 72, 1, 73, 1, 73, 1, 74, 1, 74, 1, 74, 1, 75, 1, 75, 1, 76, 1, 76, 1, 77, 1, 77, 1, 78, 1, 78, 1, 79, 1, 79, 1, 80, 1, 80, 1, 80, 5, 80, 882, 8, 80, 10, 80, 12, 80, 885, 9, 80, 1, 80, 1, 80, 4, 80, 889, 8, 80, 11, 80, 12, 80, 890, 3, 80, 893, 8, 80, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 82, 1, 82, 1, 82, 1, 82, 1, 82, 1, 83, 1, 83, 5, 83, 907, 8, 83, 10, 83, 12, 83, 910, 9, 83, 1, 83, 1, 83, 3, 83, 914, 8, 83, 1, 83, 4, 83, 917, 8, 83, 11, 83, 12, 83, 918, 3, 83, 921, 8, 83, 1, 84, 1, 84, 4, 84, 925, 8, 84, 11, 84, 12, 84, 926, 1, 84, 1, 84, 1, 85, 1, 85, 1, 86, 1, 86, 1, 86, 1, 86, 1, 87, 1, 87, 1, 87, 1, 87, 1, 88, 1, 88, 1, 88, 1, 88, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 90, 1, 90, 1, 90, 1, 90, 1, 91, 1, 91, 1, 91, 1, 91, 1, 92, 1, 92, 1, 92, 1, 92, 1, 93, 1, 93, 1, 93, 1, 93, 1, 94, 1, 94, 1, 94, 1, 94, 1, 95, 1, 95, 1, 95, 1, 95, 1, 95, 1, 95, 1, 95, 1, 95, 1, 95, 1, 96, 1, 96, 1, 96, 1, 96, 1, 97, 1, 97, 1, 97, 1, 97, 1, 98, 1, 98, 1, 98, 1, 98, 1, 99, 1, 99, 1, 99, 1, 99, 1, 100, 1, 100, 1, 100, 1, 100, 1, 100, 1, 101, 1, 101, 1, 101, 1, 101, 1, 102, 1, 102, 1, 102, 1, 102, 1, 103, 1, 103, 1, 103, 1, 103, 3, 103, 1012, 8, 103, 1, 104, 1, 104, 3, 104, 1016, 8, 104, 1, 104, 5, 104, 1019, 8, 104, 10, 104, 12, 104, 1022, 9, 104, 1, 104, 1, 104, 3, 104, 1026, 8, 104, 1, 104, 4, 104, 1029, 8, 104, 11, 104, 12, 104, 1030, 3, 104, 1033, 8, 104, 1, 105, 1, 105, 4, 105, 1037, 8, 105, 11, 105, 12, 105, 1038, 1, 106, 1, 106, 1, 106, 1, 106, 1, 107, 1, 107, 1, 107, 1, 107, 1, 108, 1, 108, 1, 108, 1, 108, 1, 109, 1, 109, 1, 109, 1, 109, 1, 109, 1, 110, 1, 110, 1, 110, 1, 110, 1, 111, 1, 111, 1, 111, 1, 111, 1, 112, 1, 112, 1, 112, 1, 112, 1, 113, 1, 113, 1, 113, 1, 114, 1, 114, 1, 114, 1, 114, 1, 115, 1, 115, 1, 115, 1, 115, 1, 116, 1, 116, 1, 116, 1, 116, 1, 117, 1, 117, 1, 117, 1, 117, 1, 118, 1, 118, 1, 118, 1, 118, 1, 118, 1, 119, 1, 119, 1, 119, 1, 119, 1, 119, 1, 120, 1, 120, 1, 120, 1, 120, 1, 120, 1, 121, 1, 121, 1, 121, 1, 121, 1, 121, 1, 121, 1, 121, 1, 122, 1, 122, 1, 123, 4, 123, 1114, 8, 123, 11, 123, 12, 123, 1115, 1, 123, 1, 123, 3, 123, 1120, 8, 123, 1, 123, 4, 123, 1123, 8, 123, 11, 123, 12, 123, 1124, 1, 124, 1, 124, 1, 124, 1, 124, 1, 125, 1, 125, 1, 125, 1, 125, 1, 126, 1, 126, 1, 126, 1, 126, 1, 127, 1, 127, 1, 127, 1, 127, 1, 128, 1, 128, 1, 128, 1, 128, 1, 129, 1, 129, 1, 129, 1, 129, 1, 129, 1, 129, 1, 130, 1, 130, 1, 130, 1, 130, 1, 131, 1, 131, 1, 131, 1, 131, 1, 132, 1, 132, 1, 132, 1, 132, 1, 133, 1, 133, 1, 133, 1, 133, 1, 134, 1, 134, 1, 134, 1, 134, 1, 135, 1, 135, 1, 135, 1, 135, 1, 136, 1, 136, 1, 136, 1, 136, 1, 137, 1, 137, 1, 137, 1, 137, 1, 138, 1, 138, 1, 138, 1, 138, 1, 139, 1, 139, 1, 139, 1, 139, 1, 139, 1, 
140, 1, 140, 1, 140, 1, 140, 1, 141, 1, 141, 1, 141, 1, 141, 1, 142, 1, 142, 1, 142, 1, 142, 1, 142, 1, 143, 1, 143, 1, 143, 1, 143, 1, 144, 1, 144, 1, 144, 1, 144, 1, 145, 1, 145, 1, 145, 1, 145, 1, 146, 1, 146, 1, 146, 1, 146, 1, 147, 1, 147, 1, 147, 1, 147, 1, 147, 1, 147, 1, 148, 1, 148, 1, 148, 1, 148, 1, 149, 1, 149, 1, 149, 1, 149, 1, 150, 1, 150, 1, 150, 1, 150, 1, 151, 1, 151, 1, 151, 1, 151, 1, 152, 1, 152, 1, 152, 1, 152, 1, 153, 1, 153, 1, 153, 1, 153, 1, 154, 1, 154, 1, 154, 1, 154, 1, 154, 1, 155, 1, 155, 1, 155, 1, 155, 1, 156, 1, 156, 1, 156, 1, 156, 1, 157, 1, 157, 1, 157, 1, 157, 1, 158, 1, 158, 1, 158, 1, 158, 1, 159, 1, 159, 1, 159, 1, 159, 1, 160, 1, 160, 1, 160, 1, 160, 1, 161, 1, 161, 1, 161, 1, 161, 1, 161, 1, 162, 1, 162, 1, 162, 1, 162, 1, 162, 1, 163, 1, 163, 1, 163, 1, 163, 1, 164, 1, 164, 1, 164, 1, 164, 1, 165, 1, 165, 1, 165, 1, 165, 1, 166, 1, 166, 1, 166, 1, 166, 1, 166, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 168, 1, 168, 1, 168, 1, 168, 1, 169, 1, 169, 1, 169, 1, 169, 1, 170, 1, 170, 1, 170, 1, 170, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 172, 1, 172, 1, 173, 1, 173, 1, 173, 1, 173, 1, 173, 4, 173, 1343, 8, 173, 11, 173, 12, 173, 1344, 1, 174, 1, 174, 1, 174, 1, 174, 1, 175, 1, 175, 1, 175, 1, 175, 1, 176, 1, 176, 1, 176, 1, 176, 1, 177, 1, 177, 1, 177, 1, 177, 1, 177, 1, 178, 1, 178, 1, 178, 1, 178, 1, 178, 1, 178, 1, 179, 1, 179, 1, 179, 1, 179, 1, 180, 1, 180, 1, 180, 1, 180, 1, 181, 1, 181, 1, 181, 1, 181, 1, 182, 1, 182, 1, 182, 1, 182, 1, 182, 1, 182, 1, 183, 1, 183, 1, 183, 1, 183, 1, 184, 1, 184, 1, 184, 1, 184, 1, 185, 1, 185, 1, 185, 1, 185, 1, 186, 1, 186, 1, 186, 1, 186, 1, 186, 1, 186, 1, 187, 1, 187, 1, 187, 1, 187, 1, 187, 1, 187, 1, 188, 1, 188, 1, 188, 1, 188, 1, 188, 1, 188, 1, 189, 1, 189, 1, 189, 1, 189, 1, 189, 2, 596, 697, 0, 190, 16, 1, 18, 2, 20, 3, 22, 4, 24, 5, 26, 6, 28, 7, 30, 8, 32, 9, 34, 10, 36, 11, 38, 12, 40, 13, 42, 14, 44, 15, 46, 16, 48, 17, 50, 18, 52, 19, 54, 20, 56, 21, 58, 22, 60, 23, 62, 24, 64, 0, 66, 25, 68, 0, 70, 0, 72, 26, 74, 27, 76, 28, 78, 29, 80, 0, 82, 0, 84, 0, 86, 0, 88, 0, 90, 0, 92, 0, 94, 0, 96, 0, 98, 0, 100, 30, 102, 31, 104, 32, 106, 33, 108, 34, 110, 35, 112, 36, 114, 37, 116, 38, 118, 39, 120, 40, 122, 41, 124, 42, 126, 43, 128, 44, 130, 45, 132, 46, 134, 47, 136, 48, 138, 49, 140, 50, 142, 51, 144, 52, 146, 53, 148, 54, 150, 55, 152, 56, 154, 57, 156, 58, 158, 59, 160, 60, 162, 61, 164, 62, 166, 63, 168, 64, 170, 65, 172, 66, 174, 67, 176, 68, 178, 69, 180, 70, 182, 71, 184, 0, 186, 72, 188, 73, 190, 74, 192, 75, 194, 0, 196, 0, 198, 0, 200, 0, 202, 0, 204, 0, 206, 76, 208, 0, 210, 77, 212, 78, 214, 79, 216, 0, 218, 0, 220, 0, 222, 0, 224, 0, 226, 80, 228, 81, 230, 82, 232, 83, 234, 0, 236, 0, 238, 0, 240, 0, 242, 84, 244, 0, 246, 85, 248, 86, 250, 87, 252, 0, 254, 0, 256, 88, 258, 89, 260, 0, 262, 90, 264, 0, 266, 0, 268, 91, 270, 92, 272, 93, 274, 0, 276, 0, 278, 0, 280, 0, 282, 0, 284, 0, 286, 0, 288, 94, 290, 95, 292, 96, 294, 0, 296, 0, 298, 0, 300, 0, 302, 0, 304, 97, 306, 98, 308, 99, 310, 0, 312, 0, 314, 0, 316, 0, 318, 100, 320, 101, 322, 102, 324, 0, 326, 0, 328, 0, 330, 0, 332, 103, 334, 104, 336, 105, 338, 0, 340, 106, 342, 107, 344, 108, 346, 109, 348, 0, 350, 110, 352, 111, 354, 112, 356, 113, 358, 0, 360, 114, 362, 115, 364, 116, 366, 117, 368, 118, 370, 0, 372, 0, 374, 119, 376, 120, 378, 121, 380, 0, 382, 122, 384, 123, 386, 124, 388, 0, 390, 0, 392, 0, 394, 0, 16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 13, 6, 0, 9, 
10, 13, 13, 32, 32, 47, 47, 91, 91, 93, 93, 2, 0, 10, 10, 13, 13, 3, 0, 9, 10, 13, 13, 32, 32, 10, 0, 9, 10, 13, 13, 32, 32, 44, 44, 47, 47, 61, 61, 91, 91, 93, 93, 96, 96, 124, 124, 2, 0, 42, 42, 47, 47, 1, 0, 48, 57, 2, 0, 65, 90, 97, 122, 5, 0, 34, 34, 92, 92, 110, 110, 114, 114, 116, 116, 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 1, 0, 96, 96, 11, 0, 9, 10, 13, 13, 32, 32, 34, 35, 44, 44, 47, 47, 58, 58, 60, 60, 62, 63, 92, 92, 124, 124, 1448, 0, 16, 1, 0, 0, 0, 0, 18, 1, 0, 0, 0, 0, 20, 1, 0, 0, 0, 0, 22, 1, 0, 0, 0, 0, 24, 1, 0, 0, 0, 0, 26, 1, 0, 0, 0, 0, 28, 1, 0, 0, 0, 0, 30, 1, 0, 0, 0, 0, 32, 1, 0, 0, 0, 0, 34, 1, 0, 0, 0, 0, 36, 1, 0, 0, 0, 0, 38, 1, 0, 0, 0, 0, 40, 1, 0, 0, 0, 0, 42, 1, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 46, 1, 0, 0, 0, 0, 48, 1, 0, 0, 0, 0, 50, 1, 0, 0, 0, 0, 52, 1, 0, 0, 0, 0, 54, 1, 0, 0, 0, 0, 56, 1, 0, 0, 0, 0, 58, 1, 0, 0, 0, 0, 60, 1, 0, 0, 0, 0, 62, 1, 0, 0, 0, 0, 66, 1, 0, 0, 0, 1, 68, 1, 0, 0, 0, 1, 70, 1, 0, 0, 0, 1, 72, 1, 0, 0, 0, 1, 74, 1, 0, 0, 0, 1, 76, 1, 0, 0, 0, 2, 78, 1, 0, 0, 0, 2, 100, 1, 0, 0, 0, 2, 102, 1, 0, 0, 0, 2, 104, 1, 0, 0, 0, 2, 106, 1, 0, 0, 0, 2, 108, 1, 0, 0, 0, 2, 110, 1, 0, 0, 0, 2, 112, 1, 0, 0, 0, 2, 114, 1, 0, 0, 0, 2, 116, 1, 0, 0, 0, 2, 118, 1, 0, 0, 0, 2, 120, 1, 0, 0, 0, 2, 122, 1, 0, 0, 0, 2, 124, 1, 0, 0, 0, 2, 126, 1, 0, 0, 0, 2, 128, 1, 0, 0, 0, 2, 130, 1, 0, 0, 0, 2, 132, 1, 0, 0, 0, 2, 134, 1, 0, 0, 0, 2, 136, 1, 0, 0, 0, 2, 138, 1, 0, 0, 0, 2, 140, 1, 0, 0, 0, 2, 142, 1, 0, 0, 0, 2, 144, 1, 0, 0, 0, 2, 146, 1, 0, 0, 0, 2, 148, 1, 0, 0, 0, 2, 150, 1, 0, 0, 0, 2, 152, 1, 0, 0, 0, 2, 154, 1, 0, 0, 0, 2, 156, 1, 0, 0, 0, 2, 158, 1, 0, 0, 0, 2, 160, 1, 0, 0, 0, 2, 162, 1, 0, 0, 0, 2, 164, 1, 0, 0, 0, 2, 166, 1, 0, 0, 0, 2, 168, 1, 0, 0, 0, 2, 170, 1, 0, 0, 0, 2, 172, 1, 0, 0, 0, 2, 174, 1, 0, 0, 0, 2, 176, 1, 0, 0, 0, 2, 178, 1, 0, 0, 0, 2, 180, 1, 0, 0, 0, 2, 182, 1, 0, 0, 0, 2, 186, 1, 0, 0, 0, 2, 188, 1, 0, 0, 0, 2, 190, 1, 0, 0, 0, 2, 192, 1, 0, 0, 0, 3, 194, 1, 0, 0, 0, 3, 196, 1, 0, 0, 0, 3, 198, 1, 0, 0, 0, 3, 200, 1, 0, 0, 0, 3, 202, 1, 0, 0, 0, 3, 204, 1, 0, 0, 0, 3, 206, 1, 0, 0, 0, 3, 208, 1, 0, 0, 0, 3, 210, 1, 0, 0, 0, 3, 212, 1, 0, 0, 0, 3, 214, 1, 0, 0, 0, 4, 216, 1, 0, 0, 0, 4, 218, 1, 0, 0, 0, 4, 220, 1, 0, 0, 0, 4, 226, 1, 0, 0, 0, 4, 228, 1, 0, 0, 0, 4, 230, 1, 0, 0, 0, 4, 232, 1, 0, 0, 0, 5, 234, 1, 0, 0, 0, 5, 236, 1, 0, 0, 0, 5, 238, 1, 0, 0, 0, 5, 240, 1, 0, 0, 0, 5, 242, 1, 0, 0, 0, 5, 244, 1, 0, 0, 0, 5, 246, 1, 0, 0, 0, 5, 248, 1, 0, 0, 0, 5, 250, 1, 0, 0, 0, 6, 252, 1, 0, 0, 0, 6, 254, 1, 0, 0, 0, 6, 256, 1, 0, 0, 0, 6, 258, 1, 0, 0, 0, 6, 262, 1, 0, 0, 0, 6, 264, 1, 0, 0, 0, 6, 266, 1, 0, 0, 0, 6, 268, 1, 0, 0, 0, 6, 270, 1, 0, 0, 0, 6, 272, 1, 0, 0, 0, 7, 274, 1, 0, 0, 0, 7, 276, 1, 0, 0, 0, 7, 278, 1, 0, 0, 0, 7, 280, 1, 0, 0, 0, 7, 282, 1, 0, 0, 0, 7, 284, 1, 0, 0, 0, 7, 286, 1, 0, 0, 0, 7, 288, 1, 0, 0, 0, 7, 290, 1, 0, 0, 0, 7, 292, 1, 0, 0, 0, 8, 294, 1, 0, 0, 0, 8, 296, 1, 0, 0, 0, 8, 298, 1, 0, 0, 0, 8, 300, 1, 0, 0, 0, 8, 302, 1, 0, 0, 0, 8, 304, 1, 0, 0, 0, 8, 306, 1, 0, 0, 0, 8, 308, 1, 0, 0, 0, 9, 310, 1, 0, 0, 0, 9, 312, 1, 0, 0, 0, 9, 314, 1, 0, 0, 0, 9, 316, 1, 0, 0, 0, 9, 318, 1, 0, 0, 0, 9, 320, 1, 0, 0, 0, 9, 322, 1, 0, 0, 0, 10, 324, 1, 0, 0, 0, 10, 326, 1, 0, 0, 0, 10, 328, 1, 0, 0, 0, 10, 330, 1, 0, 0, 0, 10, 332, 1, 0, 0, 0, 10, 334, 1, 0, 0, 0, 10, 336, 1, 0, 0, 0, 11, 338, 1, 0, 0, 0, 11, 340, 1, 0, 0, 0, 11, 342, 1, 0, 0, 0, 11, 344, 1, 0, 0, 0, 11, 346, 1, 0, 0, 0, 12, 348, 1, 0, 0, 0, 12, 350, 1, 0, 0, 0, 12, 352, 1, 0, 0, 
0, 12, 354, 1, 0, 0, 0, 12, 356, 1, 0, 0, 0, 13, 358, 1, 0, 0, 0, 13, 360, 1, 0, 0, 0, 13, 362, 1, 0, 0, 0, 13, 364, 1, 0, 0, 0, 13, 366, 1, 0, 0, 0, 13, 368, 1, 0, 0, 0, 14, 370, 1, 0, 0, 0, 14, 372, 1, 0, 0, 0, 14, 374, 1, 0, 0, 0, 14, 376, 1, 0, 0, 0, 14, 378, 1, 0, 0, 0, 15, 380, 1, 0, 0, 0, 15, 382, 1, 0, 0, 0, 15, 384, 1, 0, 0, 0, 15, 386, 1, 0, 0, 0, 15, 388, 1, 0, 0, 0, 15, 390, 1, 0, 0, 0, 15, 392, 1, 0, 0, 0, 15, 394, 1, 0, 0, 0, 16, 396, 1, 0, 0, 0, 18, 406, 1, 0, 0, 0, 20, 413, 1, 0, 0, 0, 22, 422, 1, 0, 0, 0, 24, 429, 1, 0, 0, 0, 26, 439, 1, 0, 0, 0, 28, 446, 1, 0, 0, 0, 30, 453, 1, 0, 0, 0, 32, 467, 1, 0, 0, 0, 34, 474, 1, 0, 0, 0, 36, 482, 1, 0, 0, 0, 38, 491, 1, 0, 0, 0, 40, 498, 1, 0, 0, 0, 42, 508, 1, 0, 0, 0, 44, 520, 1, 0, 0, 0, 46, 529, 1, 0, 0, 0, 48, 535, 1, 0, 0, 0, 50, 542, 1, 0, 0, 0, 52, 549, 1, 0, 0, 0, 54, 557, 1, 0, 0, 0, 56, 566, 1, 0, 0, 0, 58, 572, 1, 0, 0, 0, 60, 589, 1, 0, 0, 0, 62, 605, 1, 0, 0, 0, 64, 614, 1, 0, 0, 0, 66, 617, 1, 0, 0, 0, 68, 621, 1, 0, 0, 0, 70, 626, 1, 0, 0, 0, 72, 631, 1, 0, 0, 0, 74, 635, 1, 0, 0, 0, 76, 639, 1, 0, 0, 0, 78, 643, 1, 0, 0, 0, 80, 647, 1, 0, 0, 0, 82, 649, 1, 0, 0, 0, 84, 651, 1, 0, 0, 0, 86, 654, 1, 0, 0, 0, 88, 656, 1, 0, 0, 0, 90, 665, 1, 0, 0, 0, 92, 667, 1, 0, 0, 0, 94, 672, 1, 0, 0, 0, 96, 674, 1, 0, 0, 0, 98, 679, 1, 0, 0, 0, 100, 710, 1, 0, 0, 0, 102, 713, 1, 0, 0, 0, 104, 759, 1, 0, 0, 0, 106, 761, 1, 0, 0, 0, 108, 764, 1, 0, 0, 0, 110, 768, 1, 0, 0, 0, 112, 772, 1, 0, 0, 0, 114, 774, 1, 0, 0, 0, 116, 777, 1, 0, 0, 0, 118, 779, 1, 0, 0, 0, 120, 784, 1, 0, 0, 0, 122, 786, 1, 0, 0, 0, 124, 792, 1, 0, 0, 0, 126, 798, 1, 0, 0, 0, 128, 803, 1, 0, 0, 0, 130, 805, 1, 0, 0, 0, 132, 808, 1, 0, 0, 0, 134, 811, 1, 0, 0, 0, 136, 816, 1, 0, 0, 0, 138, 820, 1, 0, 0, 0, 140, 825, 1, 0, 0, 0, 142, 831, 1, 0, 0, 0, 144, 834, 1, 0, 0, 0, 146, 836, 1, 0, 0, 0, 148, 842, 1, 0, 0, 0, 150, 844, 1, 0, 0, 0, 152, 849, 1, 0, 0, 0, 154, 852, 1, 0, 0, 0, 156, 855, 1, 0, 0, 0, 158, 858, 1, 0, 0, 0, 160, 860, 1, 0, 0, 0, 162, 863, 1, 0, 0, 0, 164, 865, 1, 0, 0, 0, 166, 868, 1, 0, 0, 0, 168, 870, 1, 0, 0, 0, 170, 872, 1, 0, 0, 0, 172, 874, 1, 0, 0, 0, 174, 876, 1, 0, 0, 0, 176, 892, 1, 0, 0, 0, 178, 894, 1, 0, 0, 0, 180, 899, 1, 0, 0, 0, 182, 920, 1, 0, 0, 0, 184, 922, 1, 0, 0, 0, 186, 930, 1, 0, 0, 0, 188, 932, 1, 0, 0, 0, 190, 936, 1, 0, 0, 0, 192, 940, 1, 0, 0, 0, 194, 944, 1, 0, 0, 0, 196, 949, 1, 0, 0, 0, 198, 953, 1, 0, 0, 0, 200, 957, 1, 0, 0, 0, 202, 961, 1, 0, 0, 0, 204, 965, 1, 0, 0, 0, 206, 969, 1, 0, 0, 0, 208, 978, 1, 0, 0, 0, 210, 982, 1, 0, 0, 0, 212, 986, 1, 0, 0, 0, 214, 990, 1, 0, 0, 0, 216, 994, 1, 0, 0, 0, 218, 999, 1, 0, 0, 0, 220, 1003, 1, 0, 0, 0, 222, 1011, 1, 0, 0, 0, 224, 1032, 1, 0, 0, 0, 226, 1036, 1, 0, 0, 0, 228, 1040, 1, 0, 0, 0, 230, 1044, 1, 0, 0, 0, 232, 1048, 1, 0, 0, 0, 234, 1052, 1, 0, 0, 0, 236, 1057, 1, 0, 0, 0, 238, 1061, 1, 0, 0, 0, 240, 1065, 1, 0, 0, 0, 242, 1069, 1, 0, 0, 0, 244, 1072, 1, 0, 0, 0, 246, 1076, 1, 0, 0, 0, 248, 1080, 1, 0, 0, 0, 250, 1084, 1, 0, 0, 0, 252, 1088, 1, 0, 0, 0, 254, 1093, 1, 0, 0, 0, 256, 1098, 1, 0, 0, 0, 258, 1103, 1, 0, 0, 0, 260, 1110, 1, 0, 0, 0, 262, 1119, 1, 0, 0, 0, 264, 1126, 1, 0, 0, 0, 266, 1130, 1, 0, 0, 0, 268, 1134, 1, 0, 0, 0, 270, 1138, 1, 0, 0, 0, 272, 1142, 1, 0, 0, 0, 274, 1146, 1, 0, 0, 0, 276, 1152, 1, 0, 0, 0, 278, 1156, 1, 0, 0, 0, 280, 1160, 1, 0, 0, 0, 282, 1164, 1, 0, 0, 0, 284, 1168, 1, 0, 0, 0, 286, 1172, 1, 0, 0, 0, 288, 1176, 1, 0, 0, 0, 290, 1180, 1, 0, 0, 0, 292, 1184, 1, 0, 0, 0, 294, 1188, 1, 0, 0, 0, 296, 1193, 1, 0, 0, 0, 298, 1197, 
1, 0, 0, 0, 300, 1201, 1, 0, 0, 0, 302, 1206, 1, 0, 0, 0, 304, 1210, 1, 0, 0, 0, 306, 1214, 1, 0, 0, 0, 308, 1218, 1, 0, 0, 0, 310, 1222, 1, 0, 0, 0, 312, 1228, 1, 0, 0, 0, 314, 1232, 1, 0, 0, 0, 316, 1236, 1, 0, 0, 0, 318, 1240, 1, 0, 0, 0, 320, 1244, 1, 0, 0, 0, 322, 1248, 1, 0, 0, 0, 324, 1252, 1, 0, 0, 0, 326, 1257, 1, 0, 0, 0, 328, 1261, 1, 0, 0, 0, 330, 1265, 1, 0, 0, 0, 332, 1269, 1, 0, 0, 0, 334, 1273, 1, 0, 0, 0, 336, 1277, 1, 0, 0, 0, 338, 1281, 1, 0, 0, 0, 340, 1286, 1, 0, 0, 0, 342, 1291, 1, 0, 0, 0, 344, 1295, 1, 0, 0, 0, 346, 1299, 1, 0, 0, 0, 348, 1303, 1, 0, 0, 0, 350, 1308, 1, 0, 0, 0, 352, 1318, 1, 0, 0, 0, 354, 1322, 1, 0, 0, 0, 356, 1326, 1, 0, 0, 0, 358, 1330, 1, 0, 0, 0, 360, 1335, 1, 0, 0, 0, 362, 1342, 1, 0, 0, 0, 364, 1346, 1, 0, 0, 0, 366, 1350, 1, 0, 0, 0, 368, 1354, 1, 0, 0, 0, 370, 1358, 1, 0, 0, 0, 372, 1363, 1, 0, 0, 0, 374, 1369, 1, 0, 0, 0, 376, 1373, 1, 0, 0, 0, 378, 1377, 1, 0, 0, 0, 380, 1381, 1, 0, 0, 0, 382, 1387, 1, 0, 0, 0, 384, 1391, 1, 0, 0, 0, 386, 1395, 1, 0, 0, 0, 388, 1399, 1, 0, 0, 0, 390, 1405, 1, 0, 0, 0, 392, 1411, 1, 0, 0, 0, 394, 1417, 1, 0, 0, 0, 396, 397, 5, 100, 0, 0, 397, 398, 5, 105, 0, 0, 398, 399, 5, 115, 0, 0, 399, 400, 5, 115, 0, 0, 400, 401, 5, 101, 0, 0, 401, 402, 5, 99, 0, 0, 402, 403, 5, 116, 0, 0, 403, 404, 1, 0, 0, 0, 404, 405, 6, 0, 0, 0, 405, 17, 1, 0, 0, 0, 406, 407, 5, 100, 0, 0, 407, 408, 5, 114, 0, 0, 408, 409, 5, 111, 0, 0, 409, 410, 5, 112, 0, 0, 410, 411, 1, 0, 0, 0, 411, 412, 6, 1, 1, 0, 412, 19, 1, 0, 0, 0, 413, 414, 5, 101, 0, 0, 414, 415, 5, 110, 0, 0, 415, 416, 5, 114, 0, 0, 416, 417, 5, 105, 0, 0, 417, 418, 5, 99, 0, 0, 418, 419, 5, 104, 0, 0, 419, 420, 1, 0, 0, 0, 420, 421, 6, 2, 2, 0, 421, 21, 1, 0, 0, 0, 422, 423, 5, 101, 0, 0, 423, 424, 5, 118, 0, 0, 424, 425, 5, 97, 0, 0, 425, 426, 5, 108, 0, 0, 426, 427, 1, 0, 0, 0, 427, 428, 6, 3, 0, 0, 428, 23, 1, 0, 0, 0, 429, 430, 5, 101, 0, 0, 430, 431, 5, 120, 0, 0, 431, 432, 5, 112, 0, 0, 432, 433, 5, 108, 0, 0, 433, 434, 5, 97, 0, 0, 434, 435, 5, 105, 0, 0, 435, 436, 5, 110, 0, 0, 436, 437, 1, 0, 0, 0, 437, 438, 6, 4, 3, 0, 438, 25, 1, 0, 0, 0, 439, 440, 5, 102, 0, 0, 440, 441, 5, 114, 0, 0, 441, 442, 5, 111, 0, 0, 442, 443, 5, 109, 0, 0, 443, 444, 1, 0, 0, 0, 444, 445, 6, 5, 4, 0, 445, 27, 1, 0, 0, 0, 446, 447, 5, 103, 0, 0, 447, 448, 5, 114, 0, 0, 448, 449, 5, 111, 0, 0, 449, 450, 5, 107, 0, 0, 450, 451, 1, 0, 0, 0, 451, 452, 6, 6, 0, 0, 452, 29, 1, 0, 0, 0, 453, 454, 5, 105, 0, 0, 454, 455, 5, 110, 0, 0, 455, 456, 5, 108, 0, 0, 456, 457, 5, 105, 0, 0, 457, 458, 5, 110, 0, 0, 458, 459, 5, 101, 0, 0, 459, 460, 5, 115, 0, 0, 460, 461, 5, 116, 0, 0, 461, 462, 5, 97, 0, 0, 462, 463, 5, 116, 0, 0, 463, 464, 5, 115, 0, 0, 464, 465, 1, 0, 0, 0, 465, 466, 6, 7, 0, 0, 466, 31, 1, 0, 0, 0, 467, 468, 5, 107, 0, 0, 468, 469, 5, 101, 0, 0, 469, 470, 5, 101, 0, 0, 470, 471, 5, 112, 0, 0, 471, 472, 1, 0, 0, 0, 472, 473, 6, 8, 1, 0, 473, 33, 1, 0, 0, 0, 474, 475, 5, 108, 0, 0, 475, 476, 5, 105, 0, 0, 476, 477, 5, 109, 0, 0, 477, 478, 5, 105, 0, 0, 478, 479, 5, 116, 0, 0, 479, 480, 1, 0, 0, 0, 480, 481, 6, 9, 0, 0, 481, 35, 1, 0, 0, 0, 482, 483, 5, 108, 0, 0, 483, 484, 5, 111, 0, 0, 484, 485, 5, 111, 0, 0, 485, 486, 5, 107, 0, 0, 486, 487, 5, 117, 0, 0, 487, 488, 5, 112, 0, 0, 488, 489, 1, 0, 0, 0, 489, 490, 6, 10, 5, 0, 490, 37, 1, 0, 0, 0, 491, 492, 5, 109, 0, 0, 492, 493, 5, 101, 0, 0, 493, 494, 5, 116, 0, 0, 494, 495, 5, 97, 0, 0, 495, 496, 1, 0, 0, 0, 496, 497, 6, 11, 6, 0, 497, 39, 1, 0, 0, 0, 498, 499, 5, 109, 0, 0, 499, 500, 5, 101, 0, 0, 500, 501, 5, 116, 0, 0, 501, 
502, 5, 114, 0, 0, 502, 503, 5, 105, 0, 0, 503, 504, 5, 99, 0, 0, 504, 505, 5, 115, 0, 0, 505, 506, 1, 0, 0, 0, 506, 507, 6, 12, 7, 0, 507, 41, 1, 0, 0, 0, 508, 509, 5, 109, 0, 0, 509, 510, 5, 118, 0, 0, 510, 511, 5, 95, 0, 0, 511, 512, 5, 101, 0, 0, 512, 513, 5, 120, 0, 0, 513, 514, 5, 112, 0, 0, 514, 515, 5, 97, 0, 0, 515, 516, 5, 110, 0, 0, 516, 517, 5, 100, 0, 0, 517, 518, 1, 0, 0, 0, 518, 519, 6, 13, 8, 0, 519, 43, 1, 0, 0, 0, 520, 521, 5, 114, 0, 0, 521, 522, 5, 101, 0, 0, 522, 523, 5, 110, 0, 0, 523, 524, 5, 97, 0, 0, 524, 525, 5, 109, 0, 0, 525, 526, 5, 101, 0, 0, 526, 527, 1, 0, 0, 0, 527, 528, 6, 14, 9, 0, 528, 45, 1, 0, 0, 0, 529, 530, 5, 114, 0, 0, 530, 531, 5, 111, 0, 0, 531, 532, 5, 119, 0, 0, 532, 533, 1, 0, 0, 0, 533, 534, 6, 15, 0, 0, 534, 47, 1, 0, 0, 0, 535, 536, 5, 115, 0, 0, 536, 537, 5, 104, 0, 0, 537, 538, 5, 111, 0, 0, 538, 539, 5, 119, 0, 0, 539, 540, 1, 0, 0, 0, 540, 541, 6, 16, 10, 0, 541, 49, 1, 0, 0, 0, 542, 543, 5, 115, 0, 0, 543, 544, 5, 111, 0, 0, 544, 545, 5, 114, 0, 0, 545, 546, 5, 116, 0, 0, 546, 547, 1, 0, 0, 0, 547, 548, 6, 17, 0, 0, 548, 51, 1, 0, 0, 0, 549, 550, 5, 115, 0, 0, 550, 551, 5, 116, 0, 0, 551, 552, 5, 97, 0, 0, 552, 553, 5, 116, 0, 0, 553, 554, 5, 115, 0, 0, 554, 555, 1, 0, 0, 0, 555, 556, 6, 18, 0, 0, 556, 53, 1, 0, 0, 0, 557, 558, 5, 119, 0, 0, 558, 559, 5, 104, 0, 0, 559, 560, 5, 101, 0, 0, 560, 561, 5, 114, 0, 0, 561, 562, 5, 101, 0, 0, 562, 563, 1, 0, 0, 0, 563, 564, 6, 19, 0, 0, 564, 55, 1, 0, 0, 0, 565, 567, 8, 0, 0, 0, 566, 565, 1, 0, 0, 0, 567, 568, 1, 0, 0, 0, 568, 566, 1, 0, 0, 0, 568, 569, 1, 0, 0, 0, 569, 570, 1, 0, 0, 0, 570, 571, 6, 20, 0, 0, 571, 57, 1, 0, 0, 0, 572, 573, 5, 47, 0, 0, 573, 574, 5, 47, 0, 0, 574, 578, 1, 0, 0, 0, 575, 577, 8, 1, 0, 0, 576, 575, 1, 0, 0, 0, 577, 580, 1, 0, 0, 0, 578, 576, 1, 0, 0, 0, 578, 579, 1, 0, 0, 0, 579, 582, 1, 0, 0, 0, 580, 578, 1, 0, 0, 0, 581, 583, 5, 13, 0, 0, 582, 581, 1, 0, 0, 0, 582, 583, 1, 0, 0, 0, 583, 585, 1, 0, 0, 0, 584, 586, 5, 10, 0, 0, 585, 584, 1, 0, 0, 0, 585, 586, 1, 0, 0, 0, 586, 587, 1, 0, 0, 0, 587, 588, 6, 21, 11, 0, 588, 59, 1, 0, 0, 0, 589, 590, 5, 47, 0, 0, 590, 591, 5, 42, 0, 0, 591, 596, 1, 0, 0, 0, 592, 595, 3, 60, 22, 0, 593, 595, 9, 0, 0, 0, 594, 592, 1, 0, 0, 0, 594, 593, 1, 0, 0, 0, 595, 598, 1, 0, 0, 0, 596, 597, 1, 0, 0, 0, 596, 594, 1, 0, 0, 0, 597, 599, 1, 0, 0, 0, 598, 596, 1, 0, 0, 0, 599, 600, 5, 42, 0, 0, 600, 601, 5, 47, 0, 0, 601, 602, 1, 0, 0, 0, 602, 603, 6, 22, 11, 0, 603, 61, 1, 0, 0, 0, 604, 606, 7, 2, 0, 0, 605, 604, 1, 0, 0, 0, 606, 607, 1, 0, 0, 0, 607, 605, 1, 0, 0, 0, 607, 608, 1, 0, 0, 0, 608, 609, 1, 0, 0, 0, 609, 610, 6, 23, 11, 0, 610, 63, 1, 0, 0, 0, 611, 615, 8, 3, 0, 0, 612, 613, 5, 47, 0, 0, 613, 615, 8, 4, 0, 0, 614, 611, 1, 0, 0, 0, 614, 612, 1, 0, 0, 0, 615, 65, 1, 0, 0, 0, 616, 618, 3, 64, 24, 0, 617, 616, 1, 0, 0, 0, 618, 619, 1, 0, 0, 0, 619, 617, 1, 0, 0, 0, 619, 620, 1, 0, 0, 0, 620, 67, 1, 0, 0, 0, 621, 622, 3, 178, 81, 0, 622, 623, 1, 0, 0, 0, 623, 624, 6, 26, 12, 0, 624, 625, 6, 26, 13, 0, 625, 69, 1, 0, 0, 0, 626, 627, 3, 78, 31, 0, 627, 628, 1, 0, 0, 0, 628, 629, 6, 27, 14, 0, 629, 630, 6, 27, 15, 0, 630, 71, 1, 0, 0, 0, 631, 632, 3, 62, 23, 0, 632, 633, 1, 0, 0, 0, 633, 634, 6, 28, 11, 0, 634, 73, 1, 0, 0, 0, 635, 636, 3, 58, 21, 0, 636, 637, 1, 0, 0, 0, 637, 638, 6, 29, 11, 0, 638, 75, 1, 0, 0, 0, 639, 640, 3, 60, 22, 0, 640, 641, 1, 0, 0, 0, 641, 642, 6, 30, 11, 0, 642, 77, 1, 0, 0, 0, 643, 644, 5, 124, 0, 0, 644, 645, 1, 0, 0, 0, 645, 646, 6, 31, 15, 0, 646, 79, 1, 0, 0, 0, 647, 648, 7, 5, 0, 0, 648, 81, 1, 0, 
0, 0, 649, 650, 7, 6, 0, 0, 650, 83, 1, 0, 0, 0, 651, 652, 5, 92, 0, 0, 652, 653, 7, 7, 0, 0, 653, 85, 1, 0, 0, 0, 654, 655, 8, 8, 0, 0, 655, 87, 1, 0, 0, 0, 656, 658, 7, 9, 0, 0, 657, 659, 7, 10, 0, 0, 658, 657, 1, 0, 0, 0, 658, 659, 1, 0, 0, 0, 659, 661, 1, 0, 0, 0, 660, 662, 3, 80, 32, 0, 661, 660, 1, 0, 0, 0, 662, 663, 1, 0, 0, 0, 663, 661, 1, 0, 0, 0, 663, 664, 1, 0, 0, 0, 664, 89, 1, 0, 0, 0, 665, 666, 5, 64, 0, 0, 666, 91, 1, 0, 0, 0, 667, 668, 5, 96, 0, 0, 668, 93, 1, 0, 0, 0, 669, 673, 8, 11, 0, 0, 670, 671, 5, 96, 0, 0, 671, 673, 5, 96, 0, 0, 672, 669, 1, 0, 0, 0, 672, 670, 1, 0, 0, 0, 673, 95, 1, 0, 0, 0, 674, 675, 5, 95, 0, 0, 675, 97, 1, 0, 0, 0, 676, 680, 3, 82, 33, 0, 677, 680, 3, 80, 32, 0, 678, 680, 3, 96, 40, 0, 679, 676, 1, 0, 0, 0, 679, 677, 1, 0, 0, 0, 679, 678, 1, 0, 0, 0, 680, 99, 1, 0, 0, 0, 681, 686, 5, 34, 0, 0, 682, 685, 3, 84, 34, 0, 683, 685, 3, 86, 35, 0, 684, 682, 1, 0, 0, 0, 684, 683, 1, 0, 0, 0, 685, 688, 1, 0, 0, 0, 686, 684, 1, 0, 0, 0, 686, 687, 1, 0, 0, 0, 687, 689, 1, 0, 0, 0, 688, 686, 1, 0, 0, 0, 689, 711, 5, 34, 0, 0, 690, 691, 5, 34, 0, 0, 691, 692, 5, 34, 0, 0, 692, 693, 5, 34, 0, 0, 693, 697, 1, 0, 0, 0, 694, 696, 8, 1, 0, 0, 695, 694, 1, 0, 0, 0, 696, 699, 1, 0, 0, 0, 697, 698, 1, 0, 0, 0, 697, 695, 1, 0, 0, 0, 698, 700, 1, 0, 0, 0, 699, 697, 1, 0, 0, 0, 700, 701, 5, 34, 0, 0, 701, 702, 5, 34, 0, 0, 702, 703, 5, 34, 0, 0, 703, 705, 1, 0, 0, 0, 704, 706, 5, 34, 0, 0, 705, 704, 1, 0, 0, 0, 705, 706, 1, 0, 0, 0, 706, 708, 1, 0, 0, 0, 707, 709, 5, 34, 0, 0, 708, 707, 1, 0, 0, 0, 708, 709, 1, 0, 0, 0, 709, 711, 1, 0, 0, 0, 710, 681, 1, 0, 0, 0, 710, 690, 1, 0, 0, 0, 711, 101, 1, 0, 0, 0, 712, 714, 3, 80, 32, 0, 713, 712, 1, 0, 0, 0, 714, 715, 1, 0, 0, 0, 715, 713, 1, 0, 0, 0, 715, 716, 1, 0, 0, 0, 716, 103, 1, 0, 0, 0, 717, 719, 3, 80, 32, 0, 718, 717, 1, 0, 0, 0, 719, 720, 1, 0, 0, 0, 720, 718, 1, 0, 0, 0, 720, 721, 1, 0, 0, 0, 721, 722, 1, 0, 0, 0, 722, 726, 3, 120, 52, 0, 723, 725, 3, 80, 32, 0, 724, 723, 1, 0, 0, 0, 725, 728, 1, 0, 0, 0, 726, 724, 1, 0, 0, 0, 726, 727, 1, 0, 0, 0, 727, 760, 1, 0, 0, 0, 728, 726, 1, 0, 0, 0, 729, 731, 3, 120, 52, 0, 730, 732, 3, 80, 32, 0, 731, 730, 1, 0, 0, 0, 732, 733, 1, 0, 0, 0, 733, 731, 1, 0, 0, 0, 733, 734, 1, 0, 0, 0, 734, 760, 1, 0, 0, 0, 735, 737, 3, 80, 32, 0, 736, 735, 1, 0, 0, 0, 737, 738, 1, 0, 0, 0, 738, 736, 1, 0, 0, 0, 738, 739, 1, 0, 0, 0, 739, 747, 1, 0, 0, 0, 740, 744, 3, 120, 52, 0, 741, 743, 3, 80, 32, 0, 742, 741, 1, 0, 0, 0, 743, 746, 1, 0, 0, 0, 744, 742, 1, 0, 0, 0, 744, 745, 1, 0, 0, 0, 745, 748, 1, 0, 0, 0, 746, 744, 1, 0, 0, 0, 747, 740, 1, 0, 0, 0, 747, 748, 1, 0, 0, 0, 748, 749, 1, 0, 0, 0, 749, 750, 3, 88, 36, 0, 750, 760, 1, 0, 0, 0, 751, 753, 3, 120, 52, 0, 752, 754, 3, 80, 32, 0, 753, 752, 1, 0, 0, 0, 754, 755, 1, 0, 0, 0, 755, 753, 1, 0, 0, 0, 755, 756, 1, 0, 0, 0, 756, 757, 1, 0, 0, 0, 757, 758, 3, 88, 36, 0, 758, 760, 1, 0, 0, 0, 759, 718, 1, 0, 0, 0, 759, 729, 1, 0, 0, 0, 759, 736, 1, 0, 0, 0, 759, 751, 1, 0, 0, 0, 760, 105, 1, 0, 0, 0, 761, 762, 5, 98, 0, 0, 762, 763, 5, 121, 0, 0, 763, 107, 1, 0, 0, 0, 764, 765, 5, 97, 0, 0, 765, 766, 5, 110, 0, 0, 766, 767, 5, 100, 0, 0, 767, 109, 1, 0, 0, 0, 768, 769, 5, 97, 0, 0, 769, 770, 5, 115, 0, 0, 770, 771, 5, 99, 0, 0, 771, 111, 1, 0, 0, 0, 772, 773, 5, 61, 0, 0, 773, 113, 1, 0, 0, 0, 774, 775, 5, 58, 0, 0, 775, 776, 5, 58, 0, 0, 776, 115, 1, 0, 0, 0, 777, 778, 5, 44, 0, 0, 778, 117, 1, 0, 0, 0, 779, 780, 5, 100, 0, 0, 780, 781, 5, 101, 0, 0, 781, 782, 5, 115, 0, 0, 782, 783, 5, 99, 0, 0, 783, 119, 1, 0, 0, 0, 784, 785, 5, 46, 0, 
0, 785, 121, 1, 0, 0, 0, 786, 787, 5, 102, 0, 0, 787, 788, 5, 97, 0, 0, 788, 789, 5, 108, 0, 0, 789, 790, 5, 115, 0, 0, 790, 791, 5, 101, 0, 0, 791, 123, 1, 0, 0, 0, 792, 793, 5, 102, 0, 0, 793, 794, 5, 105, 0, 0, 794, 795, 5, 114, 0, 0, 795, 796, 5, 115, 0, 0, 796, 797, 5, 116, 0, 0, 797, 125, 1, 0, 0, 0, 798, 799, 5, 108, 0, 0, 799, 800, 5, 97, 0, 0, 800, 801, 5, 115, 0, 0, 801, 802, 5, 116, 0, 0, 802, 127, 1, 0, 0, 0, 803, 804, 5, 40, 0, 0, 804, 129, 1, 0, 0, 0, 805, 806, 5, 105, 0, 0, 806, 807, 5, 110, 0, 0, 807, 131, 1, 0, 0, 0, 808, 809, 5, 105, 0, 0, 809, 810, 5, 115, 0, 0, 810, 133, 1, 0, 0, 0, 811, 812, 5, 108, 0, 0, 812, 813, 5, 105, 0, 0, 813, 814, 5, 107, 0, 0, 814, 815, 5, 101, 0, 0, 815, 135, 1, 0, 0, 0, 816, 817, 5, 110, 0, 0, 817, 818, 5, 111, 0, 0, 818, 819, 5, 116, 0, 0, 819, 137, 1, 0, 0, 0, 820, 821, 5, 110, 0, 0, 821, 822, 5, 117, 0, 0, 822, 823, 5, 108, 0, 0, 823, 824, 5, 108, 0, 0, 824, 139, 1, 0, 0, 0, 825, 826, 5, 110, 0, 0, 826, 827, 5, 117, 0, 0, 827, 828, 5, 108, 0, 0, 828, 829, 5, 108, 0, 0, 829, 830, 5, 115, 0, 0, 830, 141, 1, 0, 0, 0, 831, 832, 5, 111, 0, 0, 832, 833, 5, 114, 0, 0, 833, 143, 1, 0, 0, 0, 834, 835, 5, 63, 0, 0, 835, 145, 1, 0, 0, 0, 836, 837, 5, 114, 0, 0, 837, 838, 5, 108, 0, 0, 838, 839, 5, 105, 0, 0, 839, 840, 5, 107, 0, 0, 840, 841, 5, 101, 0, 0, 841, 147, 1, 0, 0, 0, 842, 843, 5, 41, 0, 0, 843, 149, 1, 0, 0, 0, 844, 845, 5, 116, 0, 0, 845, 846, 5, 114, 0, 0, 846, 847, 5, 117, 0, 0, 847, 848, 5, 101, 0, 0, 848, 151, 1, 0, 0, 0, 849, 850, 5, 61, 0, 0, 850, 851, 5, 61, 0, 0, 851, 153, 1, 0, 0, 0, 852, 853, 5, 61, 0, 0, 853, 854, 5, 126, 0, 0, 854, 155, 1, 0, 0, 0, 855, 856, 5, 33, 0, 0, 856, 857, 5, 61, 0, 0, 857, 157, 1, 0, 0, 0, 858, 859, 5, 60, 0, 0, 859, 159, 1, 0, 0, 0, 860, 861, 5, 60, 0, 0, 861, 862, 5, 61, 0, 0, 862, 161, 1, 0, 0, 0, 863, 864, 5, 62, 0, 0, 864, 163, 1, 0, 0, 0, 865, 866, 5, 62, 0, 0, 866, 867, 5, 61, 0, 0, 867, 165, 1, 0, 0, 0, 868, 869, 5, 43, 0, 0, 869, 167, 1, 0, 0, 0, 870, 871, 5, 45, 0, 0, 871, 169, 1, 0, 0, 0, 872, 873, 5, 42, 0, 0, 873, 171, 1, 0, 0, 0, 874, 875, 5, 47, 0, 0, 875, 173, 1, 0, 0, 0, 876, 877, 5, 37, 0, 0, 877, 175, 1, 0, 0, 0, 878, 879, 3, 144, 64, 0, 879, 883, 3, 82, 33, 0, 880, 882, 3, 98, 41, 0, 881, 880, 1, 0, 0, 0, 882, 885, 1, 0, 0, 0, 883, 881, 1, 0, 0, 0, 883, 884, 1, 0, 0, 0, 884, 893, 1, 0, 0, 0, 885, 883, 1, 0, 0, 0, 886, 888, 3, 144, 64, 0, 887, 889, 3, 80, 32, 0, 888, 887, 1, 0, 0, 0, 889, 890, 1, 0, 0, 0, 890, 888, 1, 0, 0, 0, 890, 891, 1, 0, 0, 0, 891, 893, 1, 0, 0, 0, 892, 878, 1, 0, 0, 0, 892, 886, 1, 0, 0, 0, 893, 177, 1, 0, 0, 0, 894, 895, 5, 91, 0, 0, 895, 896, 1, 0, 0, 0, 896, 897, 6, 81, 0, 0, 897, 898, 6, 81, 0, 0, 898, 179, 1, 0, 0, 0, 899, 900, 5, 93, 0, 0, 900, 901, 1, 0, 0, 0, 901, 902, 6, 82, 15, 0, 902, 903, 6, 82, 15, 0, 903, 181, 1, 0, 0, 0, 904, 908, 3, 82, 33, 0, 905, 907, 3, 98, 41, 0, 906, 905, 1, 0, 0, 0, 907, 910, 1, 0, 0, 0, 908, 906, 1, 0, 0, 0, 908, 909, 1, 0, 0, 0, 909, 921, 1, 0, 0, 0, 910, 908, 1, 0, 0, 0, 911, 914, 3, 96, 40, 0, 912, 914, 3, 90, 37, 0, 913, 911, 1, 0, 0, 0, 913, 912, 1, 0, 0, 0, 914, 916, 1, 0, 0, 0, 915, 917, 3, 98, 41, 0, 916, 915, 1, 0, 0, 0, 917, 918, 1, 0, 0, 0, 918, 916, 1, 0, 0, 0, 918, 919, 1, 0, 0, 0, 919, 921, 1, 0, 0, 0, 920, 904, 1, 0, 0, 0, 920, 913, 1, 0, 0, 0, 921, 183, 1, 0, 0, 0, 922, 924, 3, 92, 38, 0, 923, 925, 3, 94, 39, 0, 924, 923, 1, 0, 0, 0, 925, 926, 1, 0, 0, 0, 926, 924, 1, 0, 0, 0, 926, 927, 1, 0, 0, 0, 927, 928, 1, 0, 0, 0, 928, 929, 3, 92, 38, 0, 929, 185, 1, 0, 0, 0, 930, 931, 3, 184, 84, 0, 931, 187, 1, 0, 
0, 0, 932, 933, 3, 58, 21, 0, 933, 934, 1, 0, 0, 0, 934, 935, 6, 86, 11, 0, 935, 189, 1, 0, 0, 0, 936, 937, 3, 60, 22, 0, 937, 938, 1, 0, 0, 0, 938, 939, 6, 87, 11, 0, 939, 191, 1, 0, 0, 0, 940, 941, 3, 62, 23, 0, 941, 942, 1, 0, 0, 0, 942, 943, 6, 88, 11, 0, 943, 193, 1, 0, 0, 0, 944, 945, 3, 78, 31, 0, 945, 946, 1, 0, 0, 0, 946, 947, 6, 89, 14, 0, 947, 948, 6, 89, 15, 0, 948, 195, 1, 0, 0, 0, 949, 950, 3, 178, 81, 0, 950, 951, 1, 0, 0, 0, 951, 952, 6, 90, 12, 0, 952, 197, 1, 0, 0, 0, 953, 954, 3, 180, 82, 0, 954, 955, 1, 0, 0, 0, 955, 956, 6, 91, 16, 0, 956, 199, 1, 0, 0, 0, 957, 958, 3, 116, 50, 0, 958, 959, 1, 0, 0, 0, 959, 960, 6, 92, 17, 0, 960, 201, 1, 0, 0, 0, 961, 962, 3, 112, 48, 0, 962, 963, 1, 0, 0, 0, 963, 964, 6, 93, 18, 0, 964, 203, 1, 0, 0, 0, 965, 966, 3, 100, 42, 0, 966, 967, 1, 0, 0, 0, 967, 968, 6, 94, 19, 0, 968, 205, 1, 0, 0, 0, 969, 970, 5, 109, 0, 0, 970, 971, 5, 101, 0, 0, 971, 972, 5, 116, 0, 0, 972, 973, 5, 97, 0, 0, 973, 974, 5, 100, 0, 0, 974, 975, 5, 97, 0, 0, 975, 976, 5, 116, 0, 0, 976, 977, 5, 97, 0, 0, 977, 207, 1, 0, 0, 0, 978, 979, 3, 66, 25, 0, 979, 980, 1, 0, 0, 0, 980, 981, 6, 96, 20, 0, 981, 209, 1, 0, 0, 0, 982, 983, 3, 58, 21, 0, 983, 984, 1, 0, 0, 0, 984, 985, 6, 97, 11, 0, 985, 211, 1, 0, 0, 0, 986, 987, 3, 60, 22, 0, 987, 988, 1, 0, 0, 0, 988, 989, 6, 98, 11, 0, 989, 213, 1, 0, 0, 0, 990, 991, 3, 62, 23, 0, 991, 992, 1, 0, 0, 0, 992, 993, 6, 99, 11, 0, 993, 215, 1, 0, 0, 0, 994, 995, 3, 78, 31, 0, 995, 996, 1, 0, 0, 0, 996, 997, 6, 100, 14, 0, 997, 998, 6, 100, 15, 0, 998, 217, 1, 0, 0, 0, 999, 1000, 3, 120, 52, 0, 1000, 1001, 1, 0, 0, 0, 1001, 1002, 6, 101, 21, 0, 1002, 219, 1, 0, 0, 0, 1003, 1004, 3, 116, 50, 0, 1004, 1005, 1, 0, 0, 0, 1005, 1006, 6, 102, 17, 0, 1006, 221, 1, 0, 0, 0, 1007, 1012, 3, 82, 33, 0, 1008, 1012, 3, 80, 32, 0, 1009, 1012, 3, 96, 40, 0, 1010, 1012, 3, 170, 77, 0, 1011, 1007, 1, 0, 0, 0, 1011, 1008, 1, 0, 0, 0, 1011, 1009, 1, 0, 0, 0, 1011, 1010, 1, 0, 0, 0, 1012, 223, 1, 0, 0, 0, 1013, 1016, 3, 82, 33, 0, 1014, 1016, 3, 170, 77, 0, 1015, 1013, 1, 0, 0, 0, 1015, 1014, 1, 0, 0, 0, 1016, 1020, 1, 0, 0, 0, 1017, 1019, 3, 222, 103, 0, 1018, 1017, 1, 0, 0, 0, 1019, 1022, 1, 0, 0, 0, 1020, 1018, 1, 0, 0, 0, 1020, 1021, 1, 0, 0, 0, 1021, 1033, 1, 0, 0, 0, 1022, 1020, 1, 0, 0, 0, 1023, 1026, 3, 96, 40, 0, 1024, 1026, 3, 90, 37, 0, 1025, 1023, 1, 0, 0, 0, 1025, 1024, 1, 0, 0, 0, 1026, 1028, 1, 0, 0, 0, 1027, 1029, 3, 222, 103, 0, 1028, 1027, 1, 0, 0, 0, 1029, 1030, 1, 0, 0, 0, 1030, 1028, 1, 0, 0, 0, 1030, 1031, 1, 0, 0, 0, 1031, 1033, 1, 0, 0, 0, 1032, 1015, 1, 0, 0, 0, 1032, 1025, 1, 0, 0, 0, 1033, 225, 1, 0, 0, 0, 1034, 1037, 3, 224, 104, 0, 1035, 1037, 3, 184, 84, 0, 1036, 1034, 1, 0, 0, 0, 1036, 1035, 1, 0, 0, 0, 1037, 1038, 1, 0, 0, 0, 1038, 1036, 1, 0, 0, 0, 1038, 1039, 1, 0, 0, 0, 1039, 227, 1, 0, 0, 0, 1040, 1041, 3, 58, 21, 0, 1041, 1042, 1, 0, 0, 0, 1042, 1043, 6, 106, 11, 0, 1043, 229, 1, 0, 0, 0, 1044, 1045, 3, 60, 22, 0, 1045, 1046, 1, 0, 0, 0, 1046, 1047, 6, 107, 11, 0, 1047, 231, 1, 0, 0, 0, 1048, 1049, 3, 62, 23, 0, 1049, 1050, 1, 0, 0, 0, 1050, 1051, 6, 108, 11, 0, 1051, 233, 1, 0, 0, 0, 1052, 1053, 3, 78, 31, 0, 1053, 1054, 1, 0, 0, 0, 1054, 1055, 6, 109, 14, 0, 1055, 1056, 6, 109, 15, 0, 1056, 235, 1, 0, 0, 0, 1057, 1058, 3, 112, 48, 0, 1058, 1059, 1, 0, 0, 0, 1059, 1060, 6, 110, 18, 0, 1060, 237, 1, 0, 0, 0, 1061, 1062, 3, 116, 50, 0, 1062, 1063, 1, 0, 0, 0, 1063, 1064, 6, 111, 17, 0, 1064, 239, 1, 0, 0, 0, 1065, 1066, 3, 120, 52, 0, 1066, 1067, 1, 0, 0, 0, 1067, 1068, 6, 112, 21, 0, 1068, 241, 1, 0, 0, 0, 
1069, 1070, 5, 97, 0, 0, 1070, 1071, 5, 115, 0, 0, 1071, 243, 1, 0, 0, 0, 1072, 1073, 3, 226, 105, 0, 1073, 1074, 1, 0, 0, 0, 1074, 1075, 6, 114, 22, 0, 1075, 245, 1, 0, 0, 0, 1076, 1077, 3, 58, 21, 0, 1077, 1078, 1, 0, 0, 0, 1078, 1079, 6, 115, 11, 0, 1079, 247, 1, 0, 0, 0, 1080, 1081, 3, 60, 22, 0, 1081, 1082, 1, 0, 0, 0, 1082, 1083, 6, 116, 11, 0, 1083, 249, 1, 0, 0, 0, 1084, 1085, 3, 62, 23, 0, 1085, 1086, 1, 0, 0, 0, 1086, 1087, 6, 117, 11, 0, 1087, 251, 1, 0, 0, 0, 1088, 1089, 3, 78, 31, 0, 1089, 1090, 1, 0, 0, 0, 1090, 1091, 6, 118, 14, 0, 1091, 1092, 6, 118, 15, 0, 1092, 253, 1, 0, 0, 0, 1093, 1094, 3, 178, 81, 0, 1094, 1095, 1, 0, 0, 0, 1095, 1096, 6, 119, 12, 0, 1096, 1097, 6, 119, 23, 0, 1097, 255, 1, 0, 0, 0, 1098, 1099, 5, 111, 0, 0, 1099, 1100, 5, 110, 0, 0, 1100, 1101, 1, 0, 0, 0, 1101, 1102, 6, 120, 24, 0, 1102, 257, 1, 0, 0, 0, 1103, 1104, 5, 119, 0, 0, 1104, 1105, 5, 105, 0, 0, 1105, 1106, 5, 116, 0, 0, 1106, 1107, 5, 104, 0, 0, 1107, 1108, 1, 0, 0, 0, 1108, 1109, 6, 121, 24, 0, 1109, 259, 1, 0, 0, 0, 1110, 1111, 8, 12, 0, 0, 1111, 261, 1, 0, 0, 0, 1112, 1114, 3, 260, 122, 0, 1113, 1112, 1, 0, 0, 0, 1114, 1115, 1, 0, 0, 0, 1115, 1113, 1, 0, 0, 0, 1115, 1116, 1, 0, 0, 0, 1116, 1117, 1, 0, 0, 0, 1117, 1118, 3, 360, 172, 0, 1118, 1120, 1, 0, 0, 0, 1119, 1113, 1, 0, 0, 0, 1119, 1120, 1, 0, 0, 0, 1120, 1122, 1, 0, 0, 0, 1121, 1123, 3, 260, 122, 0, 1122, 1121, 1, 0, 0, 0, 1123, 1124, 1, 0, 0, 0, 1124, 1122, 1, 0, 0, 0, 1124, 1125, 1, 0, 0, 0, 1125, 263, 1, 0, 0, 0, 1126, 1127, 3, 186, 85, 0, 1127, 1128, 1, 0, 0, 0, 1128, 1129, 6, 124, 25, 0, 1129, 265, 1, 0, 0, 0, 1130, 1131, 3, 262, 123, 0, 1131, 1132, 1, 0, 0, 0, 1132, 1133, 6, 125, 26, 0, 1133, 267, 1, 0, 0, 0, 1134, 1135, 3, 58, 21, 0, 1135, 1136, 1, 0, 0, 0, 1136, 1137, 6, 126, 11, 0, 1137, 269, 1, 0, 0, 0, 1138, 1139, 3, 60, 22, 0, 1139, 1140, 1, 0, 0, 0, 1140, 1141, 6, 127, 11, 0, 1141, 271, 1, 0, 0, 0, 1142, 1143, 3, 62, 23, 0, 1143, 1144, 1, 0, 0, 0, 1144, 1145, 6, 128, 11, 0, 1145, 273, 1, 0, 0, 0, 1146, 1147, 3, 78, 31, 0, 1147, 1148, 1, 0, 0, 0, 1148, 1149, 6, 129, 14, 0, 1149, 1150, 6, 129, 15, 0, 1150, 1151, 6, 129, 15, 0, 1151, 275, 1, 0, 0, 0, 1152, 1153, 3, 112, 48, 0, 1153, 1154, 1, 0, 0, 0, 1154, 1155, 6, 130, 18, 0, 1155, 277, 1, 0, 0, 0, 1156, 1157, 3, 116, 50, 0, 1157, 1158, 1, 0, 0, 0, 1158, 1159, 6, 131, 17, 0, 1159, 279, 1, 0, 0, 0, 1160, 1161, 3, 120, 52, 0, 1161, 1162, 1, 0, 0, 0, 1162, 1163, 6, 132, 21, 0, 1163, 281, 1, 0, 0, 0, 1164, 1165, 3, 258, 121, 0, 1165, 1166, 1, 0, 0, 0, 1166, 1167, 6, 133, 27, 0, 1167, 283, 1, 0, 0, 0, 1168, 1169, 3, 226, 105, 0, 1169, 1170, 1, 0, 0, 0, 1170, 1171, 6, 134, 22, 0, 1171, 285, 1, 0, 0, 0, 1172, 1173, 3, 186, 85, 0, 1173, 1174, 1, 0, 0, 0, 1174, 1175, 6, 135, 25, 0, 1175, 287, 1, 0, 0, 0, 1176, 1177, 3, 58, 21, 0, 1177, 1178, 1, 0, 0, 0, 1178, 1179, 6, 136, 11, 0, 1179, 289, 1, 0, 0, 0, 1180, 1181, 3, 60, 22, 0, 1181, 1182, 1, 0, 0, 0, 1182, 1183, 6, 137, 11, 0, 1183, 291, 1, 0, 0, 0, 1184, 1185, 3, 62, 23, 0, 1185, 1186, 1, 0, 0, 0, 1186, 1187, 6, 138, 11, 0, 1187, 293, 1, 0, 0, 0, 1188, 1189, 3, 78, 31, 0, 1189, 1190, 1, 0, 0, 0, 1190, 1191, 6, 139, 14, 0, 1191, 1192, 6, 139, 15, 0, 1192, 295, 1, 0, 0, 0, 1193, 1194, 3, 116, 50, 0, 1194, 1195, 1, 0, 0, 0, 1195, 1196, 6, 140, 17, 0, 1196, 297, 1, 0, 0, 0, 1197, 1198, 3, 120, 52, 0, 1198, 1199, 1, 0, 0, 0, 1199, 1200, 6, 141, 21, 0, 1200, 299, 1, 0, 0, 0, 1201, 1202, 3, 256, 120, 0, 1202, 1203, 1, 0, 0, 0, 1203, 1204, 6, 142, 28, 0, 1204, 1205, 6, 142, 29, 0, 1205, 301, 1, 0, 0, 0, 1206, 1207, 3, 66, 25, 0, 
1207, 1208, 1, 0, 0, 0, 1208, 1209, 6, 143, 20, 0, 1209, 303, 1, 0, 0, 0, 1210, 1211, 3, 58, 21, 0, 1211, 1212, 1, 0, 0, 0, 1212, 1213, 6, 144, 11, 0, 1213, 305, 1, 0, 0, 0, 1214, 1215, 3, 60, 22, 0, 1215, 1216, 1, 0, 0, 0, 1216, 1217, 6, 145, 11, 0, 1217, 307, 1, 0, 0, 0, 1218, 1219, 3, 62, 23, 0, 1219, 1220, 1, 0, 0, 0, 1220, 1221, 6, 146, 11, 0, 1221, 309, 1, 0, 0, 0, 1222, 1223, 3, 78, 31, 0, 1223, 1224, 1, 0, 0, 0, 1224, 1225, 6, 147, 14, 0, 1225, 1226, 6, 147, 15, 0, 1226, 1227, 6, 147, 15, 0, 1227, 311, 1, 0, 0, 0, 1228, 1229, 3, 116, 50, 0, 1229, 1230, 1, 0, 0, 0, 1230, 1231, 6, 148, 17, 0, 1231, 313, 1, 0, 0, 0, 1232, 1233, 3, 120, 52, 0, 1233, 1234, 1, 0, 0, 0, 1234, 1235, 6, 149, 21, 0, 1235, 315, 1, 0, 0, 0, 1236, 1237, 3, 226, 105, 0, 1237, 1238, 1, 0, 0, 0, 1238, 1239, 6, 150, 22, 0, 1239, 317, 1, 0, 0, 0, 1240, 1241, 3, 58, 21, 0, 1241, 1242, 1, 0, 0, 0, 1242, 1243, 6, 151, 11, 0, 1243, 319, 1, 0, 0, 0, 1244, 1245, 3, 60, 22, 0, 1245, 1246, 1, 0, 0, 0, 1246, 1247, 6, 152, 11, 0, 1247, 321, 1, 0, 0, 0, 1248, 1249, 3, 62, 23, 0, 1249, 1250, 1, 0, 0, 0, 1250, 1251, 6, 153, 11, 0, 1251, 323, 1, 0, 0, 0, 1252, 1253, 3, 78, 31, 0, 1253, 1254, 1, 0, 0, 0, 1254, 1255, 6, 154, 14, 0, 1255, 1256, 6, 154, 15, 0, 1256, 325, 1, 0, 0, 0, 1257, 1258, 3, 120, 52, 0, 1258, 1259, 1, 0, 0, 0, 1259, 1260, 6, 155, 21, 0, 1260, 327, 1, 0, 0, 0, 1261, 1262, 3, 186, 85, 0, 1262, 1263, 1, 0, 0, 0, 1263, 1264, 6, 156, 25, 0, 1264, 329, 1, 0, 0, 0, 1265, 1266, 3, 182, 83, 0, 1266, 1267, 1, 0, 0, 0, 1267, 1268, 6, 157, 30, 0, 1268, 331, 1, 0, 0, 0, 1269, 1270, 3, 58, 21, 0, 1270, 1271, 1, 0, 0, 0, 1271, 1272, 6, 158, 11, 0, 1272, 333, 1, 0, 0, 0, 1273, 1274, 3, 60, 22, 0, 1274, 1275, 1, 0, 0, 0, 1275, 1276, 6, 159, 11, 0, 1276, 335, 1, 0, 0, 0, 1277, 1278, 3, 62, 23, 0, 1278, 1279, 1, 0, 0, 0, 1279, 1280, 6, 160, 11, 0, 1280, 337, 1, 0, 0, 0, 1281, 1282, 3, 78, 31, 0, 1282, 1283, 1, 0, 0, 0, 1283, 1284, 6, 161, 14, 0, 1284, 1285, 6, 161, 15, 0, 1285, 339, 1, 0, 0, 0, 1286, 1287, 5, 105, 0, 0, 1287, 1288, 5, 110, 0, 0, 1288, 1289, 5, 102, 0, 0, 1289, 1290, 5, 111, 0, 0, 1290, 341, 1, 0, 0, 0, 1291, 1292, 3, 58, 21, 0, 1292, 1293, 1, 0, 0, 0, 1293, 1294, 6, 163, 11, 0, 1294, 343, 1, 0, 0, 0, 1295, 1296, 3, 60, 22, 0, 1296, 1297, 1, 0, 0, 0, 1297, 1298, 6, 164, 11, 0, 1298, 345, 1, 0, 0, 0, 1299, 1300, 3, 62, 23, 0, 1300, 1301, 1, 0, 0, 0, 1301, 1302, 6, 165, 11, 0, 1302, 347, 1, 0, 0, 0, 1303, 1304, 3, 78, 31, 0, 1304, 1305, 1, 0, 0, 0, 1305, 1306, 6, 166, 14, 0, 1306, 1307, 6, 166, 15, 0, 1307, 349, 1, 0, 0, 0, 1308, 1309, 5, 102, 0, 0, 1309, 1310, 5, 117, 0, 0, 1310, 1311, 5, 110, 0, 0, 1311, 1312, 5, 99, 0, 0, 1312, 1313, 5, 116, 0, 0, 1313, 1314, 5, 105, 0, 0, 1314, 1315, 5, 111, 0, 0, 1315, 1316, 5, 110, 0, 0, 1316, 1317, 5, 115, 0, 0, 1317, 351, 1, 0, 0, 0, 1318, 1319, 3, 58, 21, 0, 1319, 1320, 1, 0, 0, 0, 1320, 1321, 6, 168, 11, 0, 1321, 353, 1, 0, 0, 0, 1322, 1323, 3, 60, 22, 0, 1323, 1324, 1, 0, 0, 0, 1324, 1325, 6, 169, 11, 0, 1325, 355, 1, 0, 0, 0, 1326, 1327, 3, 62, 23, 0, 1327, 1328, 1, 0, 0, 0, 1328, 1329, 6, 170, 11, 0, 1329, 357, 1, 0, 0, 0, 1330, 1331, 3, 180, 82, 0, 1331, 1332, 1, 0, 0, 0, 1332, 1333, 6, 171, 16, 0, 1333, 1334, 6, 171, 15, 0, 1334, 359, 1, 0, 0, 0, 1335, 1336, 5, 58, 0, 0, 1336, 361, 1, 0, 0, 0, 1337, 1343, 3, 90, 37, 0, 1338, 1343, 3, 80, 32, 0, 1339, 1343, 3, 120, 52, 0, 1340, 1343, 3, 82, 33, 0, 1341, 1343, 3, 96, 40, 0, 1342, 1337, 1, 0, 0, 0, 1342, 1338, 1, 0, 0, 0, 1342, 1339, 1, 0, 0, 0, 1342, 1340, 1, 0, 0, 0, 1342, 1341, 1, 0, 0, 0, 1343, 1344, 1, 0, 0, 0, 
1344, 1342, 1, 0, 0, 0, 1344, 1345, 1, 0, 0, 0, 1345, 363, 1, 0, 0, 0, 1346, 1347, 3, 58, 21, 0, 1347, 1348, 1, 0, 0, 0, 1348, 1349, 6, 174, 11, 0, 1349, 365, 1, 0, 0, 0, 1350, 1351, 3, 60, 22, 0, 1351, 1352, 1, 0, 0, 0, 1352, 1353, 6, 175, 11, 0, 1353, 367, 1, 0, 0, 0, 1354, 1355, 3, 62, 23, 0, 1355, 1356, 1, 0, 0, 0, 1356, 1357, 6, 176, 11, 0, 1357, 369, 1, 0, 0, 0, 1358, 1359, 3, 78, 31, 0, 1359, 1360, 1, 0, 0, 0, 1360, 1361, 6, 177, 14, 0, 1361, 1362, 6, 177, 15, 0, 1362, 371, 1, 0, 0, 0, 1363, 1364, 3, 66, 25, 0, 1364, 1365, 1, 0, 0, 0, 1365, 1366, 6, 178, 20, 0, 1366, 1367, 6, 178, 15, 0, 1367, 1368, 6, 178, 31, 0, 1368, 373, 1, 0, 0, 0, 1369, 1370, 3, 58, 21, 0, 1370, 1371, 1, 0, 0, 0, 1371, 1372, 6, 179, 11, 0, 1372, 375, 1, 0, 0, 0, 1373, 1374, 3, 60, 22, 0, 1374, 1375, 1, 0, 0, 0, 1375, 1376, 6, 180, 11, 0, 1376, 377, 1, 0, 0, 0, 1377, 1378, 3, 62, 23, 0, 1378, 1379, 1, 0, 0, 0, 1379, 1380, 6, 181, 11, 0, 1380, 379, 1, 0, 0, 0, 1381, 1382, 3, 116, 50, 0, 1382, 1383, 1, 0, 0, 0, 1383, 1384, 6, 182, 17, 0, 1384, 1385, 6, 182, 15, 0, 1385, 1386, 6, 182, 7, 0, 1386, 381, 1, 0, 0, 0, 1387, 1388, 3, 58, 21, 0, 1388, 1389, 1, 0, 0, 0, 1389, 1390, 6, 183, 11, 0, 1390, 383, 1, 0, 0, 0, 1391, 1392, 3, 60, 22, 0, 1392, 1393, 1, 0, 0, 0, 1393, 1394, 6, 184, 11, 0, 1394, 385, 1, 0, 0, 0, 1395, 1396, 3, 62, 23, 0, 1396, 1397, 1, 0, 0, 0, 1397, 1398, 6, 185, 11, 0, 1398, 387, 1, 0, 0, 0, 1399, 1400, 3, 186, 85, 0, 1400, 1401, 1, 0, 0, 0, 1401, 1402, 6, 186, 15, 0, 1402, 1403, 6, 186, 0, 0, 1403, 1404, 6, 186, 25, 0, 1404, 389, 1, 0, 0, 0, 1405, 1406, 3, 182, 83, 0, 1406, 1407, 1, 0, 0, 0, 1407, 1408, 6, 187, 15, 0, 1408, 1409, 6, 187, 0, 0, 1409, 1410, 6, 187, 30, 0, 1410, 391, 1, 0, 0, 0, 1411, 1412, 3, 106, 45, 0, 1412, 1413, 1, 0, 0, 0, 1413, 1414, 6, 188, 15, 0, 1414, 1415, 6, 188, 0, 0, 1415, 1416, 6, 188, 32, 0, 1416, 393, 1, 0, 0, 0, 1417, 1418, 3, 78, 31, 0, 1418, 1419, 1, 0, 0, 0, 1419, 1420, 6, 189, 14, 0, 1420, 1421, 6, 189, 15, 0, 1421, 395, 1, 0, 0, 0, 65, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 568, 578, 582, 585, 594, 596, 607, 614, 619, 658, 663, 672, 679, 684, 686, 697, 705, 708, 710, 715, 720, 726, 733, 738, 744, 747, 755, 759, 883, 890, 892, 908, 913, 918, 920, 926, 1011, 1015, 1020, 1025, 1030, 1032, 1036, 1038, 1115, 1119, 1124, 1342, 1344, 33, 5, 2, 0, 5, 4, 0, 5, 6, 0, 5, 1, 0, 5, 3, 0, 5, 8, 0, 5, 12, 0, 5, 14, 0, 5, 10, 0, 5, 5, 0, 5, 11, 0, 0, 1, 0, 7, 69, 0, 5, 0, 0, 7, 29, 0, 4, 0, 0, 7, 70, 0, 7, 38, 0, 7, 36, 0, 7, 30, 0, 7, 25, 0, 7, 40, 0, 7, 80, 0, 5, 13, 0, 5, 7, 0, 7, 72, 0, 7, 90, 0, 7, 89, 0, 7, 88, 0, 5, 9, 0, 7, 71, 0, 5, 15, 0, 7, 33, 0] \ No newline at end of file +[4, 0, 124, 1450, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, 47, 7, 47, 2, 48, 7, 48, 2, 49, 7, 49, 2, 50, 7, 50, 2, 51, 7, 51, 2, 52, 7, 52, 2, 53, 7, 53, 2, 54, 7, 54, 2, 55, 7, 
55, 2, 56, 7, 56, 2, 57, 7, 57, 2, 58, 7, 58, 2, 59, 7, 59, 2, 60, 7, 60, 2, 61, 7, 61, 2, 62, 7, 62, 2, 63, 7, 63, 2, 64, 7, 64, 2, 65, 7, 65, 2, 66, 7, 66, 2, 67, 7, 67, 2, 68, 7, 68, 2, 69, 7, 69, 2, 70, 7, 70, 2, 71, 7, 71, 2, 72, 7, 72, 2, 73, 7, 73, 2, 74, 7, 74, 2, 75, 7, 75, 2, 76, 7, 76, 2, 77, 7, 77, 2, 78, 7, 78, 2, 79, 7, 79, 2, 80, 7, 80, 2, 81, 7, 81, 2, 82, 7, 82, 2, 83, 7, 83, 2, 84, 7, 84, 2, 85, 7, 85, 2, 86, 7, 86, 2, 87, 7, 87, 2, 88, 7, 88, 2, 89, 7, 89, 2, 90, 7, 90, 2, 91, 7, 91, 2, 92, 7, 92, 2, 93, 7, 93, 2, 94, 7, 94, 2, 95, 7, 95, 2, 96, 7, 96, 2, 97, 7, 97, 2, 98, 7, 98, 2, 99, 7, 99, 2, 100, 7, 100, 2, 101, 7, 101, 2, 102, 7, 102, 2, 103, 7, 103, 2, 104, 7, 104, 2, 105, 7, 105, 2, 106, 7, 106, 2, 107, 7, 107, 2, 108, 7, 108, 2, 109, 7, 109, 2, 110, 7, 110, 2, 111, 7, 111, 2, 112, 7, 112, 2, 113, 7, 113, 2, 114, 7, 114, 2, 115, 7, 115, 2, 116, 7, 116, 2, 117, 7, 117, 2, 118, 7, 118, 2, 119, 7, 119, 2, 120, 7, 120, 2, 121, 7, 121, 2, 122, 7, 122, 2, 123, 7, 123, 2, 124, 7, 124, 2, 125, 7, 125, 2, 126, 7, 126, 2, 127, 7, 127, 2, 128, 7, 128, 2, 129, 7, 129, 2, 130, 7, 130, 2, 131, 7, 131, 2, 132, 7, 132, 2, 133, 7, 133, 2, 134, 7, 134, 2, 135, 7, 135, 2, 136, 7, 136, 2, 137, 7, 137, 2, 138, 7, 138, 2, 139, 7, 139, 2, 140, 7, 140, 2, 141, 7, 141, 2, 142, 7, 142, 2, 143, 7, 143, 2, 144, 7, 144, 2, 145, 7, 145, 2, 146, 7, 146, 2, 147, 7, 147, 2, 148, 7, 148, 2, 149, 7, 149, 2, 150, 7, 150, 2, 151, 7, 151, 2, 152, 7, 152, 2, 153, 7, 153, 2, 154, 7, 154, 2, 155, 7, 155, 2, 156, 7, 156, 2, 157, 7, 157, 2, 158, 7, 158, 2, 159, 7, 159, 2, 160, 7, 160, 2, 161, 7, 161, 2, 162, 7, 162, 2, 163, 7, 163, 2, 164, 7, 164, 2, 165, 7, 165, 2, 166, 7, 166, 2, 167, 7, 167, 2, 168, 7, 168, 2, 169, 7, 169, 2, 170, 7, 170, 2, 171, 7, 171, 2, 172, 7, 172, 2, 173, 7, 173, 2, 174, 7, 174, 2, 175, 7, 175, 2, 176, 7, 176, 2, 177, 7, 177, 2, 178, 7, 178, 2, 179, 7, 179, 2, 180, 7, 180, 2, 181, 7, 181, 2, 182, 7, 182, 2, 183, 7, 183, 2, 184, 7, 184, 2, 185, 7, 185, 2, 186, 7, 186, 2, 187, 7, 187, 2, 188, 7, 188, 2, 189, 7, 189, 2, 190, 7, 190, 2, 191, 7, 191, 2, 192, 7, 192, 2, 193, 7, 193, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 20, 4, 20, 575, 8, 20, 11, 20, 12, 20, 576, 1, 20, 1, 20, 1, 21, 1, 21, 1, 21, 1, 21, 5, 21, 585, 8, 21, 10, 21, 12, 21, 588, 9, 21, 1, 21, 3, 21, 591, 8, 21, 1, 21, 3, 21, 594, 8, 21, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22, 1, 22, 1, 22, 5, 22, 603, 8, 22, 10, 22, 12, 22, 606, 9, 22, 1, 22, 1, 22, 1, 22, 1, 22, 1, 22, 1, 23, 4, 23, 
614, 8, 23, 11, 23, 12, 23, 615, 1, 23, 1, 23, 1, 24, 1, 24, 1, 24, 3, 24, 623, 8, 24, 1, 25, 4, 25, 626, 8, 25, 11, 25, 12, 25, 627, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 28, 1, 28, 1, 29, 1, 29, 1, 29, 1, 29, 1, 30, 1, 30, 1, 30, 1, 30, 1, 31, 1, 31, 1, 31, 1, 31, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, 35, 1, 35, 1, 36, 1, 36, 3, 36, 667, 8, 36, 1, 36, 4, 36, 670, 8, 36, 11, 36, 12, 36, 671, 1, 37, 1, 37, 1, 38, 1, 38, 1, 39, 1, 39, 1, 39, 3, 39, 681, 8, 39, 1, 40, 1, 40, 1, 41, 1, 41, 1, 41, 3, 41, 688, 8, 41, 1, 42, 1, 42, 1, 42, 5, 42, 693, 8, 42, 10, 42, 12, 42, 696, 9, 42, 1, 42, 1, 42, 1, 42, 1, 42, 1, 42, 1, 42, 5, 42, 704, 8, 42, 10, 42, 12, 42, 707, 9, 42, 1, 42, 1, 42, 1, 42, 1, 42, 1, 42, 3, 42, 714, 8, 42, 1, 42, 3, 42, 717, 8, 42, 3, 42, 719, 8, 42, 1, 43, 4, 43, 722, 8, 43, 11, 43, 12, 43, 723, 1, 44, 4, 44, 727, 8, 44, 11, 44, 12, 44, 728, 1, 44, 1, 44, 5, 44, 733, 8, 44, 10, 44, 12, 44, 736, 9, 44, 1, 44, 1, 44, 4, 44, 740, 8, 44, 11, 44, 12, 44, 741, 1, 44, 4, 44, 745, 8, 44, 11, 44, 12, 44, 746, 1, 44, 1, 44, 5, 44, 751, 8, 44, 10, 44, 12, 44, 754, 9, 44, 3, 44, 756, 8, 44, 1, 44, 1, 44, 1, 44, 1, 44, 4, 44, 762, 8, 44, 11, 44, 12, 44, 763, 1, 44, 1, 44, 3, 44, 768, 8, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1, 46, 1, 46, 1, 46, 1, 47, 1, 47, 1, 47, 1, 47, 1, 48, 1, 48, 1, 49, 1, 49, 1, 49, 1, 50, 1, 50, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 52, 1, 52, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 56, 1, 56, 1, 57, 1, 57, 1, 57, 1, 58, 1, 58, 1, 58, 1, 59, 1, 59, 1, 59, 1, 59, 1, 59, 1, 60, 1, 60, 1, 60, 1, 60, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 62, 1, 62, 1, 62, 1, 62, 1, 62, 1, 62, 1, 63, 1, 63, 1, 63, 1, 64, 1, 64, 1, 65, 1, 65, 1, 65, 1, 65, 1, 65, 1, 65, 1, 66, 1, 66, 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 1, 68, 1, 68, 1, 68, 1, 69, 1, 69, 1, 69, 1, 70, 1, 70, 1, 70, 1, 71, 1, 71, 1, 72, 1, 72, 1, 72, 1, 73, 1, 73, 1, 74, 1, 74, 1, 74, 1, 75, 1, 75, 1, 76, 1, 76, 1, 77, 1, 77, 1, 78, 1, 78, 1, 79, 1, 79, 1, 80, 1, 80, 1, 80, 5, 80, 890, 8, 80, 10, 80, 12, 80, 893, 9, 80, 1, 80, 1, 80, 4, 80, 897, 8, 80, 11, 80, 12, 80, 898, 3, 80, 901, 8, 80, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 82, 1, 82, 1, 82, 1, 82, 1, 82, 1, 83, 1, 83, 5, 83, 915, 8, 83, 10, 83, 12, 83, 918, 9, 83, 1, 83, 1, 83, 3, 83, 922, 8, 83, 1, 83, 4, 83, 925, 8, 83, 11, 83, 12, 83, 926, 3, 83, 929, 8, 83, 1, 84, 1, 84, 4, 84, 933, 8, 84, 11, 84, 12, 84, 934, 1, 84, 1, 84, 1, 85, 1, 85, 1, 86, 1, 86, 1, 86, 1, 86, 1, 87, 1, 87, 1, 87, 1, 87, 1, 88, 1, 88, 1, 88, 1, 88, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 90, 1, 90, 1, 90, 1, 90, 1, 91, 1, 91, 1, 91, 1, 91, 1, 92, 1, 92, 1, 92, 1, 92, 1, 93, 1, 93, 1, 93, 1, 93, 1, 94, 1, 94, 1, 94, 1, 94, 1, 95, 1, 95, 1, 95, 1, 95, 1, 95, 1, 95, 1, 95, 1, 95, 1, 95, 1, 96, 1, 96, 1, 96, 1, 96, 1, 97, 1, 97, 1, 97, 1, 97, 1, 98, 1, 98, 1, 98, 1, 98, 1, 99, 1, 99, 1, 99, 1, 99, 1, 100, 1, 100, 1, 100, 1, 100, 1, 101, 1, 101, 1, 101, 1, 101, 1, 101, 1, 102, 1, 102, 1, 102, 1, 102, 1, 103, 1, 103, 1, 103, 1, 103, 1, 104, 1, 104, 1, 104, 1, 104, 3, 104, 1024, 8, 104, 1, 105, 1, 105, 3, 105, 1028, 8, 105, 1, 105, 5, 105, 1031, 8, 105, 10, 105, 12, 105, 1034, 9, 105, 1, 105, 1, 105, 3, 105, 1038, 8, 105, 1, 105, 4, 105, 1041, 8, 105, 11, 105, 12, 105, 1042, 3, 105, 1045, 8, 105, 1, 106, 1, 106, 4, 106, 1049, 8, 106, 11, 106, 12, 106, 1050, 1, 107, 1, 107, 1, 107, 1, 107, 1, 108, 1, 108, 1, 108, 1, 108, 1, 109, 1, 109, 1, 109, 1, 109, 1, 110, 1, 
110, 1, 110, 1, 110, 1, 110, 1, 111, 1, 111, 1, 111, 1, 111, 1, 112, 1, 112, 1, 112, 1, 112, 1, 113, 1, 113, 1, 113, 1, 113, 1, 114, 1, 114, 1, 114, 1, 115, 1, 115, 1, 115, 1, 115, 1, 116, 1, 116, 1, 116, 1, 116, 1, 117, 1, 117, 1, 117, 1, 117, 1, 118, 1, 118, 1, 118, 1, 118, 1, 119, 1, 119, 1, 119, 1, 119, 1, 119, 1, 120, 1, 120, 1, 120, 1, 120, 1, 120, 1, 121, 1, 121, 1, 121, 1, 121, 1, 121, 1, 122, 1, 122, 1, 122, 1, 122, 1, 122, 1, 122, 1, 122, 1, 123, 1, 123, 1, 124, 4, 124, 1126, 8, 124, 11, 124, 12, 124, 1127, 1, 124, 1, 124, 3, 124, 1132, 8, 124, 1, 124, 4, 124, 1135, 8, 124, 11, 124, 12, 124, 1136, 1, 125, 1, 125, 1, 125, 1, 125, 1, 126, 1, 126, 1, 126, 1, 126, 1, 127, 1, 127, 1, 127, 1, 127, 1, 128, 1, 128, 1, 128, 1, 128, 1, 129, 1, 129, 1, 129, 1, 129, 1, 129, 1, 129, 1, 130, 1, 130, 1, 130, 1, 130, 1, 131, 1, 131, 1, 131, 1, 131, 1, 132, 1, 132, 1, 132, 1, 132, 1, 133, 1, 133, 1, 133, 1, 133, 1, 134, 1, 134, 1, 134, 1, 134, 1, 135, 1, 135, 1, 135, 1, 135, 1, 136, 1, 136, 1, 136, 1, 136, 1, 137, 1, 137, 1, 137, 1, 137, 1, 138, 1, 138, 1, 138, 1, 138, 1, 139, 1, 139, 1, 139, 1, 139, 1, 139, 1, 140, 1, 140, 1, 140, 1, 140, 1, 141, 1, 141, 1, 141, 1, 141, 1, 142, 1, 142, 1, 142, 1, 142, 1, 143, 1, 143, 1, 143, 1, 143, 1, 143, 1, 144, 1, 144, 1, 144, 1, 144, 1, 145, 1, 145, 1, 145, 1, 145, 1, 146, 1, 146, 1, 146, 1, 146, 1, 147, 1, 147, 1, 147, 1, 147, 1, 148, 1, 148, 1, 148, 1, 148, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 150, 1, 150, 1, 150, 1, 150, 1, 151, 1, 151, 1, 151, 1, 151, 1, 152, 1, 152, 1, 152, 1, 152, 1, 153, 1, 153, 1, 153, 1, 153, 1, 154, 1, 154, 1, 154, 1, 154, 1, 155, 1, 155, 1, 155, 1, 155, 1, 156, 1, 156, 1, 156, 1, 156, 1, 156, 1, 157, 1, 157, 1, 157, 1, 157, 1, 158, 1, 158, 1, 158, 1, 158, 1, 159, 1, 159, 1, 159, 1, 159, 1, 160, 1, 160, 1, 160, 1, 160, 1, 161, 1, 161, 1, 161, 1, 161, 1, 162, 1, 162, 1, 162, 1, 162, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 164, 1, 164, 1, 164, 1, 164, 1, 164, 1, 165, 1, 165, 1, 165, 1, 165, 1, 166, 1, 166, 1, 166, 1, 166, 1, 167, 1, 167, 1, 167, 1, 167, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 170, 1, 170, 1, 170, 1, 170, 1, 171, 1, 171, 1, 171, 1, 171, 1, 172, 1, 172, 1, 172, 1, 172, 1, 173, 1, 173, 1, 173, 1, 173, 1, 173, 1, 174, 1, 174, 1, 175, 1, 175, 1, 175, 1, 175, 1, 175, 4, 175, 1359, 8, 175, 11, 175, 12, 175, 1360, 1, 176, 1, 176, 1, 176, 1, 176, 1, 177, 1, 177, 1, 177, 1, 177, 1, 178, 1, 178, 1, 178, 1, 178, 1, 179, 1, 179, 1, 179, 1, 179, 1, 179, 1, 180, 1, 180, 1, 180, 1, 180, 1, 180, 1, 180, 1, 181, 1, 181, 1, 181, 1, 181, 1, 181, 1, 181, 1, 182, 1, 182, 1, 182, 1, 182, 1, 183, 1, 183, 1, 183, 1, 183, 1, 184, 1, 184, 1, 184, 1, 184, 1, 185, 1, 185, 1, 185, 1, 185, 1, 185, 1, 185, 1, 186, 1, 186, 1, 186, 1, 186, 1, 186, 1, 186, 1, 187, 1, 187, 1, 187, 1, 187, 1, 188, 1, 188, 1, 188, 1, 188, 1, 189, 1, 189, 1, 189, 1, 189, 1, 190, 1, 190, 1, 190, 1, 190, 1, 190, 1, 190, 1, 191, 1, 191, 1, 191, 1, 191, 1, 191, 1, 191, 1, 192, 1, 192, 1, 192, 1, 192, 1, 192, 1, 192, 1, 193, 1, 193, 1, 193, 1, 193, 1, 193, 2, 604, 705, 0, 194, 16, 1, 18, 2, 20, 3, 22, 4, 24, 5, 26, 6, 28, 7, 30, 8, 32, 9, 34, 10, 36, 11, 38, 12, 40, 13, 42, 14, 44, 15, 46, 16, 48, 17, 50, 18, 52, 19, 54, 20, 56, 21, 58, 22, 60, 23, 62, 24, 64, 0, 66, 25, 68, 0, 70, 0, 72, 26, 74, 27, 76, 28, 78, 29, 80, 0, 82, 0, 84, 0, 86, 0, 88, 0, 90, 0, 92, 0, 94, 0, 96, 0, 98, 0, 100, 30, 102, 31, 104, 32, 106, 33, 108, 34, 110, 35, 112, 36, 114, 37, 116, 38, 118, 39, 120, 
40, 122, 41, 124, 42, 126, 43, 128, 44, 130, 45, 132, 46, 134, 47, 136, 48, 138, 49, 140, 50, 142, 51, 144, 52, 146, 53, 148, 54, 150, 55, 152, 56, 154, 57, 156, 58, 158, 59, 160, 60, 162, 61, 164, 62, 166, 63, 168, 64, 170, 65, 172, 66, 174, 67, 176, 68, 178, 69, 180, 70, 182, 71, 184, 0, 186, 72, 188, 73, 190, 74, 192, 75, 194, 0, 196, 0, 198, 0, 200, 0, 202, 0, 204, 0, 206, 76, 208, 0, 210, 0, 212, 77, 214, 78, 216, 79, 218, 0, 220, 0, 222, 0, 224, 0, 226, 0, 228, 80, 230, 81, 232, 82, 234, 83, 236, 0, 238, 0, 240, 0, 242, 0, 244, 84, 246, 0, 248, 85, 250, 86, 252, 87, 254, 0, 256, 0, 258, 88, 260, 89, 262, 0, 264, 90, 266, 0, 268, 91, 270, 92, 272, 93, 274, 0, 276, 0, 278, 0, 280, 0, 282, 0, 284, 0, 286, 0, 288, 94, 290, 95, 292, 96, 294, 0, 296, 0, 298, 0, 300, 0, 302, 0, 304, 0, 306, 0, 308, 97, 310, 98, 312, 99, 314, 0, 316, 0, 318, 0, 320, 0, 322, 100, 324, 101, 326, 102, 328, 0, 330, 0, 332, 0, 334, 0, 336, 103, 338, 104, 340, 105, 342, 0, 344, 106, 346, 107, 348, 108, 350, 109, 352, 0, 354, 110, 356, 111, 358, 112, 360, 113, 362, 0, 364, 114, 366, 115, 368, 116, 370, 117, 372, 118, 374, 0, 376, 0, 378, 0, 380, 119, 382, 120, 384, 121, 386, 0, 388, 0, 390, 122, 392, 123, 394, 124, 396, 0, 398, 0, 400, 0, 402, 0, 16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 13, 6, 0, 9, 10, 13, 13, 32, 32, 47, 47, 91, 91, 93, 93, 2, 0, 10, 10, 13, 13, 3, 0, 9, 10, 13, 13, 32, 32, 11, 0, 9, 10, 13, 13, 32, 32, 34, 34, 44, 44, 47, 47, 58, 58, 61, 61, 91, 91, 93, 93, 124, 124, 2, 0, 42, 42, 47, 47, 1, 0, 48, 57, 2, 0, 65, 90, 97, 122, 5, 0, 34, 34, 92, 92, 110, 110, 114, 114, 116, 116, 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 1, 0, 96, 96, 11, 0, 9, 10, 13, 13, 32, 32, 34, 35, 44, 44, 47, 47, 58, 58, 60, 60, 62, 63, 92, 92, 124, 124, 1476, 0, 16, 1, 0, 0, 0, 0, 18, 1, 0, 0, 0, 0, 20, 1, 0, 0, 0, 0, 22, 1, 0, 0, 0, 0, 24, 1, 0, 0, 0, 0, 26, 1, 0, 0, 0, 0, 28, 1, 0, 0, 0, 0, 30, 1, 0, 0, 0, 0, 32, 1, 0, 0, 0, 0, 34, 1, 0, 0, 0, 0, 36, 1, 0, 0, 0, 0, 38, 1, 0, 0, 0, 0, 40, 1, 0, 0, 0, 0, 42, 1, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 46, 1, 0, 0, 0, 0, 48, 1, 0, 0, 0, 0, 50, 1, 0, 0, 0, 0, 52, 1, 0, 0, 0, 0, 54, 1, 0, 0, 0, 0, 56, 1, 0, 0, 0, 0, 58, 1, 0, 0, 0, 0, 60, 1, 0, 0, 0, 0, 62, 1, 0, 0, 0, 0, 66, 1, 0, 0, 0, 1, 68, 1, 0, 0, 0, 1, 70, 1, 0, 0, 0, 1, 72, 1, 0, 0, 0, 1, 74, 1, 0, 0, 0, 1, 76, 1, 0, 0, 0, 2, 78, 1, 0, 0, 0, 2, 100, 1, 0, 0, 0, 2, 102, 1, 0, 0, 0, 2, 104, 1, 0, 0, 0, 2, 106, 1, 0, 0, 0, 2, 108, 1, 0, 0, 0, 2, 110, 1, 0, 0, 0, 2, 112, 1, 0, 0, 0, 2, 114, 1, 0, 0, 0, 2, 116, 1, 0, 0, 0, 2, 118, 1, 0, 0, 0, 2, 120, 1, 0, 0, 0, 2, 122, 1, 0, 0, 0, 2, 124, 1, 0, 0, 0, 2, 126, 1, 0, 0, 0, 2, 128, 1, 0, 0, 0, 2, 130, 1, 0, 0, 0, 2, 132, 1, 0, 0, 0, 2, 134, 1, 0, 0, 0, 2, 136, 1, 0, 0, 0, 2, 138, 1, 0, 0, 0, 2, 140, 1, 0, 0, 0, 2, 142, 1, 0, 0, 0, 2, 144, 1, 0, 0, 0, 2, 146, 1, 0, 0, 0, 2, 148, 1, 0, 0, 0, 2, 150, 1, 0, 0, 0, 2, 152, 1, 0, 0, 0, 2, 154, 1, 0, 0, 0, 2, 156, 1, 0, 0, 0, 2, 158, 1, 0, 0, 0, 2, 160, 1, 0, 0, 0, 2, 162, 1, 0, 0, 0, 2, 164, 1, 0, 0, 0, 2, 166, 1, 0, 0, 0, 2, 168, 1, 0, 0, 0, 2, 170, 1, 0, 0, 0, 2, 172, 1, 0, 0, 0, 2, 174, 1, 0, 0, 0, 2, 176, 1, 0, 0, 0, 2, 178, 1, 0, 0, 0, 2, 180, 1, 0, 0, 0, 2, 182, 1, 0, 0, 0, 2, 186, 1, 0, 0, 0, 2, 188, 1, 0, 0, 0, 2, 190, 1, 0, 0, 0, 2, 192, 1, 0, 0, 0, 3, 194, 1, 0, 0, 0, 3, 196, 1, 0, 0, 0, 3, 198, 1, 0, 0, 0, 3, 200, 1, 0, 0, 0, 3, 202, 1, 0, 0, 0, 3, 204, 1, 0, 0, 0, 3, 206, 1, 0, 0, 0, 3, 208, 1, 0, 0, 0, 3, 210, 1, 0, 0, 0, 3, 212, 1, 0, 0, 0, 3, 214, 1, 0, 0, 0, 3, 216, 1, 
0, 0, 0, 4, 218, 1, 0, 0, 0, 4, 220, 1, 0, 0, 0, 4, 222, 1, 0, 0, 0, 4, 228, 1, 0, 0, 0, 4, 230, 1, 0, 0, 0, 4, 232, 1, 0, 0, 0, 4, 234, 1, 0, 0, 0, 5, 236, 1, 0, 0, 0, 5, 238, 1, 0, 0, 0, 5, 240, 1, 0, 0, 0, 5, 242, 1, 0, 0, 0, 5, 244, 1, 0, 0, 0, 5, 246, 1, 0, 0, 0, 5, 248, 1, 0, 0, 0, 5, 250, 1, 0, 0, 0, 5, 252, 1, 0, 0, 0, 6, 254, 1, 0, 0, 0, 6, 256, 1, 0, 0, 0, 6, 258, 1, 0, 0, 0, 6, 260, 1, 0, 0, 0, 6, 264, 1, 0, 0, 0, 6, 266, 1, 0, 0, 0, 6, 268, 1, 0, 0, 0, 6, 270, 1, 0, 0, 0, 6, 272, 1, 0, 0, 0, 7, 274, 1, 0, 0, 0, 7, 276, 1, 0, 0, 0, 7, 278, 1, 0, 0, 0, 7, 280, 1, 0, 0, 0, 7, 282, 1, 0, 0, 0, 7, 284, 1, 0, 0, 0, 7, 286, 1, 0, 0, 0, 7, 288, 1, 0, 0, 0, 7, 290, 1, 0, 0, 0, 7, 292, 1, 0, 0, 0, 8, 294, 1, 0, 0, 0, 8, 296, 1, 0, 0, 0, 8, 298, 1, 0, 0, 0, 8, 300, 1, 0, 0, 0, 8, 302, 1, 0, 0, 0, 8, 304, 1, 0, 0, 0, 8, 306, 1, 0, 0, 0, 8, 308, 1, 0, 0, 0, 8, 310, 1, 0, 0, 0, 8, 312, 1, 0, 0, 0, 9, 314, 1, 0, 0, 0, 9, 316, 1, 0, 0, 0, 9, 318, 1, 0, 0, 0, 9, 320, 1, 0, 0, 0, 9, 322, 1, 0, 0, 0, 9, 324, 1, 0, 0, 0, 9, 326, 1, 0, 0, 0, 10, 328, 1, 0, 0, 0, 10, 330, 1, 0, 0, 0, 10, 332, 1, 0, 0, 0, 10, 334, 1, 0, 0, 0, 10, 336, 1, 0, 0, 0, 10, 338, 1, 0, 0, 0, 10, 340, 1, 0, 0, 0, 11, 342, 1, 0, 0, 0, 11, 344, 1, 0, 0, 0, 11, 346, 1, 0, 0, 0, 11, 348, 1, 0, 0, 0, 11, 350, 1, 0, 0, 0, 12, 352, 1, 0, 0, 0, 12, 354, 1, 0, 0, 0, 12, 356, 1, 0, 0, 0, 12, 358, 1, 0, 0, 0, 12, 360, 1, 0, 0, 0, 13, 362, 1, 0, 0, 0, 13, 364, 1, 0, 0, 0, 13, 366, 1, 0, 0, 0, 13, 368, 1, 0, 0, 0, 13, 370, 1, 0, 0, 0, 13, 372, 1, 0, 0, 0, 14, 374, 1, 0, 0, 0, 14, 376, 1, 0, 0, 0, 14, 378, 1, 0, 0, 0, 14, 380, 1, 0, 0, 0, 14, 382, 1, 0, 0, 0, 14, 384, 1, 0, 0, 0, 15, 386, 1, 0, 0, 0, 15, 388, 1, 0, 0, 0, 15, 390, 1, 0, 0, 0, 15, 392, 1, 0, 0, 0, 15, 394, 1, 0, 0, 0, 15, 396, 1, 0, 0, 0, 15, 398, 1, 0, 0, 0, 15, 400, 1, 0, 0, 0, 15, 402, 1, 0, 0, 0, 16, 404, 1, 0, 0, 0, 18, 414, 1, 0, 0, 0, 20, 421, 1, 0, 0, 0, 22, 430, 1, 0, 0, 0, 24, 437, 1, 0, 0, 0, 26, 447, 1, 0, 0, 0, 28, 454, 1, 0, 0, 0, 30, 461, 1, 0, 0, 0, 32, 475, 1, 0, 0, 0, 34, 482, 1, 0, 0, 0, 36, 490, 1, 0, 0, 0, 38, 499, 1, 0, 0, 0, 40, 506, 1, 0, 0, 0, 42, 516, 1, 0, 0, 0, 44, 528, 1, 0, 0, 0, 46, 537, 1, 0, 0, 0, 48, 543, 1, 0, 0, 0, 50, 550, 1, 0, 0, 0, 52, 557, 1, 0, 0, 0, 54, 565, 1, 0, 0, 0, 56, 574, 1, 0, 0, 0, 58, 580, 1, 0, 0, 0, 60, 597, 1, 0, 0, 0, 62, 613, 1, 0, 0, 0, 64, 622, 1, 0, 0, 0, 66, 625, 1, 0, 0, 0, 68, 629, 1, 0, 0, 0, 70, 634, 1, 0, 0, 0, 72, 639, 1, 0, 0, 0, 74, 643, 1, 0, 0, 0, 76, 647, 1, 0, 0, 0, 78, 651, 1, 0, 0, 0, 80, 655, 1, 0, 0, 0, 82, 657, 1, 0, 0, 0, 84, 659, 1, 0, 0, 0, 86, 662, 1, 0, 0, 0, 88, 664, 1, 0, 0, 0, 90, 673, 1, 0, 0, 0, 92, 675, 1, 0, 0, 0, 94, 680, 1, 0, 0, 0, 96, 682, 1, 0, 0, 0, 98, 687, 1, 0, 0, 0, 100, 718, 1, 0, 0, 0, 102, 721, 1, 0, 0, 0, 104, 767, 1, 0, 0, 0, 106, 769, 1, 0, 0, 0, 108, 772, 1, 0, 0, 0, 110, 776, 1, 0, 0, 0, 112, 780, 1, 0, 0, 0, 114, 782, 1, 0, 0, 0, 116, 785, 1, 0, 0, 0, 118, 787, 1, 0, 0, 0, 120, 792, 1, 0, 0, 0, 122, 794, 1, 0, 0, 0, 124, 800, 1, 0, 0, 0, 126, 806, 1, 0, 0, 0, 128, 811, 1, 0, 0, 0, 130, 813, 1, 0, 0, 0, 132, 816, 1, 0, 0, 0, 134, 819, 1, 0, 0, 0, 136, 824, 1, 0, 0, 0, 138, 828, 1, 0, 0, 0, 140, 833, 1, 0, 0, 0, 142, 839, 1, 0, 0, 0, 144, 842, 1, 0, 0, 0, 146, 844, 1, 0, 0, 0, 148, 850, 1, 0, 0, 0, 150, 852, 1, 0, 0, 0, 152, 857, 1, 0, 0, 0, 154, 860, 1, 0, 0, 0, 156, 863, 1, 0, 0, 0, 158, 866, 1, 0, 0, 0, 160, 868, 1, 0, 0, 0, 162, 871, 1, 0, 0, 0, 164, 873, 1, 0, 0, 0, 166, 876, 1, 0, 0, 0, 168, 878, 1, 0, 0, 0, 170, 880, 1, 0, 0, 0, 172, 882, 1, 0, 0, 0, 174, 884, 
1, 0, 0, 0, 176, 900, 1, 0, 0, 0, 178, 902, 1, 0, 0, 0, 180, 907, 1, 0, 0, 0, 182, 928, 1, 0, 0, 0, 184, 930, 1, 0, 0, 0, 186, 938, 1, 0, 0, 0, 188, 940, 1, 0, 0, 0, 190, 944, 1, 0, 0, 0, 192, 948, 1, 0, 0, 0, 194, 952, 1, 0, 0, 0, 196, 957, 1, 0, 0, 0, 198, 961, 1, 0, 0, 0, 200, 965, 1, 0, 0, 0, 202, 969, 1, 0, 0, 0, 204, 973, 1, 0, 0, 0, 206, 977, 1, 0, 0, 0, 208, 986, 1, 0, 0, 0, 210, 990, 1, 0, 0, 0, 212, 994, 1, 0, 0, 0, 214, 998, 1, 0, 0, 0, 216, 1002, 1, 0, 0, 0, 218, 1006, 1, 0, 0, 0, 220, 1011, 1, 0, 0, 0, 222, 1015, 1, 0, 0, 0, 224, 1023, 1, 0, 0, 0, 226, 1044, 1, 0, 0, 0, 228, 1048, 1, 0, 0, 0, 230, 1052, 1, 0, 0, 0, 232, 1056, 1, 0, 0, 0, 234, 1060, 1, 0, 0, 0, 236, 1064, 1, 0, 0, 0, 238, 1069, 1, 0, 0, 0, 240, 1073, 1, 0, 0, 0, 242, 1077, 1, 0, 0, 0, 244, 1081, 1, 0, 0, 0, 246, 1084, 1, 0, 0, 0, 248, 1088, 1, 0, 0, 0, 250, 1092, 1, 0, 0, 0, 252, 1096, 1, 0, 0, 0, 254, 1100, 1, 0, 0, 0, 256, 1105, 1, 0, 0, 0, 258, 1110, 1, 0, 0, 0, 260, 1115, 1, 0, 0, 0, 262, 1122, 1, 0, 0, 0, 264, 1131, 1, 0, 0, 0, 266, 1138, 1, 0, 0, 0, 268, 1142, 1, 0, 0, 0, 270, 1146, 1, 0, 0, 0, 272, 1150, 1, 0, 0, 0, 274, 1154, 1, 0, 0, 0, 276, 1160, 1, 0, 0, 0, 278, 1164, 1, 0, 0, 0, 280, 1168, 1, 0, 0, 0, 282, 1172, 1, 0, 0, 0, 284, 1176, 1, 0, 0, 0, 286, 1180, 1, 0, 0, 0, 288, 1184, 1, 0, 0, 0, 290, 1188, 1, 0, 0, 0, 292, 1192, 1, 0, 0, 0, 294, 1196, 1, 0, 0, 0, 296, 1201, 1, 0, 0, 0, 298, 1205, 1, 0, 0, 0, 300, 1209, 1, 0, 0, 0, 302, 1213, 1, 0, 0, 0, 304, 1218, 1, 0, 0, 0, 306, 1222, 1, 0, 0, 0, 308, 1226, 1, 0, 0, 0, 310, 1230, 1, 0, 0, 0, 312, 1234, 1, 0, 0, 0, 314, 1238, 1, 0, 0, 0, 316, 1244, 1, 0, 0, 0, 318, 1248, 1, 0, 0, 0, 320, 1252, 1, 0, 0, 0, 322, 1256, 1, 0, 0, 0, 324, 1260, 1, 0, 0, 0, 326, 1264, 1, 0, 0, 0, 328, 1268, 1, 0, 0, 0, 330, 1273, 1, 0, 0, 0, 332, 1277, 1, 0, 0, 0, 334, 1281, 1, 0, 0, 0, 336, 1285, 1, 0, 0, 0, 338, 1289, 1, 0, 0, 0, 340, 1293, 1, 0, 0, 0, 342, 1297, 1, 0, 0, 0, 344, 1302, 1, 0, 0, 0, 346, 1307, 1, 0, 0, 0, 348, 1311, 1, 0, 0, 0, 350, 1315, 1, 0, 0, 0, 352, 1319, 1, 0, 0, 0, 354, 1324, 1, 0, 0, 0, 356, 1334, 1, 0, 0, 0, 358, 1338, 1, 0, 0, 0, 360, 1342, 1, 0, 0, 0, 362, 1346, 1, 0, 0, 0, 364, 1351, 1, 0, 0, 0, 366, 1358, 1, 0, 0, 0, 368, 1362, 1, 0, 0, 0, 370, 1366, 1, 0, 0, 0, 372, 1370, 1, 0, 0, 0, 374, 1374, 1, 0, 0, 0, 376, 1379, 1, 0, 0, 0, 378, 1385, 1, 0, 0, 0, 380, 1391, 1, 0, 0, 0, 382, 1395, 1, 0, 0, 0, 384, 1399, 1, 0, 0, 0, 386, 1403, 1, 0, 0, 0, 388, 1409, 1, 0, 0, 0, 390, 1415, 1, 0, 0, 0, 392, 1419, 1, 0, 0, 0, 394, 1423, 1, 0, 0, 0, 396, 1427, 1, 0, 0, 0, 398, 1433, 1, 0, 0, 0, 400, 1439, 1, 0, 0, 0, 402, 1445, 1, 0, 0, 0, 404, 405, 5, 100, 0, 0, 405, 406, 5, 105, 0, 0, 406, 407, 5, 115, 0, 0, 407, 408, 5, 115, 0, 0, 408, 409, 5, 101, 0, 0, 409, 410, 5, 99, 0, 0, 410, 411, 5, 116, 0, 0, 411, 412, 1, 0, 0, 0, 412, 413, 6, 0, 0, 0, 413, 17, 1, 0, 0, 0, 414, 415, 5, 100, 0, 0, 415, 416, 5, 114, 0, 0, 416, 417, 5, 111, 0, 0, 417, 418, 5, 112, 0, 0, 418, 419, 1, 0, 0, 0, 419, 420, 6, 1, 1, 0, 420, 19, 1, 0, 0, 0, 421, 422, 5, 101, 0, 0, 422, 423, 5, 110, 0, 0, 423, 424, 5, 114, 0, 0, 424, 425, 5, 105, 0, 0, 425, 426, 5, 99, 0, 0, 426, 427, 5, 104, 0, 0, 427, 428, 1, 0, 0, 0, 428, 429, 6, 2, 2, 0, 429, 21, 1, 0, 0, 0, 430, 431, 5, 101, 0, 0, 431, 432, 5, 118, 0, 0, 432, 433, 5, 97, 0, 0, 433, 434, 5, 108, 0, 0, 434, 435, 1, 0, 0, 0, 435, 436, 6, 3, 0, 0, 436, 23, 1, 0, 0, 0, 437, 438, 5, 101, 0, 0, 438, 439, 5, 120, 0, 0, 439, 440, 5, 112, 0, 0, 440, 441, 5, 108, 0, 0, 441, 442, 5, 97, 0, 0, 442, 443, 5, 105, 0, 0, 443, 444, 5, 110, 0, 0, 444, 445, 1, 
0, 0, 0, 445, 446, 6, 4, 3, 0, 446, 25, 1, 0, 0, 0, 447, 448, 5, 102, 0, 0, 448, 449, 5, 114, 0, 0, 449, 450, 5, 111, 0, 0, 450, 451, 5, 109, 0, 0, 451, 452, 1, 0, 0, 0, 452, 453, 6, 5, 4, 0, 453, 27, 1, 0, 0, 0, 454, 455, 5, 103, 0, 0, 455, 456, 5, 114, 0, 0, 456, 457, 5, 111, 0, 0, 457, 458, 5, 107, 0, 0, 458, 459, 1, 0, 0, 0, 459, 460, 6, 6, 0, 0, 460, 29, 1, 0, 0, 0, 461, 462, 5, 105, 0, 0, 462, 463, 5, 110, 0, 0, 463, 464, 5, 108, 0, 0, 464, 465, 5, 105, 0, 0, 465, 466, 5, 110, 0, 0, 466, 467, 5, 101, 0, 0, 467, 468, 5, 115, 0, 0, 468, 469, 5, 116, 0, 0, 469, 470, 5, 97, 0, 0, 470, 471, 5, 116, 0, 0, 471, 472, 5, 115, 0, 0, 472, 473, 1, 0, 0, 0, 473, 474, 6, 7, 0, 0, 474, 31, 1, 0, 0, 0, 475, 476, 5, 107, 0, 0, 476, 477, 5, 101, 0, 0, 477, 478, 5, 101, 0, 0, 478, 479, 5, 112, 0, 0, 479, 480, 1, 0, 0, 0, 480, 481, 6, 8, 1, 0, 481, 33, 1, 0, 0, 0, 482, 483, 5, 108, 0, 0, 483, 484, 5, 105, 0, 0, 484, 485, 5, 109, 0, 0, 485, 486, 5, 105, 0, 0, 486, 487, 5, 116, 0, 0, 487, 488, 1, 0, 0, 0, 488, 489, 6, 9, 0, 0, 489, 35, 1, 0, 0, 0, 490, 491, 5, 108, 0, 0, 491, 492, 5, 111, 0, 0, 492, 493, 5, 111, 0, 0, 493, 494, 5, 107, 0, 0, 494, 495, 5, 117, 0, 0, 495, 496, 5, 112, 0, 0, 496, 497, 1, 0, 0, 0, 497, 498, 6, 10, 5, 0, 498, 37, 1, 0, 0, 0, 499, 500, 5, 109, 0, 0, 500, 501, 5, 101, 0, 0, 501, 502, 5, 116, 0, 0, 502, 503, 5, 97, 0, 0, 503, 504, 1, 0, 0, 0, 504, 505, 6, 11, 6, 0, 505, 39, 1, 0, 0, 0, 506, 507, 5, 109, 0, 0, 507, 508, 5, 101, 0, 0, 508, 509, 5, 116, 0, 0, 509, 510, 5, 114, 0, 0, 510, 511, 5, 105, 0, 0, 511, 512, 5, 99, 0, 0, 512, 513, 5, 115, 0, 0, 513, 514, 1, 0, 0, 0, 514, 515, 6, 12, 7, 0, 515, 41, 1, 0, 0, 0, 516, 517, 5, 109, 0, 0, 517, 518, 5, 118, 0, 0, 518, 519, 5, 95, 0, 0, 519, 520, 5, 101, 0, 0, 520, 521, 5, 120, 0, 0, 521, 522, 5, 112, 0, 0, 522, 523, 5, 97, 0, 0, 523, 524, 5, 110, 0, 0, 524, 525, 5, 100, 0, 0, 525, 526, 1, 0, 0, 0, 526, 527, 6, 13, 8, 0, 527, 43, 1, 0, 0, 0, 528, 529, 5, 114, 0, 0, 529, 530, 5, 101, 0, 0, 530, 531, 5, 110, 0, 0, 531, 532, 5, 97, 0, 0, 532, 533, 5, 109, 0, 0, 533, 534, 5, 101, 0, 0, 534, 535, 1, 0, 0, 0, 535, 536, 6, 14, 9, 0, 536, 45, 1, 0, 0, 0, 537, 538, 5, 114, 0, 0, 538, 539, 5, 111, 0, 0, 539, 540, 5, 119, 0, 0, 540, 541, 1, 0, 0, 0, 541, 542, 6, 15, 0, 0, 542, 47, 1, 0, 0, 0, 543, 544, 5, 115, 0, 0, 544, 545, 5, 104, 0, 0, 545, 546, 5, 111, 0, 0, 546, 547, 5, 119, 0, 0, 547, 548, 1, 0, 0, 0, 548, 549, 6, 16, 10, 0, 549, 49, 1, 0, 0, 0, 550, 551, 5, 115, 0, 0, 551, 552, 5, 111, 0, 0, 552, 553, 5, 114, 0, 0, 553, 554, 5, 116, 0, 0, 554, 555, 1, 0, 0, 0, 555, 556, 6, 17, 0, 0, 556, 51, 1, 0, 0, 0, 557, 558, 5, 115, 0, 0, 558, 559, 5, 116, 0, 0, 559, 560, 5, 97, 0, 0, 560, 561, 5, 116, 0, 0, 561, 562, 5, 115, 0, 0, 562, 563, 1, 0, 0, 0, 563, 564, 6, 18, 0, 0, 564, 53, 1, 0, 0, 0, 565, 566, 5, 119, 0, 0, 566, 567, 5, 104, 0, 0, 567, 568, 5, 101, 0, 0, 568, 569, 5, 114, 0, 0, 569, 570, 5, 101, 0, 0, 570, 571, 1, 0, 0, 0, 571, 572, 6, 19, 0, 0, 572, 55, 1, 0, 0, 0, 573, 575, 8, 0, 0, 0, 574, 573, 1, 0, 0, 0, 575, 576, 1, 0, 0, 0, 576, 574, 1, 0, 0, 0, 576, 577, 1, 0, 0, 0, 577, 578, 1, 0, 0, 0, 578, 579, 6, 20, 0, 0, 579, 57, 1, 0, 0, 0, 580, 581, 5, 47, 0, 0, 581, 582, 5, 47, 0, 0, 582, 586, 1, 0, 0, 0, 583, 585, 8, 1, 0, 0, 584, 583, 1, 0, 0, 0, 585, 588, 1, 0, 0, 0, 586, 584, 1, 0, 0, 0, 586, 587, 1, 0, 0, 0, 587, 590, 1, 0, 0, 0, 588, 586, 1, 0, 0, 0, 589, 591, 5, 13, 0, 0, 590, 589, 1, 0, 0, 0, 590, 591, 1, 0, 0, 0, 591, 593, 1, 0, 0, 0, 592, 594, 5, 10, 0, 0, 593, 592, 1, 0, 0, 0, 593, 594, 1, 0, 0, 0, 594, 595, 1, 0, 0, 0, 
595, 596, 6, 21, 11, 0, 596, 59, 1, 0, 0, 0, 597, 598, 5, 47, 0, 0, 598, 599, 5, 42, 0, 0, 599, 604, 1, 0, 0, 0, 600, 603, 3, 60, 22, 0, 601, 603, 9, 0, 0, 0, 602, 600, 1, 0, 0, 0, 602, 601, 1, 0, 0, 0, 603, 606, 1, 0, 0, 0, 604, 605, 1, 0, 0, 0, 604, 602, 1, 0, 0, 0, 605, 607, 1, 0, 0, 0, 606, 604, 1, 0, 0, 0, 607, 608, 5, 42, 0, 0, 608, 609, 5, 47, 0, 0, 609, 610, 1, 0, 0, 0, 610, 611, 6, 22, 11, 0, 611, 61, 1, 0, 0, 0, 612, 614, 7, 2, 0, 0, 613, 612, 1, 0, 0, 0, 614, 615, 1, 0, 0, 0, 615, 613, 1, 0, 0, 0, 615, 616, 1, 0, 0, 0, 616, 617, 1, 0, 0, 0, 617, 618, 6, 23, 11, 0, 618, 63, 1, 0, 0, 0, 619, 623, 8, 3, 0, 0, 620, 621, 5, 47, 0, 0, 621, 623, 8, 4, 0, 0, 622, 619, 1, 0, 0, 0, 622, 620, 1, 0, 0, 0, 623, 65, 1, 0, 0, 0, 624, 626, 3, 64, 24, 0, 625, 624, 1, 0, 0, 0, 626, 627, 1, 0, 0, 0, 627, 625, 1, 0, 0, 0, 627, 628, 1, 0, 0, 0, 628, 67, 1, 0, 0, 0, 629, 630, 3, 178, 81, 0, 630, 631, 1, 0, 0, 0, 631, 632, 6, 26, 12, 0, 632, 633, 6, 26, 13, 0, 633, 69, 1, 0, 0, 0, 634, 635, 3, 78, 31, 0, 635, 636, 1, 0, 0, 0, 636, 637, 6, 27, 14, 0, 637, 638, 6, 27, 15, 0, 638, 71, 1, 0, 0, 0, 639, 640, 3, 62, 23, 0, 640, 641, 1, 0, 0, 0, 641, 642, 6, 28, 11, 0, 642, 73, 1, 0, 0, 0, 643, 644, 3, 58, 21, 0, 644, 645, 1, 0, 0, 0, 645, 646, 6, 29, 11, 0, 646, 75, 1, 0, 0, 0, 647, 648, 3, 60, 22, 0, 648, 649, 1, 0, 0, 0, 649, 650, 6, 30, 11, 0, 650, 77, 1, 0, 0, 0, 651, 652, 5, 124, 0, 0, 652, 653, 1, 0, 0, 0, 653, 654, 6, 31, 15, 0, 654, 79, 1, 0, 0, 0, 655, 656, 7, 5, 0, 0, 656, 81, 1, 0, 0, 0, 657, 658, 7, 6, 0, 0, 658, 83, 1, 0, 0, 0, 659, 660, 5, 92, 0, 0, 660, 661, 7, 7, 0, 0, 661, 85, 1, 0, 0, 0, 662, 663, 8, 8, 0, 0, 663, 87, 1, 0, 0, 0, 664, 666, 7, 9, 0, 0, 665, 667, 7, 10, 0, 0, 666, 665, 1, 0, 0, 0, 666, 667, 1, 0, 0, 0, 667, 669, 1, 0, 0, 0, 668, 670, 3, 80, 32, 0, 669, 668, 1, 0, 0, 0, 670, 671, 1, 0, 0, 0, 671, 669, 1, 0, 0, 0, 671, 672, 1, 0, 0, 0, 672, 89, 1, 0, 0, 0, 673, 674, 5, 64, 0, 0, 674, 91, 1, 0, 0, 0, 675, 676, 5, 96, 0, 0, 676, 93, 1, 0, 0, 0, 677, 681, 8, 11, 0, 0, 678, 679, 5, 96, 0, 0, 679, 681, 5, 96, 0, 0, 680, 677, 1, 0, 0, 0, 680, 678, 1, 0, 0, 0, 681, 95, 1, 0, 0, 0, 682, 683, 5, 95, 0, 0, 683, 97, 1, 0, 0, 0, 684, 688, 3, 82, 33, 0, 685, 688, 3, 80, 32, 0, 686, 688, 3, 96, 40, 0, 687, 684, 1, 0, 0, 0, 687, 685, 1, 0, 0, 0, 687, 686, 1, 0, 0, 0, 688, 99, 1, 0, 0, 0, 689, 694, 5, 34, 0, 0, 690, 693, 3, 84, 34, 0, 691, 693, 3, 86, 35, 0, 692, 690, 1, 0, 0, 0, 692, 691, 1, 0, 0, 0, 693, 696, 1, 0, 0, 0, 694, 692, 1, 0, 0, 0, 694, 695, 1, 0, 0, 0, 695, 697, 1, 0, 0, 0, 696, 694, 1, 0, 0, 0, 697, 719, 5, 34, 0, 0, 698, 699, 5, 34, 0, 0, 699, 700, 5, 34, 0, 0, 700, 701, 5, 34, 0, 0, 701, 705, 1, 0, 0, 0, 702, 704, 8, 1, 0, 0, 703, 702, 1, 0, 0, 0, 704, 707, 1, 0, 0, 0, 705, 706, 1, 0, 0, 0, 705, 703, 1, 0, 0, 0, 706, 708, 1, 0, 0, 0, 707, 705, 1, 0, 0, 0, 708, 709, 5, 34, 0, 0, 709, 710, 5, 34, 0, 0, 710, 711, 5, 34, 0, 0, 711, 713, 1, 0, 0, 0, 712, 714, 5, 34, 0, 0, 713, 712, 1, 0, 0, 0, 713, 714, 1, 0, 0, 0, 714, 716, 1, 0, 0, 0, 715, 717, 5, 34, 0, 0, 716, 715, 1, 0, 0, 0, 716, 717, 1, 0, 0, 0, 717, 719, 1, 0, 0, 0, 718, 689, 1, 0, 0, 0, 718, 698, 1, 0, 0, 0, 719, 101, 1, 0, 0, 0, 720, 722, 3, 80, 32, 0, 721, 720, 1, 0, 0, 0, 722, 723, 1, 0, 0, 0, 723, 721, 1, 0, 0, 0, 723, 724, 1, 0, 0, 0, 724, 103, 1, 0, 0, 0, 725, 727, 3, 80, 32, 0, 726, 725, 1, 0, 0, 0, 727, 728, 1, 0, 0, 0, 728, 726, 1, 0, 0, 0, 728, 729, 1, 0, 0, 0, 729, 730, 1, 0, 0, 0, 730, 734, 3, 120, 52, 0, 731, 733, 3, 80, 32, 0, 732, 731, 1, 0, 0, 0, 733, 736, 1, 0, 0, 0, 734, 732, 1, 0, 0, 0, 734, 735, 1, 0, 
0, 0, 735, 768, 1, 0, 0, 0, 736, 734, 1, 0, 0, 0, 737, 739, 3, 120, 52, 0, 738, 740, 3, 80, 32, 0, 739, 738, 1, 0, 0, 0, 740, 741, 1, 0, 0, 0, 741, 739, 1, 0, 0, 0, 741, 742, 1, 0, 0, 0, 742, 768, 1, 0, 0, 0, 743, 745, 3, 80, 32, 0, 744, 743, 1, 0, 0, 0, 745, 746, 1, 0, 0, 0, 746, 744, 1, 0, 0, 0, 746, 747, 1, 0, 0, 0, 747, 755, 1, 0, 0, 0, 748, 752, 3, 120, 52, 0, 749, 751, 3, 80, 32, 0, 750, 749, 1, 0, 0, 0, 751, 754, 1, 0, 0, 0, 752, 750, 1, 0, 0, 0, 752, 753, 1, 0, 0, 0, 753, 756, 1, 0, 0, 0, 754, 752, 1, 0, 0, 0, 755, 748, 1, 0, 0, 0, 755, 756, 1, 0, 0, 0, 756, 757, 1, 0, 0, 0, 757, 758, 3, 88, 36, 0, 758, 768, 1, 0, 0, 0, 759, 761, 3, 120, 52, 0, 760, 762, 3, 80, 32, 0, 761, 760, 1, 0, 0, 0, 762, 763, 1, 0, 0, 0, 763, 761, 1, 0, 0, 0, 763, 764, 1, 0, 0, 0, 764, 765, 1, 0, 0, 0, 765, 766, 3, 88, 36, 0, 766, 768, 1, 0, 0, 0, 767, 726, 1, 0, 0, 0, 767, 737, 1, 0, 0, 0, 767, 744, 1, 0, 0, 0, 767, 759, 1, 0, 0, 0, 768, 105, 1, 0, 0, 0, 769, 770, 5, 98, 0, 0, 770, 771, 5, 121, 0, 0, 771, 107, 1, 0, 0, 0, 772, 773, 5, 97, 0, 0, 773, 774, 5, 110, 0, 0, 774, 775, 5, 100, 0, 0, 775, 109, 1, 0, 0, 0, 776, 777, 5, 97, 0, 0, 777, 778, 5, 115, 0, 0, 778, 779, 5, 99, 0, 0, 779, 111, 1, 0, 0, 0, 780, 781, 5, 61, 0, 0, 781, 113, 1, 0, 0, 0, 782, 783, 5, 58, 0, 0, 783, 784, 5, 58, 0, 0, 784, 115, 1, 0, 0, 0, 785, 786, 5, 44, 0, 0, 786, 117, 1, 0, 0, 0, 787, 788, 5, 100, 0, 0, 788, 789, 5, 101, 0, 0, 789, 790, 5, 115, 0, 0, 790, 791, 5, 99, 0, 0, 791, 119, 1, 0, 0, 0, 792, 793, 5, 46, 0, 0, 793, 121, 1, 0, 0, 0, 794, 795, 5, 102, 0, 0, 795, 796, 5, 97, 0, 0, 796, 797, 5, 108, 0, 0, 797, 798, 5, 115, 0, 0, 798, 799, 5, 101, 0, 0, 799, 123, 1, 0, 0, 0, 800, 801, 5, 102, 0, 0, 801, 802, 5, 105, 0, 0, 802, 803, 5, 114, 0, 0, 803, 804, 5, 115, 0, 0, 804, 805, 5, 116, 0, 0, 805, 125, 1, 0, 0, 0, 806, 807, 5, 108, 0, 0, 807, 808, 5, 97, 0, 0, 808, 809, 5, 115, 0, 0, 809, 810, 5, 116, 0, 0, 810, 127, 1, 0, 0, 0, 811, 812, 5, 40, 0, 0, 812, 129, 1, 0, 0, 0, 813, 814, 5, 105, 0, 0, 814, 815, 5, 110, 0, 0, 815, 131, 1, 0, 0, 0, 816, 817, 5, 105, 0, 0, 817, 818, 5, 115, 0, 0, 818, 133, 1, 0, 0, 0, 819, 820, 5, 108, 0, 0, 820, 821, 5, 105, 0, 0, 821, 822, 5, 107, 0, 0, 822, 823, 5, 101, 0, 0, 823, 135, 1, 0, 0, 0, 824, 825, 5, 110, 0, 0, 825, 826, 5, 111, 0, 0, 826, 827, 5, 116, 0, 0, 827, 137, 1, 0, 0, 0, 828, 829, 5, 110, 0, 0, 829, 830, 5, 117, 0, 0, 830, 831, 5, 108, 0, 0, 831, 832, 5, 108, 0, 0, 832, 139, 1, 0, 0, 0, 833, 834, 5, 110, 0, 0, 834, 835, 5, 117, 0, 0, 835, 836, 5, 108, 0, 0, 836, 837, 5, 108, 0, 0, 837, 838, 5, 115, 0, 0, 838, 141, 1, 0, 0, 0, 839, 840, 5, 111, 0, 0, 840, 841, 5, 114, 0, 0, 841, 143, 1, 0, 0, 0, 842, 843, 5, 63, 0, 0, 843, 145, 1, 0, 0, 0, 844, 845, 5, 114, 0, 0, 845, 846, 5, 108, 0, 0, 846, 847, 5, 105, 0, 0, 847, 848, 5, 107, 0, 0, 848, 849, 5, 101, 0, 0, 849, 147, 1, 0, 0, 0, 850, 851, 5, 41, 0, 0, 851, 149, 1, 0, 0, 0, 852, 853, 5, 116, 0, 0, 853, 854, 5, 114, 0, 0, 854, 855, 5, 117, 0, 0, 855, 856, 5, 101, 0, 0, 856, 151, 1, 0, 0, 0, 857, 858, 5, 61, 0, 0, 858, 859, 5, 61, 0, 0, 859, 153, 1, 0, 0, 0, 860, 861, 5, 61, 0, 0, 861, 862, 5, 126, 0, 0, 862, 155, 1, 0, 0, 0, 863, 864, 5, 33, 0, 0, 864, 865, 5, 61, 0, 0, 865, 157, 1, 0, 0, 0, 866, 867, 5, 60, 0, 0, 867, 159, 1, 0, 0, 0, 868, 869, 5, 60, 0, 0, 869, 870, 5, 61, 0, 0, 870, 161, 1, 0, 0, 0, 871, 872, 5, 62, 0, 0, 872, 163, 1, 0, 0, 0, 873, 874, 5, 62, 0, 0, 874, 875, 5, 61, 0, 0, 875, 165, 1, 0, 0, 0, 876, 877, 5, 43, 0, 0, 877, 167, 1, 0, 0, 0, 878, 879, 5, 45, 0, 0, 879, 169, 1, 0, 0, 0, 880, 881, 5, 42, 0, 0, 881, 
171, 1, 0, 0, 0, 882, 883, 5, 47, 0, 0, 883, 173, 1, 0, 0, 0, 884, 885, 5, 37, 0, 0, 885, 175, 1, 0, 0, 0, 886, 887, 3, 144, 64, 0, 887, 891, 3, 82, 33, 0, 888, 890, 3, 98, 41, 0, 889, 888, 1, 0, 0, 0, 890, 893, 1, 0, 0, 0, 891, 889, 1, 0, 0, 0, 891, 892, 1, 0, 0, 0, 892, 901, 1, 0, 0, 0, 893, 891, 1, 0, 0, 0, 894, 896, 3, 144, 64, 0, 895, 897, 3, 80, 32, 0, 896, 895, 1, 0, 0, 0, 897, 898, 1, 0, 0, 0, 898, 896, 1, 0, 0, 0, 898, 899, 1, 0, 0, 0, 899, 901, 1, 0, 0, 0, 900, 886, 1, 0, 0, 0, 900, 894, 1, 0, 0, 0, 901, 177, 1, 0, 0, 0, 902, 903, 5, 91, 0, 0, 903, 904, 1, 0, 0, 0, 904, 905, 6, 81, 0, 0, 905, 906, 6, 81, 0, 0, 906, 179, 1, 0, 0, 0, 907, 908, 5, 93, 0, 0, 908, 909, 1, 0, 0, 0, 909, 910, 6, 82, 15, 0, 910, 911, 6, 82, 15, 0, 911, 181, 1, 0, 0, 0, 912, 916, 3, 82, 33, 0, 913, 915, 3, 98, 41, 0, 914, 913, 1, 0, 0, 0, 915, 918, 1, 0, 0, 0, 916, 914, 1, 0, 0, 0, 916, 917, 1, 0, 0, 0, 917, 929, 1, 0, 0, 0, 918, 916, 1, 0, 0, 0, 919, 922, 3, 96, 40, 0, 920, 922, 3, 90, 37, 0, 921, 919, 1, 0, 0, 0, 921, 920, 1, 0, 0, 0, 922, 924, 1, 0, 0, 0, 923, 925, 3, 98, 41, 0, 924, 923, 1, 0, 0, 0, 925, 926, 1, 0, 0, 0, 926, 924, 1, 0, 0, 0, 926, 927, 1, 0, 0, 0, 927, 929, 1, 0, 0, 0, 928, 912, 1, 0, 0, 0, 928, 921, 1, 0, 0, 0, 929, 183, 1, 0, 0, 0, 930, 932, 3, 92, 38, 0, 931, 933, 3, 94, 39, 0, 932, 931, 1, 0, 0, 0, 933, 934, 1, 0, 0, 0, 934, 932, 1, 0, 0, 0, 934, 935, 1, 0, 0, 0, 935, 936, 1, 0, 0, 0, 936, 937, 3, 92, 38, 0, 937, 185, 1, 0, 0, 0, 938, 939, 3, 184, 84, 0, 939, 187, 1, 0, 0, 0, 940, 941, 3, 58, 21, 0, 941, 942, 1, 0, 0, 0, 942, 943, 6, 86, 11, 0, 943, 189, 1, 0, 0, 0, 944, 945, 3, 60, 22, 0, 945, 946, 1, 0, 0, 0, 946, 947, 6, 87, 11, 0, 947, 191, 1, 0, 0, 0, 948, 949, 3, 62, 23, 0, 949, 950, 1, 0, 0, 0, 950, 951, 6, 88, 11, 0, 951, 193, 1, 0, 0, 0, 952, 953, 3, 78, 31, 0, 953, 954, 1, 0, 0, 0, 954, 955, 6, 89, 14, 0, 955, 956, 6, 89, 15, 0, 956, 195, 1, 0, 0, 0, 957, 958, 3, 178, 81, 0, 958, 959, 1, 0, 0, 0, 959, 960, 6, 90, 12, 0, 960, 197, 1, 0, 0, 0, 961, 962, 3, 180, 82, 0, 962, 963, 1, 0, 0, 0, 963, 964, 6, 91, 16, 0, 964, 199, 1, 0, 0, 0, 965, 966, 3, 364, 174, 0, 966, 967, 1, 0, 0, 0, 967, 968, 6, 92, 17, 0, 968, 201, 1, 0, 0, 0, 969, 970, 3, 116, 50, 0, 970, 971, 1, 0, 0, 0, 971, 972, 6, 93, 18, 0, 972, 203, 1, 0, 0, 0, 973, 974, 3, 112, 48, 0, 974, 975, 1, 0, 0, 0, 975, 976, 6, 94, 19, 0, 976, 205, 1, 0, 0, 0, 977, 978, 5, 109, 0, 0, 978, 979, 5, 101, 0, 0, 979, 980, 5, 116, 0, 0, 980, 981, 5, 97, 0, 0, 981, 982, 5, 100, 0, 0, 982, 983, 5, 97, 0, 0, 983, 984, 5, 116, 0, 0, 984, 985, 5, 97, 0, 0, 985, 207, 1, 0, 0, 0, 986, 987, 3, 66, 25, 0, 987, 988, 1, 0, 0, 0, 988, 989, 6, 96, 20, 0, 989, 209, 1, 0, 0, 0, 990, 991, 3, 100, 42, 0, 991, 992, 1, 0, 0, 0, 992, 993, 6, 97, 21, 0, 993, 211, 1, 0, 0, 0, 994, 995, 3, 58, 21, 0, 995, 996, 1, 0, 0, 0, 996, 997, 6, 98, 11, 0, 997, 213, 1, 0, 0, 0, 998, 999, 3, 60, 22, 0, 999, 1000, 1, 0, 0, 0, 1000, 1001, 6, 99, 11, 0, 1001, 215, 1, 0, 0, 0, 1002, 1003, 3, 62, 23, 0, 1003, 1004, 1, 0, 0, 0, 1004, 1005, 6, 100, 11, 0, 1005, 217, 1, 0, 0, 0, 1006, 1007, 3, 78, 31, 0, 1007, 1008, 1, 0, 0, 0, 1008, 1009, 6, 101, 14, 0, 1009, 1010, 6, 101, 15, 0, 1010, 219, 1, 0, 0, 0, 1011, 1012, 3, 120, 52, 0, 1012, 1013, 1, 0, 0, 0, 1013, 1014, 6, 102, 22, 0, 1014, 221, 1, 0, 0, 0, 1015, 1016, 3, 116, 50, 0, 1016, 1017, 1, 0, 0, 0, 1017, 1018, 6, 103, 18, 0, 1018, 223, 1, 0, 0, 0, 1019, 1024, 3, 82, 33, 0, 1020, 1024, 3, 80, 32, 0, 1021, 1024, 3, 96, 40, 0, 1022, 1024, 3, 170, 77, 0, 1023, 1019, 1, 0, 0, 0, 1023, 1020, 1, 0, 0, 0, 1023, 1021, 1, 0, 0, 
0, 1023, 1022, 1, 0, 0, 0, 1024, 225, 1, 0, 0, 0, 1025, 1028, 3, 82, 33, 0, 1026, 1028, 3, 170, 77, 0, 1027, 1025, 1, 0, 0, 0, 1027, 1026, 1, 0, 0, 0, 1028, 1032, 1, 0, 0, 0, 1029, 1031, 3, 224, 104, 0, 1030, 1029, 1, 0, 0, 0, 1031, 1034, 1, 0, 0, 0, 1032, 1030, 1, 0, 0, 0, 1032, 1033, 1, 0, 0, 0, 1033, 1045, 1, 0, 0, 0, 1034, 1032, 1, 0, 0, 0, 1035, 1038, 3, 96, 40, 0, 1036, 1038, 3, 90, 37, 0, 1037, 1035, 1, 0, 0, 0, 1037, 1036, 1, 0, 0, 0, 1038, 1040, 1, 0, 0, 0, 1039, 1041, 3, 224, 104, 0, 1040, 1039, 1, 0, 0, 0, 1041, 1042, 1, 0, 0, 0, 1042, 1040, 1, 0, 0, 0, 1042, 1043, 1, 0, 0, 0, 1043, 1045, 1, 0, 0, 0, 1044, 1027, 1, 0, 0, 0, 1044, 1037, 1, 0, 0, 0, 1045, 227, 1, 0, 0, 0, 1046, 1049, 3, 226, 105, 0, 1047, 1049, 3, 184, 84, 0, 1048, 1046, 1, 0, 0, 0, 1048, 1047, 1, 0, 0, 0, 1049, 1050, 1, 0, 0, 0, 1050, 1048, 1, 0, 0, 0, 1050, 1051, 1, 0, 0, 0, 1051, 229, 1, 0, 0, 0, 1052, 1053, 3, 58, 21, 0, 1053, 1054, 1, 0, 0, 0, 1054, 1055, 6, 107, 11, 0, 1055, 231, 1, 0, 0, 0, 1056, 1057, 3, 60, 22, 0, 1057, 1058, 1, 0, 0, 0, 1058, 1059, 6, 108, 11, 0, 1059, 233, 1, 0, 0, 0, 1060, 1061, 3, 62, 23, 0, 1061, 1062, 1, 0, 0, 0, 1062, 1063, 6, 109, 11, 0, 1063, 235, 1, 0, 0, 0, 1064, 1065, 3, 78, 31, 0, 1065, 1066, 1, 0, 0, 0, 1066, 1067, 6, 110, 14, 0, 1067, 1068, 6, 110, 15, 0, 1068, 237, 1, 0, 0, 0, 1069, 1070, 3, 112, 48, 0, 1070, 1071, 1, 0, 0, 0, 1071, 1072, 6, 111, 19, 0, 1072, 239, 1, 0, 0, 0, 1073, 1074, 3, 116, 50, 0, 1074, 1075, 1, 0, 0, 0, 1075, 1076, 6, 112, 18, 0, 1076, 241, 1, 0, 0, 0, 1077, 1078, 3, 120, 52, 0, 1078, 1079, 1, 0, 0, 0, 1079, 1080, 6, 113, 22, 0, 1080, 243, 1, 0, 0, 0, 1081, 1082, 5, 97, 0, 0, 1082, 1083, 5, 115, 0, 0, 1083, 245, 1, 0, 0, 0, 1084, 1085, 3, 228, 106, 0, 1085, 1086, 1, 0, 0, 0, 1086, 1087, 6, 115, 23, 0, 1087, 247, 1, 0, 0, 0, 1088, 1089, 3, 58, 21, 0, 1089, 1090, 1, 0, 0, 0, 1090, 1091, 6, 116, 11, 0, 1091, 249, 1, 0, 0, 0, 1092, 1093, 3, 60, 22, 0, 1093, 1094, 1, 0, 0, 0, 1094, 1095, 6, 117, 11, 0, 1095, 251, 1, 0, 0, 0, 1096, 1097, 3, 62, 23, 0, 1097, 1098, 1, 0, 0, 0, 1098, 1099, 6, 118, 11, 0, 1099, 253, 1, 0, 0, 0, 1100, 1101, 3, 78, 31, 0, 1101, 1102, 1, 0, 0, 0, 1102, 1103, 6, 119, 14, 0, 1103, 1104, 6, 119, 15, 0, 1104, 255, 1, 0, 0, 0, 1105, 1106, 3, 178, 81, 0, 1106, 1107, 1, 0, 0, 0, 1107, 1108, 6, 120, 12, 0, 1108, 1109, 6, 120, 24, 0, 1109, 257, 1, 0, 0, 0, 1110, 1111, 5, 111, 0, 0, 1111, 1112, 5, 110, 0, 0, 1112, 1113, 1, 0, 0, 0, 1113, 1114, 6, 121, 25, 0, 1114, 259, 1, 0, 0, 0, 1115, 1116, 5, 119, 0, 0, 1116, 1117, 5, 105, 0, 0, 1117, 1118, 5, 116, 0, 0, 1118, 1119, 5, 104, 0, 0, 1119, 1120, 1, 0, 0, 0, 1120, 1121, 6, 122, 25, 0, 1121, 261, 1, 0, 0, 0, 1122, 1123, 8, 12, 0, 0, 1123, 263, 1, 0, 0, 0, 1124, 1126, 3, 262, 123, 0, 1125, 1124, 1, 0, 0, 0, 1126, 1127, 1, 0, 0, 0, 1127, 1125, 1, 0, 0, 0, 1127, 1128, 1, 0, 0, 0, 1128, 1129, 1, 0, 0, 0, 1129, 1130, 3, 364, 174, 0, 1130, 1132, 1, 0, 0, 0, 1131, 1125, 1, 0, 0, 0, 1131, 1132, 1, 0, 0, 0, 1132, 1134, 1, 0, 0, 0, 1133, 1135, 3, 262, 123, 0, 1134, 1133, 1, 0, 0, 0, 1135, 1136, 1, 0, 0, 0, 1136, 1134, 1, 0, 0, 0, 1136, 1137, 1, 0, 0, 0, 1137, 265, 1, 0, 0, 0, 1138, 1139, 3, 264, 124, 0, 1139, 1140, 1, 0, 0, 0, 1140, 1141, 6, 125, 26, 0, 1141, 267, 1, 0, 0, 0, 1142, 1143, 3, 58, 21, 0, 1143, 1144, 1, 0, 0, 0, 1144, 1145, 6, 126, 11, 0, 1145, 269, 1, 0, 0, 0, 1146, 1147, 3, 60, 22, 0, 1147, 1148, 1, 0, 0, 0, 1148, 1149, 6, 127, 11, 0, 1149, 271, 1, 0, 0, 0, 1150, 1151, 3, 62, 23, 0, 1151, 1152, 1, 0, 0, 0, 1152, 1153, 6, 128, 11, 0, 1153, 273, 1, 0, 0, 0, 1154, 1155, 3, 78, 31, 0, 1155, 
1156, 1, 0, 0, 0, 1156, 1157, 6, 129, 14, 0, 1157, 1158, 6, 129, 15, 0, 1158, 1159, 6, 129, 15, 0, 1159, 275, 1, 0, 0, 0, 1160, 1161, 3, 112, 48, 0, 1161, 1162, 1, 0, 0, 0, 1162, 1163, 6, 130, 19, 0, 1163, 277, 1, 0, 0, 0, 1164, 1165, 3, 116, 50, 0, 1165, 1166, 1, 0, 0, 0, 1166, 1167, 6, 131, 18, 0, 1167, 279, 1, 0, 0, 0, 1168, 1169, 3, 120, 52, 0, 1169, 1170, 1, 0, 0, 0, 1170, 1171, 6, 132, 22, 0, 1171, 281, 1, 0, 0, 0, 1172, 1173, 3, 260, 122, 0, 1173, 1174, 1, 0, 0, 0, 1174, 1175, 6, 133, 27, 0, 1175, 283, 1, 0, 0, 0, 1176, 1177, 3, 228, 106, 0, 1177, 1178, 1, 0, 0, 0, 1178, 1179, 6, 134, 23, 0, 1179, 285, 1, 0, 0, 0, 1180, 1181, 3, 186, 85, 0, 1181, 1182, 1, 0, 0, 0, 1182, 1183, 6, 135, 28, 0, 1183, 287, 1, 0, 0, 0, 1184, 1185, 3, 58, 21, 0, 1185, 1186, 1, 0, 0, 0, 1186, 1187, 6, 136, 11, 0, 1187, 289, 1, 0, 0, 0, 1188, 1189, 3, 60, 22, 0, 1189, 1190, 1, 0, 0, 0, 1190, 1191, 6, 137, 11, 0, 1191, 291, 1, 0, 0, 0, 1192, 1193, 3, 62, 23, 0, 1193, 1194, 1, 0, 0, 0, 1194, 1195, 6, 138, 11, 0, 1195, 293, 1, 0, 0, 0, 1196, 1197, 3, 78, 31, 0, 1197, 1198, 1, 0, 0, 0, 1198, 1199, 6, 139, 14, 0, 1199, 1200, 6, 139, 15, 0, 1200, 295, 1, 0, 0, 0, 1201, 1202, 3, 364, 174, 0, 1202, 1203, 1, 0, 0, 0, 1203, 1204, 6, 140, 17, 0, 1204, 297, 1, 0, 0, 0, 1205, 1206, 3, 116, 50, 0, 1206, 1207, 1, 0, 0, 0, 1207, 1208, 6, 141, 18, 0, 1208, 299, 1, 0, 0, 0, 1209, 1210, 3, 120, 52, 0, 1210, 1211, 1, 0, 0, 0, 1211, 1212, 6, 142, 22, 0, 1212, 301, 1, 0, 0, 0, 1213, 1214, 3, 258, 121, 0, 1214, 1215, 1, 0, 0, 0, 1215, 1216, 6, 143, 29, 0, 1216, 1217, 6, 143, 30, 0, 1217, 303, 1, 0, 0, 0, 1218, 1219, 3, 66, 25, 0, 1219, 1220, 1, 0, 0, 0, 1220, 1221, 6, 144, 20, 0, 1221, 305, 1, 0, 0, 0, 1222, 1223, 3, 100, 42, 0, 1223, 1224, 1, 0, 0, 0, 1224, 1225, 6, 145, 21, 0, 1225, 307, 1, 0, 0, 0, 1226, 1227, 3, 58, 21, 0, 1227, 1228, 1, 0, 0, 0, 1228, 1229, 6, 146, 11, 0, 1229, 309, 1, 0, 0, 0, 1230, 1231, 3, 60, 22, 0, 1231, 1232, 1, 0, 0, 0, 1232, 1233, 6, 147, 11, 0, 1233, 311, 1, 0, 0, 0, 1234, 1235, 3, 62, 23, 0, 1235, 1236, 1, 0, 0, 0, 1236, 1237, 6, 148, 11, 0, 1237, 313, 1, 0, 0, 0, 1238, 1239, 3, 78, 31, 0, 1239, 1240, 1, 0, 0, 0, 1240, 1241, 6, 149, 14, 0, 1241, 1242, 6, 149, 15, 0, 1242, 1243, 6, 149, 15, 0, 1243, 315, 1, 0, 0, 0, 1244, 1245, 3, 116, 50, 0, 1245, 1246, 1, 0, 0, 0, 1246, 1247, 6, 150, 18, 0, 1247, 317, 1, 0, 0, 0, 1248, 1249, 3, 120, 52, 0, 1249, 1250, 1, 0, 0, 0, 1250, 1251, 6, 151, 22, 0, 1251, 319, 1, 0, 0, 0, 1252, 1253, 3, 228, 106, 0, 1253, 1254, 1, 0, 0, 0, 1254, 1255, 6, 152, 23, 0, 1255, 321, 1, 0, 0, 0, 1256, 1257, 3, 58, 21, 0, 1257, 1258, 1, 0, 0, 0, 1258, 1259, 6, 153, 11, 0, 1259, 323, 1, 0, 0, 0, 1260, 1261, 3, 60, 22, 0, 1261, 1262, 1, 0, 0, 0, 1262, 1263, 6, 154, 11, 0, 1263, 325, 1, 0, 0, 0, 1264, 1265, 3, 62, 23, 0, 1265, 1266, 1, 0, 0, 0, 1266, 1267, 6, 155, 11, 0, 1267, 327, 1, 0, 0, 0, 1268, 1269, 3, 78, 31, 0, 1269, 1270, 1, 0, 0, 0, 1270, 1271, 6, 156, 14, 0, 1271, 1272, 6, 156, 15, 0, 1272, 329, 1, 0, 0, 0, 1273, 1274, 3, 120, 52, 0, 1274, 1275, 1, 0, 0, 0, 1275, 1276, 6, 157, 22, 0, 1276, 331, 1, 0, 0, 0, 1277, 1278, 3, 186, 85, 0, 1278, 1279, 1, 0, 0, 0, 1279, 1280, 6, 158, 28, 0, 1280, 333, 1, 0, 0, 0, 1281, 1282, 3, 182, 83, 0, 1282, 1283, 1, 0, 0, 0, 1283, 1284, 6, 159, 31, 0, 1284, 335, 1, 0, 0, 0, 1285, 1286, 3, 58, 21, 0, 1286, 1287, 1, 0, 0, 0, 1287, 1288, 6, 160, 11, 0, 1288, 337, 1, 0, 0, 0, 1289, 1290, 3, 60, 22, 0, 1290, 1291, 1, 0, 0, 0, 1291, 1292, 6, 161, 11, 0, 1292, 339, 1, 0, 0, 0, 1293, 1294, 3, 62, 23, 0, 1294, 1295, 1, 0, 0, 0, 1295, 1296, 6, 162, 11, 
0, 1296, 341, 1, 0, 0, 0, 1297, 1298, 3, 78, 31, 0, 1298, 1299, 1, 0, 0, 0, 1299, 1300, 6, 163, 14, 0, 1300, 1301, 6, 163, 15, 0, 1301, 343, 1, 0, 0, 0, 1302, 1303, 5, 105, 0, 0, 1303, 1304, 5, 110, 0, 0, 1304, 1305, 5, 102, 0, 0, 1305, 1306, 5, 111, 0, 0, 1306, 345, 1, 0, 0, 0, 1307, 1308, 3, 58, 21, 0, 1308, 1309, 1, 0, 0, 0, 1309, 1310, 6, 165, 11, 0, 1310, 347, 1, 0, 0, 0, 1311, 1312, 3, 60, 22, 0, 1312, 1313, 1, 0, 0, 0, 1313, 1314, 6, 166, 11, 0, 1314, 349, 1, 0, 0, 0, 1315, 1316, 3, 62, 23, 0, 1316, 1317, 1, 0, 0, 0, 1317, 1318, 6, 167, 11, 0, 1318, 351, 1, 0, 0, 0, 1319, 1320, 3, 78, 31, 0, 1320, 1321, 1, 0, 0, 0, 1321, 1322, 6, 168, 14, 0, 1322, 1323, 6, 168, 15, 0, 1323, 353, 1, 0, 0, 0, 1324, 1325, 5, 102, 0, 0, 1325, 1326, 5, 117, 0, 0, 1326, 1327, 5, 110, 0, 0, 1327, 1328, 5, 99, 0, 0, 1328, 1329, 5, 116, 0, 0, 1329, 1330, 5, 105, 0, 0, 1330, 1331, 5, 111, 0, 0, 1331, 1332, 5, 110, 0, 0, 1332, 1333, 5, 115, 0, 0, 1333, 355, 1, 0, 0, 0, 1334, 1335, 3, 58, 21, 0, 1335, 1336, 1, 0, 0, 0, 1336, 1337, 6, 170, 11, 0, 1337, 357, 1, 0, 0, 0, 1338, 1339, 3, 60, 22, 0, 1339, 1340, 1, 0, 0, 0, 1340, 1341, 6, 171, 11, 0, 1341, 359, 1, 0, 0, 0, 1342, 1343, 3, 62, 23, 0, 1343, 1344, 1, 0, 0, 0, 1344, 1345, 6, 172, 11, 0, 1345, 361, 1, 0, 0, 0, 1346, 1347, 3, 180, 82, 0, 1347, 1348, 1, 0, 0, 0, 1348, 1349, 6, 173, 16, 0, 1349, 1350, 6, 173, 15, 0, 1350, 363, 1, 0, 0, 0, 1351, 1352, 5, 58, 0, 0, 1352, 365, 1, 0, 0, 0, 1353, 1359, 3, 90, 37, 0, 1354, 1359, 3, 80, 32, 0, 1355, 1359, 3, 120, 52, 0, 1356, 1359, 3, 82, 33, 0, 1357, 1359, 3, 96, 40, 0, 1358, 1353, 1, 0, 0, 0, 1358, 1354, 1, 0, 0, 0, 1358, 1355, 1, 0, 0, 0, 1358, 1356, 1, 0, 0, 0, 1358, 1357, 1, 0, 0, 0, 1359, 1360, 1, 0, 0, 0, 1360, 1358, 1, 0, 0, 0, 1360, 1361, 1, 0, 0, 0, 1361, 367, 1, 0, 0, 0, 1362, 1363, 3, 58, 21, 0, 1363, 1364, 1, 0, 0, 0, 1364, 1365, 6, 176, 11, 0, 1365, 369, 1, 0, 0, 0, 1366, 1367, 3, 60, 22, 0, 1367, 1368, 1, 0, 0, 0, 1368, 1369, 6, 177, 11, 0, 1369, 371, 1, 0, 0, 0, 1370, 1371, 3, 62, 23, 0, 1371, 1372, 1, 0, 0, 0, 1372, 1373, 6, 178, 11, 0, 1373, 373, 1, 0, 0, 0, 1374, 1375, 3, 78, 31, 0, 1375, 1376, 1, 0, 0, 0, 1376, 1377, 6, 179, 14, 0, 1377, 1378, 6, 179, 15, 0, 1378, 375, 1, 0, 0, 0, 1379, 1380, 3, 66, 25, 0, 1380, 1381, 1, 0, 0, 0, 1381, 1382, 6, 180, 20, 0, 1382, 1383, 6, 180, 15, 0, 1383, 1384, 6, 180, 32, 0, 1384, 377, 1, 0, 0, 0, 1385, 1386, 3, 100, 42, 0, 1386, 1387, 1, 0, 0, 0, 1387, 1388, 6, 181, 21, 0, 1388, 1389, 6, 181, 15, 0, 1389, 1390, 6, 181, 32, 0, 1390, 379, 1, 0, 0, 0, 1391, 1392, 3, 58, 21, 0, 1392, 1393, 1, 0, 0, 0, 1393, 1394, 6, 182, 11, 0, 1394, 381, 1, 0, 0, 0, 1395, 1396, 3, 60, 22, 0, 1396, 1397, 1, 0, 0, 0, 1397, 1398, 6, 183, 11, 0, 1398, 383, 1, 0, 0, 0, 1399, 1400, 3, 62, 23, 0, 1400, 1401, 1, 0, 0, 0, 1401, 1402, 6, 184, 11, 0, 1402, 385, 1, 0, 0, 0, 1403, 1404, 3, 364, 174, 0, 1404, 1405, 1, 0, 0, 0, 1405, 1406, 6, 185, 17, 0, 1406, 1407, 6, 185, 15, 0, 1407, 1408, 6, 185, 7, 0, 1408, 387, 1, 0, 0, 0, 1409, 1410, 3, 116, 50, 0, 1410, 1411, 1, 0, 0, 0, 1411, 1412, 6, 186, 18, 0, 1412, 1413, 6, 186, 15, 0, 1413, 1414, 6, 186, 7, 0, 1414, 389, 1, 0, 0, 0, 1415, 1416, 3, 58, 21, 0, 1416, 1417, 1, 0, 0, 0, 1417, 1418, 6, 187, 11, 0, 1418, 391, 1, 0, 0, 0, 1419, 1420, 3, 60, 22, 0, 1420, 1421, 1, 0, 0, 0, 1421, 1422, 6, 188, 11, 0, 1422, 393, 1, 0, 0, 0, 1423, 1424, 3, 62, 23, 0, 1424, 1425, 1, 0, 0, 0, 1425, 1426, 6, 189, 11, 0, 1426, 395, 1, 0, 0, 0, 1427, 1428, 3, 186, 85, 0, 1428, 1429, 1, 0, 0, 0, 1429, 1430, 6, 190, 15, 0, 1430, 1431, 6, 190, 0, 0, 1431, 1432, 6, 190, 
28, 0, 1432, 397, 1, 0, 0, 0, 1433, 1434, 3, 182, 83, 0, 1434, 1435, 1, 0, 0, 0, 1435, 1436, 6, 191, 15, 0, 1436, 1437, 6, 191, 0, 0, 1437, 1438, 6, 191, 31, 0, 1438, 399, 1, 0, 0, 0, 1439, 1440, 3, 106, 45, 0, 1440, 1441, 1, 0, 0, 0, 1441, 1442, 6, 192, 15, 0, 1442, 1443, 6, 192, 0, 0, 1443, 1444, 6, 192, 33, 0, 1444, 401, 1, 0, 0, 0, 1445, 1446, 3, 78, 31, 0, 1446, 1447, 1, 0, 0, 0, 1447, 1448, 6, 193, 14, 0, 1448, 1449, 6, 193, 15, 0, 1449, 403, 1, 0, 0, 0, 65, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 576, 586, 590, 593, 602, 604, 615, 622, 627, 666, 671, 680, 687, 692, 694, 705, 713, 716, 718, 723, 728, 734, 741, 746, 752, 755, 763, 767, 891, 898, 900, 916, 921, 926, 928, 934, 1023, 1027, 1032, 1037, 1042, 1044, 1048, 1050, 1127, 1131, 1136, 1358, 1360, 34, 5, 2, 0, 5, 4, 0, 5, 6, 0, 5, 1, 0, 5, 3, 0, 5, 8, 0, 5, 12, 0, 5, 14, 0, 5, 10, 0, 5, 5, 0, 5, 11, 0, 0, 1, 0, 7, 69, 0, 5, 0, 0, 7, 29, 0, 4, 0, 0, 7, 70, 0, 7, 114, 0, 7, 38, 0, 7, 36, 0, 7, 25, 0, 7, 30, 0, 7, 40, 0, 7, 80, 0, 5, 13, 0, 5, 7, 0, 7, 90, 0, 7, 89, 0, 7, 72, 0, 7, 88, 0, 5, 9, 0, 7, 71, 0, 5, 15, 0, 7, 33, 0] \ No newline at end of file diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.java index 1511be73d40e1..de837d1764791 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.java @@ -20,29 +20,28 @@ public class EsqlBaseLexer extends Lexer { DISSECT=1, DROP=2, ENRICH=3, EVAL=4, EXPLAIN=5, FROM=6, GROK=7, INLINESTATS=8, KEEP=9, LIMIT=10, LOOKUP=11, META=12, METRICS=13, MV_EXPAND=14, RENAME=15, ROW=16, SHOW=17, SORT=18, STATS=19, WHERE=20, UNKNOWN_CMD=21, LINE_COMMENT=22, - MULTILINE_COMMENT=23, WS=24, INDEX_UNQUOTED_IDENTIFIER=25, EXPLAIN_WS=26, - EXPLAIN_LINE_COMMENT=27, EXPLAIN_MULTILINE_COMMENT=28, PIPE=29, QUOTED_STRING=30, - INTEGER_LITERAL=31, DECIMAL_LITERAL=32, BY=33, AND=34, ASC=35, ASSIGN=36, - CAST_OP=37, COMMA=38, DESC=39, DOT=40, FALSE=41, FIRST=42, LAST=43, LP=44, - IN=45, IS=46, LIKE=47, NOT=48, NULL=49, NULLS=50, OR=51, PARAM=52, RLIKE=53, - RP=54, TRUE=55, EQ=56, CIEQ=57, NEQ=58, LT=59, LTE=60, GT=61, GTE=62, - PLUS=63, MINUS=64, ASTERISK=65, SLASH=66, PERCENT=67, NAMED_OR_POSITIONAL_PARAM=68, - OPENING_BRACKET=69, CLOSING_BRACKET=70, UNQUOTED_IDENTIFIER=71, QUOTED_IDENTIFIER=72, - EXPR_LINE_COMMENT=73, EXPR_MULTILINE_COMMENT=74, EXPR_WS=75, METADATA=76, - FROM_LINE_COMMENT=77, FROM_MULTILINE_COMMENT=78, FROM_WS=79, ID_PATTERN=80, - PROJECT_LINE_COMMENT=81, PROJECT_MULTILINE_COMMENT=82, PROJECT_WS=83, - AS=84, RENAME_LINE_COMMENT=85, RENAME_MULTILINE_COMMENT=86, RENAME_WS=87, - ON=88, WITH=89, ENRICH_POLICY_NAME=90, ENRICH_LINE_COMMENT=91, ENRICH_MULTILINE_COMMENT=92, - ENRICH_WS=93, ENRICH_FIELD_LINE_COMMENT=94, ENRICH_FIELD_MULTILINE_COMMENT=95, - ENRICH_FIELD_WS=96, LOOKUP_LINE_COMMENT=97, LOOKUP_MULTILINE_COMMENT=98, - LOOKUP_WS=99, LOOKUP_FIELD_LINE_COMMENT=100, LOOKUP_FIELD_MULTILINE_COMMENT=101, - LOOKUP_FIELD_WS=102, MVEXPAND_LINE_COMMENT=103, MVEXPAND_MULTILINE_COMMENT=104, - MVEXPAND_WS=105, INFO=106, SHOW_LINE_COMMENT=107, SHOW_MULTILINE_COMMENT=108, - SHOW_WS=109, FUNCTIONS=110, META_LINE_COMMENT=111, META_MULTILINE_COMMENT=112, - META_WS=113, COLON=114, SETTING=115, SETTING_LINE_COMMENT=116, SETTTING_MULTILINE_COMMENT=117, - SETTING_WS=118, METRICS_LINE_COMMENT=119, METRICS_MULTILINE_COMMENT=120, - 
METRICS_WS=121, CLOSING_METRICS_LINE_COMMENT=122, CLOSING_METRICS_MULTILINE_COMMENT=123, - CLOSING_METRICS_WS=124; + MULTILINE_COMMENT=23, WS=24, UNQUOTED_SOURCE=25, EXPLAIN_WS=26, EXPLAIN_LINE_COMMENT=27, + EXPLAIN_MULTILINE_COMMENT=28, PIPE=29, QUOTED_STRING=30, INTEGER_LITERAL=31, + DECIMAL_LITERAL=32, BY=33, AND=34, ASC=35, ASSIGN=36, CAST_OP=37, COMMA=38, + DESC=39, DOT=40, FALSE=41, FIRST=42, LAST=43, LP=44, IN=45, IS=46, LIKE=47, + NOT=48, NULL=49, NULLS=50, OR=51, PARAM=52, RLIKE=53, RP=54, TRUE=55, + EQ=56, CIEQ=57, NEQ=58, LT=59, LTE=60, GT=61, GTE=62, PLUS=63, MINUS=64, + ASTERISK=65, SLASH=66, PERCENT=67, NAMED_OR_POSITIONAL_PARAM=68, OPENING_BRACKET=69, + CLOSING_BRACKET=70, UNQUOTED_IDENTIFIER=71, QUOTED_IDENTIFIER=72, EXPR_LINE_COMMENT=73, + EXPR_MULTILINE_COMMENT=74, EXPR_WS=75, METADATA=76, FROM_LINE_COMMENT=77, + FROM_MULTILINE_COMMENT=78, FROM_WS=79, ID_PATTERN=80, PROJECT_LINE_COMMENT=81, + PROJECT_MULTILINE_COMMENT=82, PROJECT_WS=83, AS=84, RENAME_LINE_COMMENT=85, + RENAME_MULTILINE_COMMENT=86, RENAME_WS=87, ON=88, WITH=89, ENRICH_POLICY_NAME=90, + ENRICH_LINE_COMMENT=91, ENRICH_MULTILINE_COMMENT=92, ENRICH_WS=93, ENRICH_FIELD_LINE_COMMENT=94, + ENRICH_FIELD_MULTILINE_COMMENT=95, ENRICH_FIELD_WS=96, LOOKUP_LINE_COMMENT=97, + LOOKUP_MULTILINE_COMMENT=98, LOOKUP_WS=99, LOOKUP_FIELD_LINE_COMMENT=100, + LOOKUP_FIELD_MULTILINE_COMMENT=101, LOOKUP_FIELD_WS=102, MVEXPAND_LINE_COMMENT=103, + MVEXPAND_MULTILINE_COMMENT=104, MVEXPAND_WS=105, INFO=106, SHOW_LINE_COMMENT=107, + SHOW_MULTILINE_COMMENT=108, SHOW_WS=109, FUNCTIONS=110, META_LINE_COMMENT=111, + META_MULTILINE_COMMENT=112, META_WS=113, COLON=114, SETTING=115, SETTING_LINE_COMMENT=116, + SETTTING_MULTILINE_COMMENT=117, SETTING_WS=118, METRICS_LINE_COMMENT=119, + METRICS_MULTILINE_COMMENT=120, METRICS_WS=121, CLOSING_METRICS_LINE_COMMENT=122, + CLOSING_METRICS_MULTILINE_COMMENT=123, CLOSING_METRICS_WS=124; public static final int EXPLAIN_MODE=1, EXPRESSION_MODE=2, FROM_MODE=3, PROJECT_MODE=4, RENAME_MODE=5, ENRICH_MODE=6, ENRICH_FIELD_MODE=7, LOOKUP_MODE=8, LOOKUP_FIELD_MODE=9, @@ -64,7 +63,7 @@ private static String[] makeRuleNames() { "DISSECT", "DROP", "ENRICH", "EVAL", "EXPLAIN", "FROM", "GROK", "INLINESTATS", "KEEP", "LIMIT", "LOOKUP", "META", "METRICS", "MV_EXPAND", "RENAME", "ROW", "SHOW", "SORT", "STATS", "WHERE", "UNKNOWN_CMD", "LINE_COMMENT", - "MULTILINE_COMMENT", "WS", "INDEX_UNQUOTED_IDENTIFIER_PART", "INDEX_UNQUOTED_IDENTIFIER", + "MULTILINE_COMMENT", "WS", "UNQUOTED_SOURCE_PART", "UNQUOTED_SOURCE", "EXPLAIN_OPENING_BRACKET", "EXPLAIN_PIPE", "EXPLAIN_WS", "EXPLAIN_LINE_COMMENT", "EXPLAIN_MULTILINE_COMMENT", "PIPE", "DIGIT", "LETTER", "ESCAPE_SEQUENCE", "UNESCAPED_CHARS", "EXPONENT", "ASPERAND", "BACKQUOTE", "BACKQUOTE_BLOCK", @@ -76,19 +75,19 @@ private static String[] makeRuleNames() { "PERCENT", "NAMED_OR_POSITIONAL_PARAM", "OPENING_BRACKET", "CLOSING_BRACKET", "UNQUOTED_IDENTIFIER", "QUOTED_ID", "QUOTED_IDENTIFIER", "EXPR_LINE_COMMENT", "EXPR_MULTILINE_COMMENT", "EXPR_WS", "FROM_PIPE", "FROM_OPENING_BRACKET", - "FROM_CLOSING_BRACKET", "FROM_COMMA", "FROM_ASSIGN", "FROM_QUOTED_STRING", - "METADATA", "FROM_INDEX_UNQUOTED_IDENTIFIER", "FROM_LINE_COMMENT", "FROM_MULTILINE_COMMENT", + "FROM_CLOSING_BRACKET", "FROM_COLON", "FROM_COMMA", "FROM_ASSIGN", "METADATA", + "FROM_UNQUOTED_SOURCE", "FROM_QUOTED_SOURCE", "FROM_LINE_COMMENT", "FROM_MULTILINE_COMMENT", "FROM_WS", "PROJECT_PIPE", "PROJECT_DOT", "PROJECT_COMMA", "UNQUOTED_ID_BODY_WITH_PATTERN", "UNQUOTED_ID_PATTERN", "ID_PATTERN", "PROJECT_LINE_COMMENT", 
"PROJECT_MULTILINE_COMMENT", "PROJECT_WS", "RENAME_PIPE", "RENAME_ASSIGN", "RENAME_COMMA", "RENAME_DOT", "AS", "RENAME_ID_PATTERN", "RENAME_LINE_COMMENT", "RENAME_MULTILINE_COMMENT", "RENAME_WS", "ENRICH_PIPE", "ENRICH_OPENING_BRACKET", "ON", "WITH", "ENRICH_POLICY_NAME_BODY", - "ENRICH_POLICY_NAME", "ENRICH_QUOTED_IDENTIFIER", "ENRICH_MODE_UNQUOTED_VALUE", - "ENRICH_LINE_COMMENT", "ENRICH_MULTILINE_COMMENT", "ENRICH_WS", "ENRICH_FIELD_PIPE", - "ENRICH_FIELD_ASSIGN", "ENRICH_FIELD_COMMA", "ENRICH_FIELD_DOT", "ENRICH_FIELD_WITH", - "ENRICH_FIELD_ID_PATTERN", "ENRICH_FIELD_QUOTED_IDENTIFIER", "ENRICH_FIELD_LINE_COMMENT", - "ENRICH_FIELD_MULTILINE_COMMENT", "ENRICH_FIELD_WS", "LOOKUP_PIPE", "LOOKUP_COMMA", - "LOOKUP_DOT", "LOOKUP_ON", "LOOKUP_INDEX_UNQUOTED_IDENTIFIER", "LOOKUP_LINE_COMMENT", + "ENRICH_POLICY_NAME", "ENRICH_MODE_UNQUOTED_VALUE", "ENRICH_LINE_COMMENT", + "ENRICH_MULTILINE_COMMENT", "ENRICH_WS", "ENRICH_FIELD_PIPE", "ENRICH_FIELD_ASSIGN", + "ENRICH_FIELD_COMMA", "ENRICH_FIELD_DOT", "ENRICH_FIELD_WITH", "ENRICH_FIELD_ID_PATTERN", + "ENRICH_FIELD_QUOTED_IDENTIFIER", "ENRICH_FIELD_LINE_COMMENT", "ENRICH_FIELD_MULTILINE_COMMENT", + "ENRICH_FIELD_WS", "LOOKUP_PIPE", "LOOKUP_COLON", "LOOKUP_COMMA", "LOOKUP_DOT", + "LOOKUP_ON", "LOOKUP_UNQUOTED_SOURCE", "LOOKUP_QUOTED_SOURCE", "LOOKUP_LINE_COMMENT", "LOOKUP_MULTILINE_COMMENT", "LOOKUP_WS", "LOOKUP_FIELD_PIPE", "LOOKUP_FIELD_COMMA", "LOOKUP_FIELD_DOT", "LOOKUP_FIELD_ID_PATTERN", "LOOKUP_FIELD_LINE_COMMENT", "LOOKUP_FIELD_MULTILINE_COMMENT", "LOOKUP_FIELD_WS", "MVEXPAND_PIPE", @@ -97,11 +96,11 @@ private static String[] makeRuleNames() { "SHOW_PIPE", "INFO", "SHOW_LINE_COMMENT", "SHOW_MULTILINE_COMMENT", "SHOW_WS", "META_PIPE", "FUNCTIONS", "META_LINE_COMMENT", "META_MULTILINE_COMMENT", "META_WS", "SETTING_CLOSING_BRACKET", "COLON", "SETTING", "SETTING_LINE_COMMENT", - "SETTTING_MULTILINE_COMMENT", "SETTING_WS", "METRICS_PIPE", "METRICS_INDEX_UNQUOTED_IDENTIFIER", - "METRICS_LINE_COMMENT", "METRICS_MULTILINE_COMMENT", "METRICS_WS", "CLOSING_METRICS_COMMA", - "CLOSING_METRICS_LINE_COMMENT", "CLOSING_METRICS_MULTILINE_COMMENT", - "CLOSING_METRICS_WS", "CLOSING_METRICS_QUOTED_IDENTIFIER", "CLOSING_METRICS_UNQUOTED_IDENTIFIER", - "CLOSING_METRICS_BY", "CLOSING_METRICS_PIPE" + "SETTTING_MULTILINE_COMMENT", "SETTING_WS", "METRICS_PIPE", "METRICS_UNQUOTED_SOURCE", + "METRICS_QUOTED_SOURCE", "METRICS_LINE_COMMENT", "METRICS_MULTILINE_COMMENT", + "METRICS_WS", "CLOSING_METRICS_COLON", "CLOSING_METRICS_COMMA", "CLOSING_METRICS_LINE_COMMENT", + "CLOSING_METRICS_MULTILINE_COMMENT", "CLOSING_METRICS_WS", "CLOSING_METRICS_QUOTED_IDENTIFIER", + "CLOSING_METRICS_UNQUOTED_IDENTIFIER", "CLOSING_METRICS_BY", "CLOSING_METRICS_PIPE" }; } public static final String[] ruleNames = makeRuleNames(); @@ -129,11 +128,11 @@ private static String[] makeSymbolicNames() { null, "DISSECT", "DROP", "ENRICH", "EVAL", "EXPLAIN", "FROM", "GROK", "INLINESTATS", "KEEP", "LIMIT", "LOOKUP", "META", "METRICS", "MV_EXPAND", "RENAME", "ROW", "SHOW", "SORT", "STATS", "WHERE", "UNKNOWN_CMD", "LINE_COMMENT", - "MULTILINE_COMMENT", "WS", "INDEX_UNQUOTED_IDENTIFIER", "EXPLAIN_WS", - "EXPLAIN_LINE_COMMENT", "EXPLAIN_MULTILINE_COMMENT", "PIPE", "QUOTED_STRING", - "INTEGER_LITERAL", "DECIMAL_LITERAL", "BY", "AND", "ASC", "ASSIGN", "CAST_OP", - "COMMA", "DESC", "DOT", "FALSE", "FIRST", "LAST", "LP", "IN", "IS", "LIKE", - "NOT", "NULL", "NULLS", "OR", "PARAM", "RLIKE", "RP", "TRUE", "EQ", "CIEQ", + "MULTILINE_COMMENT", "WS", "UNQUOTED_SOURCE", "EXPLAIN_WS", "EXPLAIN_LINE_COMMENT", + 
"EXPLAIN_MULTILINE_COMMENT", "PIPE", "QUOTED_STRING", "INTEGER_LITERAL", + "DECIMAL_LITERAL", "BY", "AND", "ASC", "ASSIGN", "CAST_OP", "COMMA", + "DESC", "DOT", "FALSE", "FIRST", "LAST", "LP", "IN", "IS", "LIKE", "NOT", + "NULL", "NULLS", "OR", "PARAM", "RLIKE", "RP", "TRUE", "EQ", "CIEQ", "NEQ", "LT", "LTE", "GT", "GTE", "PLUS", "MINUS", "ASTERISK", "SLASH", "PERCENT", "NAMED_OR_POSITIONAL_PARAM", "OPENING_BRACKET", "CLOSING_BRACKET", "UNQUOTED_IDENTIFIER", "QUOTED_IDENTIFIER", "EXPR_LINE_COMMENT", "EXPR_MULTILINE_COMMENT", @@ -213,7 +212,7 @@ public EsqlBaseLexer(CharStream input) { public ATN getATN() { return _ATN; } public static final String _serializedATN = - "\u0004\u0000|\u058e\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff"+ + "\u0004\u0000|\u05aa\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff"+ "\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff"+ "\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff"+ "\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff"+ @@ -267,857 +266,876 @@ public EsqlBaseLexer(CharStream input) { "\u0002\u00b4\u0007\u00b4\u0002\u00b5\u0007\u00b5\u0002\u00b6\u0007\u00b6"+ "\u0002\u00b7\u0007\u00b7\u0002\u00b8\u0007\u00b8\u0002\u00b9\u0007\u00b9"+ "\u0002\u00ba\u0007\u00ba\u0002\u00bb\u0007\u00bb\u0002\u00bc\u0007\u00bc"+ - "\u0002\u00bd\u0007\u00bd\u0001\u0000\u0001\u0000\u0001\u0000\u0001\u0000"+ + "\u0002\u00bd\u0007\u00bd\u0002\u00be\u0007\u00be\u0002\u00bf\u0007\u00bf"+ + "\u0002\u00c0\u0007\u00c0\u0002\u00c1\u0007\u00c1\u0001\u0000\u0001\u0000"+ "\u0001\u0000\u0001\u0000\u0001\u0000\u0001\u0000\u0001\u0000\u0001\u0000"+ - "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001"+ - "\u0001\u0001\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002"+ - "\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0003\u0001\u0003"+ - "\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0004"+ - "\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004"+ - "\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0005\u0001\u0005\u0001\u0005"+ - "\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0006\u0001\u0006"+ - "\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0007"+ - "\u0001\u0007\u0001\u0007\u0001\u0007\u0001\u0007\u0001\u0007\u0001\u0007"+ + "\u0001\u0000\u0001\u0000\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001"+ + "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0002\u0001\u0002\u0001\u0002"+ + "\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002"+ + "\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003"+ + "\u0001\u0003\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004"+ + "\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0005"+ + "\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005"+ + "\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0006"+ + "\u0001\u0006\u0001\u0007\u0001\u0007\u0001\u0007\u0001\u0007\u0001\u0007"+ "\u0001\u0007\u0001\u0007\u0001\u0007\u0001\u0007\u0001\u0007\u0001\u0007"+ - "\u0001\u0007\u0001\b\u0001\b\u0001\b\u0001\b\u0001\b\u0001\b\u0001\b\u0001"+ - "\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\n\u0001"+ - "\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\u000b"+ - "\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b"+ - "\u0001\f\u0001\f\u0001\f\u0001\f\u0001\f\u0001\f\u0001\f\u0001\f\u0001"+ - 
"\f\u0001\f\u0001\r\u0001\r\u0001\r\u0001\r\u0001\r\u0001\r\u0001\r\u0001"+ - "\r\u0001\r\u0001\r\u0001\r\u0001\r\u0001\u000e\u0001\u000e\u0001\u000e"+ + "\u0001\u0007\u0001\u0007\u0001\u0007\u0001\b\u0001\b\u0001\b\u0001\b\u0001"+ + "\b\u0001\b\u0001\b\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001"+ + "\t\u0001\t\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001"+ + "\n\u0001\n\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b"+ + "\u0001\u000b\u0001\u000b\u0001\f\u0001\f\u0001\f\u0001\f\u0001\f\u0001"+ + "\f\u0001\f\u0001\f\u0001\f\u0001\f\u0001\r\u0001\r\u0001\r\u0001\r\u0001"+ + "\r\u0001\r\u0001\r\u0001\r\u0001\r\u0001\r\u0001\r\u0001\r\u0001\u000e"+ "\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000e"+ - "\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f"+ - "\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010"+ - "\u0001\u0010\u0001\u0011\u0001\u0011\u0001\u0011\u0001\u0011\u0001\u0011"+ - "\u0001\u0011\u0001\u0011\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012"+ - "\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0013\u0001\u0013"+ + "\u0001\u000e\u0001\u000e\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f"+ + "\u0001\u000f\u0001\u000f\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010"+ + "\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0011\u0001\u0011\u0001\u0011"+ + "\u0001\u0011\u0001\u0011\u0001\u0011\u0001\u0011\u0001\u0012\u0001\u0012"+ + "\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012"+ "\u0001\u0013\u0001\u0013\u0001\u0013\u0001\u0013\u0001\u0013\u0001\u0013"+ - "\u0001\u0014\u0004\u0014\u0237\b\u0014\u000b\u0014\f\u0014\u0238\u0001"+ - "\u0014\u0001\u0014\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0005"+ - "\u0015\u0241\b\u0015\n\u0015\f\u0015\u0244\t\u0015\u0001\u0015\u0003\u0015"+ - "\u0247\b\u0015\u0001\u0015\u0003\u0015\u024a\b\u0015\u0001\u0015\u0001"+ - "\u0015\u0001\u0016\u0001\u0016\u0001\u0016\u0001\u0016\u0001\u0016\u0005"+ - "\u0016\u0253\b\u0016\n\u0016\f\u0016\u0256\t\u0016\u0001\u0016\u0001\u0016"+ - "\u0001\u0016\u0001\u0016\u0001\u0016\u0001\u0017\u0004\u0017\u025e\b\u0017"+ - "\u000b\u0017\f\u0017\u025f\u0001\u0017\u0001\u0017\u0001\u0018\u0001\u0018"+ - "\u0001\u0018\u0003\u0018\u0267\b\u0018\u0001\u0019\u0004\u0019\u026a\b"+ - "\u0019\u000b\u0019\f\u0019\u026b\u0001\u001a\u0001\u001a\u0001\u001a\u0001"+ - "\u001a\u0001\u001a\u0001\u001b\u0001\u001b\u0001\u001b\u0001\u001b\u0001"+ - "\u001b\u0001\u001c\u0001\u001c\u0001\u001c\u0001\u001c\u0001\u001d\u0001"+ - "\u001d\u0001\u001d\u0001\u001d\u0001\u001e\u0001\u001e\u0001\u001e\u0001"+ - "\u001e\u0001\u001f\u0001\u001f\u0001\u001f\u0001\u001f\u0001 \u0001 \u0001"+ - "!\u0001!\u0001\"\u0001\"\u0001\"\u0001#\u0001#\u0001$\u0001$\u0003$\u0293"+ - "\b$\u0001$\u0004$\u0296\b$\u000b$\f$\u0297\u0001%\u0001%\u0001&\u0001"+ - "&\u0001\'\u0001\'\u0001\'\u0003\'\u02a1\b\'\u0001(\u0001(\u0001)\u0001"+ - ")\u0001)\u0003)\u02a8\b)\u0001*\u0001*\u0001*\u0005*\u02ad\b*\n*\f*\u02b0"+ - "\t*\u0001*\u0001*\u0001*\u0001*\u0001*\u0001*\u0005*\u02b8\b*\n*\f*\u02bb"+ - "\t*\u0001*\u0001*\u0001*\u0001*\u0001*\u0003*\u02c2\b*\u0001*\u0003*\u02c5"+ - "\b*\u0003*\u02c7\b*\u0001+\u0004+\u02ca\b+\u000b+\f+\u02cb\u0001,\u0004"+ - ",\u02cf\b,\u000b,\f,\u02d0\u0001,\u0001,\u0005,\u02d5\b,\n,\f,\u02d8\t"+ - ",\u0001,\u0001,\u0004,\u02dc\b,\u000b,\f,\u02dd\u0001,\u0004,\u02e1\b"+ - ",\u000b,\f,\u02e2\u0001,\u0001,\u0005,\u02e7\b,\n,\f,\u02ea\t,\u0003,"+ - 
"\u02ec\b,\u0001,\u0001,\u0001,\u0001,\u0004,\u02f2\b,\u000b,\f,\u02f3"+ - "\u0001,\u0001,\u0003,\u02f8\b,\u0001-\u0001-\u0001-\u0001.\u0001.\u0001"+ - ".\u0001.\u0001/\u0001/\u0001/\u0001/\u00010\u00010\u00011\u00011\u0001"+ - "1\u00012\u00012\u00013\u00013\u00013\u00013\u00013\u00014\u00014\u0001"+ - "5\u00015\u00015\u00015\u00015\u00015\u00016\u00016\u00016\u00016\u0001"+ - "6\u00016\u00017\u00017\u00017\u00017\u00017\u00018\u00018\u00019\u0001"+ - "9\u00019\u0001:\u0001:\u0001:\u0001;\u0001;\u0001;\u0001;\u0001;\u0001"+ - "<\u0001<\u0001<\u0001<\u0001=\u0001=\u0001=\u0001=\u0001=\u0001>\u0001"+ - ">\u0001>\u0001>\u0001>\u0001>\u0001?\u0001?\u0001?\u0001@\u0001@\u0001"+ - "A\u0001A\u0001A\u0001A\u0001A\u0001A\u0001B\u0001B\u0001C\u0001C\u0001"+ - "C\u0001C\u0001C\u0001D\u0001D\u0001D\u0001E\u0001E\u0001E\u0001F\u0001"+ - "F\u0001F\u0001G\u0001G\u0001H\u0001H\u0001H\u0001I\u0001I\u0001J\u0001"+ - "J\u0001J\u0001K\u0001K\u0001L\u0001L\u0001M\u0001M\u0001N\u0001N\u0001"+ - "O\u0001O\u0001P\u0001P\u0001P\u0005P\u0372\bP\nP\fP\u0375\tP\u0001P\u0001"+ - "P\u0004P\u0379\bP\u000bP\fP\u037a\u0003P\u037d\bP\u0001Q\u0001Q\u0001"+ - "Q\u0001Q\u0001Q\u0001R\u0001R\u0001R\u0001R\u0001R\u0001S\u0001S\u0005"+ - "S\u038b\bS\nS\fS\u038e\tS\u0001S\u0001S\u0003S\u0392\bS\u0001S\u0004S"+ - "\u0395\bS\u000bS\fS\u0396\u0003S\u0399\bS\u0001T\u0001T\u0004T\u039d\b"+ - "T\u000bT\fT\u039e\u0001T\u0001T\u0001U\u0001U\u0001V\u0001V\u0001V\u0001"+ - "V\u0001W\u0001W\u0001W\u0001W\u0001X\u0001X\u0001X\u0001X\u0001Y\u0001"+ - "Y\u0001Y\u0001Y\u0001Y\u0001Z\u0001Z\u0001Z\u0001Z\u0001[\u0001[\u0001"+ - "[\u0001[\u0001\\\u0001\\\u0001\\\u0001\\\u0001]\u0001]\u0001]\u0001]\u0001"+ - "^\u0001^\u0001^\u0001^\u0001_\u0001_\u0001_\u0001_\u0001_\u0001_\u0001"+ - "_\u0001_\u0001_\u0001`\u0001`\u0001`\u0001`\u0001a\u0001a\u0001a\u0001"+ - "a\u0001b\u0001b\u0001b\u0001b\u0001c\u0001c\u0001c\u0001c\u0001d\u0001"+ - "d\u0001d\u0001d\u0001d\u0001e\u0001e\u0001e\u0001e\u0001f\u0001f\u0001"+ - "f\u0001f\u0001g\u0001g\u0001g\u0001g\u0003g\u03f4\bg\u0001h\u0001h\u0003"+ - "h\u03f8\bh\u0001h\u0005h\u03fb\bh\nh\fh\u03fe\th\u0001h\u0001h\u0003h"+ - "\u0402\bh\u0001h\u0004h\u0405\bh\u000bh\fh\u0406\u0003h\u0409\bh\u0001"+ - "i\u0001i\u0004i\u040d\bi\u000bi\fi\u040e\u0001j\u0001j\u0001j\u0001j\u0001"+ - "k\u0001k\u0001k\u0001k\u0001l\u0001l\u0001l\u0001l\u0001m\u0001m\u0001"+ - "m\u0001m\u0001m\u0001n\u0001n\u0001n\u0001n\u0001o\u0001o\u0001o\u0001"+ - "o\u0001p\u0001p\u0001p\u0001p\u0001q\u0001q\u0001q\u0001r\u0001r\u0001"+ - "r\u0001r\u0001s\u0001s\u0001s\u0001s\u0001t\u0001t\u0001t\u0001t\u0001"+ - "u\u0001u\u0001u\u0001u\u0001v\u0001v\u0001v\u0001v\u0001v\u0001w\u0001"+ - "w\u0001w\u0001w\u0001w\u0001x\u0001x\u0001x\u0001x\u0001x\u0001y\u0001"+ - "y\u0001y\u0001y\u0001y\u0001y\u0001y\u0001z\u0001z\u0001{\u0004{\u045a"+ - "\b{\u000b{\f{\u045b\u0001{\u0001{\u0003{\u0460\b{\u0001{\u0004{\u0463"+ - "\b{\u000b{\f{\u0464\u0001|\u0001|\u0001|\u0001|\u0001}\u0001}\u0001}\u0001"+ - "}\u0001~\u0001~\u0001~\u0001~\u0001\u007f\u0001\u007f\u0001\u007f\u0001"+ - "\u007f\u0001\u0080\u0001\u0080\u0001\u0080\u0001\u0080\u0001\u0081\u0001"+ - "\u0081\u0001\u0081\u0001\u0081\u0001\u0081\u0001\u0081\u0001\u0082\u0001"+ - "\u0082\u0001\u0082\u0001\u0082\u0001\u0083\u0001\u0083\u0001\u0083\u0001"+ - "\u0083\u0001\u0084\u0001\u0084\u0001\u0084\u0001\u0084\u0001\u0085\u0001"+ - "\u0085\u0001\u0085\u0001\u0085\u0001\u0086\u0001\u0086\u0001\u0086\u0001"+ - "\u0086\u0001\u0087\u0001\u0087\u0001\u0087\u0001\u0087\u0001\u0088\u0001"+ - 
"\u0088\u0001\u0088\u0001\u0088\u0001\u0089\u0001\u0089\u0001\u0089\u0001"+ - "\u0089\u0001\u008a\u0001\u008a\u0001\u008a\u0001\u008a\u0001\u008b\u0001"+ - "\u008b\u0001\u008b\u0001\u008b\u0001\u008b\u0001\u008c\u0001\u008c\u0001"+ - "\u008c\u0001\u008c\u0001\u008d\u0001\u008d\u0001\u008d\u0001\u008d\u0001"+ - "\u008e\u0001\u008e\u0001\u008e\u0001\u008e\u0001\u008e\u0001\u008f\u0001"+ - "\u008f\u0001\u008f\u0001\u008f\u0001\u0090\u0001\u0090\u0001\u0090\u0001"+ - "\u0090\u0001\u0091\u0001\u0091\u0001\u0091\u0001\u0091\u0001\u0092\u0001"+ - "\u0092\u0001\u0092\u0001\u0092\u0001\u0093\u0001\u0093\u0001\u0093\u0001"+ - "\u0093\u0001\u0093\u0001\u0093\u0001\u0094\u0001\u0094\u0001\u0094\u0001"+ - "\u0094\u0001\u0095\u0001\u0095\u0001\u0095\u0001\u0095\u0001\u0096\u0001"+ - "\u0096\u0001\u0096\u0001\u0096\u0001\u0097\u0001\u0097\u0001\u0097\u0001"+ - "\u0097\u0001\u0098\u0001\u0098\u0001\u0098\u0001\u0098\u0001\u0099\u0001"+ - "\u0099\u0001\u0099\u0001\u0099\u0001\u009a\u0001\u009a\u0001\u009a\u0001"+ - "\u009a\u0001\u009a\u0001\u009b\u0001\u009b\u0001\u009b\u0001\u009b\u0001"+ - "\u009c\u0001\u009c\u0001\u009c\u0001\u009c\u0001\u009d\u0001\u009d\u0001"+ - "\u009d\u0001\u009d\u0001\u009e\u0001\u009e\u0001\u009e\u0001\u009e\u0001"+ - "\u009f\u0001\u009f\u0001\u009f\u0001\u009f\u0001\u00a0\u0001\u00a0\u0001"+ - "\u00a0\u0001\u00a0\u0001\u00a1\u0001\u00a1\u0001\u00a1\u0001\u00a1\u0001"+ - "\u00a1\u0001\u00a2\u0001\u00a2\u0001\u00a2\u0001\u00a2\u0001\u00a2\u0001"+ - "\u00a3\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0001\u00a4\u0001\u00a4\u0001"+ - "\u00a4\u0001\u00a4\u0001\u00a5\u0001\u00a5\u0001\u00a5\u0001\u00a5\u0001"+ - "\u00a6\u0001\u00a6\u0001\u00a6\u0001\u00a6\u0001\u00a6\u0001\u00a7\u0001"+ - "\u00a7\u0001\u00a7\u0001\u00a7\u0001\u00a7\u0001\u00a7\u0001\u00a7\u0001"+ - "\u00a7\u0001\u00a7\u0001\u00a7\u0001\u00a8\u0001\u00a8\u0001\u00a8\u0001"+ - "\u00a8\u0001\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00aa\u0001"+ - "\u00aa\u0001\u00aa\u0001\u00aa\u0001\u00ab\u0001\u00ab\u0001\u00ab\u0001"+ - "\u00ab\u0001\u00ab\u0001\u00ac\u0001\u00ac\u0001\u00ad\u0001\u00ad\u0001"+ - "\u00ad\u0001\u00ad\u0001\u00ad\u0004\u00ad\u053f\b\u00ad\u000b\u00ad\f"+ - "\u00ad\u0540\u0001\u00ae\u0001\u00ae\u0001\u00ae\u0001\u00ae\u0001\u00af"+ - "\u0001\u00af\u0001\u00af\u0001\u00af\u0001\u00b0\u0001\u00b0\u0001\u00b0"+ - "\u0001\u00b0\u0001\u00b1\u0001\u00b1\u0001\u00b1\u0001\u00b1\u0001\u00b1"+ - "\u0001\u00b2\u0001\u00b2\u0001\u00b2\u0001\u00b2\u0001\u00b2\u0001\u00b2"+ - "\u0001\u00b3\u0001\u00b3\u0001\u00b3\u0001\u00b3\u0001\u00b4\u0001\u00b4"+ - "\u0001\u00b4\u0001\u00b4\u0001\u00b5\u0001\u00b5\u0001\u00b5\u0001\u00b5"+ - "\u0001\u00b6\u0001\u00b6\u0001\u00b6\u0001\u00b6\u0001\u00b6\u0001\u00b6"+ - "\u0001\u00b7\u0001\u00b7\u0001\u00b7\u0001\u00b7\u0001\u00b8\u0001\u00b8"+ - "\u0001\u00b8\u0001\u00b8\u0001\u00b9\u0001\u00b9\u0001\u00b9\u0001\u00b9"+ - "\u0001\u00ba\u0001\u00ba\u0001\u00ba\u0001\u00ba\u0001\u00ba\u0001\u00ba"+ - "\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bb"+ - "\u0001\u00bc\u0001\u00bc\u0001\u00bc\u0001\u00bc\u0001\u00bc\u0001\u00bc"+ - "\u0001\u00bd\u0001\u00bd\u0001\u00bd\u0001\u00bd\u0001\u00bd\u0002\u0254"+ - "\u02b9\u0000\u00be\u0010\u0001\u0012\u0002\u0014\u0003\u0016\u0004\u0018"+ - "\u0005\u001a\u0006\u001c\u0007\u001e\b \t\"\n$\u000b&\f(\r*\u000e,\u000f"+ - ".\u00100\u00112\u00124\u00136\u00148\u0015:\u0016<\u0017>\u0018@\u0000"+ - "B\u0019D\u0000F\u0000H\u001aJ\u001bL\u001cN\u001dP\u0000R\u0000T\u0000"+ - 
"V\u0000X\u0000Z\u0000\\\u0000^\u0000`\u0000b\u0000d\u001ef\u001fh j!l"+ - "\"n#p$r%t&v\'x(z)|*~+\u0080,\u0082-\u0084.\u0086/\u00880\u008a1\u008c"+ - "2\u008e3\u00904\u00925\u00946\u00967\u00988\u009a9\u009c:\u009e;\u00a0"+ - "<\u00a2=\u00a4>\u00a6?\u00a8@\u00aaA\u00acB\u00aeC\u00b0D\u00b2E\u00b4"+ - "F\u00b6G\u00b8\u0000\u00baH\u00bcI\u00beJ\u00c0K\u00c2\u0000\u00c4\u0000"+ - "\u00c6\u0000\u00c8\u0000\u00ca\u0000\u00cc\u0000\u00ceL\u00d0\u0000\u00d2"+ - "M\u00d4N\u00d6O\u00d8\u0000\u00da\u0000\u00dc\u0000\u00de\u0000\u00e0"+ - "\u0000\u00e2P\u00e4Q\u00e6R\u00e8S\u00ea\u0000\u00ec\u0000\u00ee\u0000"+ - "\u00f0\u0000\u00f2T\u00f4\u0000\u00f6U\u00f8V\u00faW\u00fc\u0000\u00fe"+ - "\u0000\u0100X\u0102Y\u0104\u0000\u0106Z\u0108\u0000\u010a\u0000\u010c"+ - "[\u010e\\\u0110]\u0112\u0000\u0114\u0000\u0116\u0000\u0118\u0000\u011a"+ - "\u0000\u011c\u0000\u011e\u0000\u0120^\u0122_\u0124`\u0126\u0000\u0128"+ - "\u0000\u012a\u0000\u012c\u0000\u012e\u0000\u0130a\u0132b\u0134c\u0136"+ - "\u0000\u0138\u0000\u013a\u0000\u013c\u0000\u013ed\u0140e\u0142f\u0144"+ - "\u0000\u0146\u0000\u0148\u0000\u014a\u0000\u014cg\u014eh\u0150i\u0152"+ - "\u0000\u0154j\u0156k\u0158l\u015am\u015c\u0000\u015en\u0160o\u0162p\u0164"+ - "q\u0166\u0000\u0168r\u016as\u016ct\u016eu\u0170v\u0172\u0000\u0174\u0000"+ - "\u0176w\u0178x\u017ay\u017c\u0000\u017ez\u0180{\u0182|\u0184\u0000\u0186"+ - "\u0000\u0188\u0000\u018a\u0000\u0010\u0000\u0001\u0002\u0003\u0004\u0005"+ + "\u0001\u0013\u0001\u0013\u0001\u0014\u0004\u0014\u023f\b\u0014\u000b\u0014"+ + "\f\u0014\u0240\u0001\u0014\u0001\u0014\u0001\u0015\u0001\u0015\u0001\u0015"+ + "\u0001\u0015\u0005\u0015\u0249\b\u0015\n\u0015\f\u0015\u024c\t\u0015\u0001"+ + "\u0015\u0003\u0015\u024f\b\u0015\u0001\u0015\u0003\u0015\u0252\b\u0015"+ + "\u0001\u0015\u0001\u0015\u0001\u0016\u0001\u0016\u0001\u0016\u0001\u0016"+ + "\u0001\u0016\u0005\u0016\u025b\b\u0016\n\u0016\f\u0016\u025e\t\u0016\u0001"+ + "\u0016\u0001\u0016\u0001\u0016\u0001\u0016\u0001\u0016\u0001\u0017\u0004"+ + "\u0017\u0266\b\u0017\u000b\u0017\f\u0017\u0267\u0001\u0017\u0001\u0017"+ + "\u0001\u0018\u0001\u0018\u0001\u0018\u0003\u0018\u026f\b\u0018\u0001\u0019"+ + "\u0004\u0019\u0272\b\u0019\u000b\u0019\f\u0019\u0273\u0001\u001a\u0001"+ + "\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001b\u0001\u001b\u0001"+ + "\u001b\u0001\u001b\u0001\u001b\u0001\u001c\u0001\u001c\u0001\u001c\u0001"+ + "\u001c\u0001\u001d\u0001\u001d\u0001\u001d\u0001\u001d\u0001\u001e\u0001"+ + "\u001e\u0001\u001e\u0001\u001e\u0001\u001f\u0001\u001f\u0001\u001f\u0001"+ + "\u001f\u0001 \u0001 \u0001!\u0001!\u0001\"\u0001\"\u0001\"\u0001#\u0001"+ + "#\u0001$\u0001$\u0003$\u029b\b$\u0001$\u0004$\u029e\b$\u000b$\f$\u029f"+ + "\u0001%\u0001%\u0001&\u0001&\u0001\'\u0001\'\u0001\'\u0003\'\u02a9\b\'"+ + "\u0001(\u0001(\u0001)\u0001)\u0001)\u0003)\u02b0\b)\u0001*\u0001*\u0001"+ + "*\u0005*\u02b5\b*\n*\f*\u02b8\t*\u0001*\u0001*\u0001*\u0001*\u0001*\u0001"+ + "*\u0005*\u02c0\b*\n*\f*\u02c3\t*\u0001*\u0001*\u0001*\u0001*\u0001*\u0003"+ + "*\u02ca\b*\u0001*\u0003*\u02cd\b*\u0003*\u02cf\b*\u0001+\u0004+\u02d2"+ + "\b+\u000b+\f+\u02d3\u0001,\u0004,\u02d7\b,\u000b,\f,\u02d8\u0001,\u0001"+ + ",\u0005,\u02dd\b,\n,\f,\u02e0\t,\u0001,\u0001,\u0004,\u02e4\b,\u000b,"+ + "\f,\u02e5\u0001,\u0004,\u02e9\b,\u000b,\f,\u02ea\u0001,\u0001,\u0005,"+ + "\u02ef\b,\n,\f,\u02f2\t,\u0003,\u02f4\b,\u0001,\u0001,\u0001,\u0001,\u0004"+ + ",\u02fa\b,\u000b,\f,\u02fb\u0001,\u0001,\u0003,\u0300\b,\u0001-\u0001"+ + "-\u0001-\u0001.\u0001.\u0001.\u0001.\u0001/\u0001/\u0001/\u0001/\u0001"+ + 
"0\u00010\u00011\u00011\u00011\u00012\u00012\u00013\u00013\u00013\u0001"+ + "3\u00013\u00014\u00014\u00015\u00015\u00015\u00015\u00015\u00015\u0001"+ + "6\u00016\u00016\u00016\u00016\u00016\u00017\u00017\u00017\u00017\u0001"+ + "7\u00018\u00018\u00019\u00019\u00019\u0001:\u0001:\u0001:\u0001;\u0001"+ + ";\u0001;\u0001;\u0001;\u0001<\u0001<\u0001<\u0001<\u0001=\u0001=\u0001"+ + "=\u0001=\u0001=\u0001>\u0001>\u0001>\u0001>\u0001>\u0001>\u0001?\u0001"+ + "?\u0001?\u0001@\u0001@\u0001A\u0001A\u0001A\u0001A\u0001A\u0001A\u0001"+ + "B\u0001B\u0001C\u0001C\u0001C\u0001C\u0001C\u0001D\u0001D\u0001D\u0001"+ + "E\u0001E\u0001E\u0001F\u0001F\u0001F\u0001G\u0001G\u0001H\u0001H\u0001"+ + "H\u0001I\u0001I\u0001J\u0001J\u0001J\u0001K\u0001K\u0001L\u0001L\u0001"+ + "M\u0001M\u0001N\u0001N\u0001O\u0001O\u0001P\u0001P\u0001P\u0005P\u037a"+ + "\bP\nP\fP\u037d\tP\u0001P\u0001P\u0004P\u0381\bP\u000bP\fP\u0382\u0003"+ + "P\u0385\bP\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001R\u0001R\u0001R\u0001"+ + "R\u0001R\u0001S\u0001S\u0005S\u0393\bS\nS\fS\u0396\tS\u0001S\u0001S\u0003"+ + "S\u039a\bS\u0001S\u0004S\u039d\bS\u000bS\fS\u039e\u0003S\u03a1\bS\u0001"+ + "T\u0001T\u0004T\u03a5\bT\u000bT\fT\u03a6\u0001T\u0001T\u0001U\u0001U\u0001"+ + "V\u0001V\u0001V\u0001V\u0001W\u0001W\u0001W\u0001W\u0001X\u0001X\u0001"+ + "X\u0001X\u0001Y\u0001Y\u0001Y\u0001Y\u0001Y\u0001Z\u0001Z\u0001Z\u0001"+ + "Z\u0001[\u0001[\u0001[\u0001[\u0001\\\u0001\\\u0001\\\u0001\\\u0001]\u0001"+ + "]\u0001]\u0001]\u0001^\u0001^\u0001^\u0001^\u0001_\u0001_\u0001_\u0001"+ + "_\u0001_\u0001_\u0001_\u0001_\u0001_\u0001`\u0001`\u0001`\u0001`\u0001"+ + "a\u0001a\u0001a\u0001a\u0001b\u0001b\u0001b\u0001b\u0001c\u0001c\u0001"+ + "c\u0001c\u0001d\u0001d\u0001d\u0001d\u0001e\u0001e\u0001e\u0001e\u0001"+ + "e\u0001f\u0001f\u0001f\u0001f\u0001g\u0001g\u0001g\u0001g\u0001h\u0001"+ + "h\u0001h\u0001h\u0003h\u0400\bh\u0001i\u0001i\u0003i\u0404\bi\u0001i\u0005"+ + "i\u0407\bi\ni\fi\u040a\ti\u0001i\u0001i\u0003i\u040e\bi\u0001i\u0004i"+ + "\u0411\bi\u000bi\fi\u0412\u0003i\u0415\bi\u0001j\u0001j\u0004j\u0419\b"+ + "j\u000bj\fj\u041a\u0001k\u0001k\u0001k\u0001k\u0001l\u0001l\u0001l\u0001"+ + "l\u0001m\u0001m\u0001m\u0001m\u0001n\u0001n\u0001n\u0001n\u0001n\u0001"+ + "o\u0001o\u0001o\u0001o\u0001p\u0001p\u0001p\u0001p\u0001q\u0001q\u0001"+ + "q\u0001q\u0001r\u0001r\u0001r\u0001s\u0001s\u0001s\u0001s\u0001t\u0001"+ + "t\u0001t\u0001t\u0001u\u0001u\u0001u\u0001u\u0001v\u0001v\u0001v\u0001"+ + "v\u0001w\u0001w\u0001w\u0001w\u0001w\u0001x\u0001x\u0001x\u0001x\u0001"+ + "x\u0001y\u0001y\u0001y\u0001y\u0001y\u0001z\u0001z\u0001z\u0001z\u0001"+ + "z\u0001z\u0001z\u0001{\u0001{\u0001|\u0004|\u0466\b|\u000b|\f|\u0467\u0001"+ + "|\u0001|\u0003|\u046c\b|\u0001|\u0004|\u046f\b|\u000b|\f|\u0470\u0001"+ + "}\u0001}\u0001}\u0001}\u0001~\u0001~\u0001~\u0001~\u0001\u007f\u0001\u007f"+ + "\u0001\u007f\u0001\u007f\u0001\u0080\u0001\u0080\u0001\u0080\u0001\u0080"+ + "\u0001\u0081\u0001\u0081\u0001\u0081\u0001\u0081\u0001\u0081\u0001\u0081"+ + "\u0001\u0082\u0001\u0082\u0001\u0082\u0001\u0082\u0001\u0083\u0001\u0083"+ + "\u0001\u0083\u0001\u0083\u0001\u0084\u0001\u0084\u0001\u0084\u0001\u0084"+ + "\u0001\u0085\u0001\u0085\u0001\u0085\u0001\u0085\u0001\u0086\u0001\u0086"+ + "\u0001\u0086\u0001\u0086\u0001\u0087\u0001\u0087\u0001\u0087\u0001\u0087"+ + "\u0001\u0088\u0001\u0088\u0001\u0088\u0001\u0088\u0001\u0089\u0001\u0089"+ + "\u0001\u0089\u0001\u0089\u0001\u008a\u0001\u008a\u0001\u008a\u0001\u008a"+ + "\u0001\u008b\u0001\u008b\u0001\u008b\u0001\u008b\u0001\u008b\u0001\u008c"+ + 
"\u0001\u008c\u0001\u008c\u0001\u008c\u0001\u008d\u0001\u008d\u0001\u008d"+ + "\u0001\u008d\u0001\u008e\u0001\u008e\u0001\u008e\u0001\u008e\u0001\u008f"+ + "\u0001\u008f\u0001\u008f\u0001\u008f\u0001\u008f\u0001\u0090\u0001\u0090"+ + "\u0001\u0090\u0001\u0090\u0001\u0091\u0001\u0091\u0001\u0091\u0001\u0091"+ + "\u0001\u0092\u0001\u0092\u0001\u0092\u0001\u0092\u0001\u0093\u0001\u0093"+ + "\u0001\u0093\u0001\u0093\u0001\u0094\u0001\u0094\u0001\u0094\u0001\u0094"+ + "\u0001\u0095\u0001\u0095\u0001\u0095\u0001\u0095\u0001\u0095\u0001\u0095"+ + "\u0001\u0096\u0001\u0096\u0001\u0096\u0001\u0096\u0001\u0097\u0001\u0097"+ + "\u0001\u0097\u0001\u0097\u0001\u0098\u0001\u0098\u0001\u0098\u0001\u0098"+ + "\u0001\u0099\u0001\u0099\u0001\u0099\u0001\u0099\u0001\u009a\u0001\u009a"+ + "\u0001\u009a\u0001\u009a\u0001\u009b\u0001\u009b\u0001\u009b\u0001\u009b"+ + "\u0001\u009c\u0001\u009c\u0001\u009c\u0001\u009c\u0001\u009c\u0001\u009d"+ + "\u0001\u009d\u0001\u009d\u0001\u009d\u0001\u009e\u0001\u009e\u0001\u009e"+ + "\u0001\u009e\u0001\u009f\u0001\u009f\u0001\u009f\u0001\u009f\u0001\u00a0"+ + "\u0001\u00a0\u0001\u00a0\u0001\u00a0\u0001\u00a1\u0001\u00a1\u0001\u00a1"+ + "\u0001\u00a1\u0001\u00a2\u0001\u00a2\u0001\u00a2\u0001\u00a2\u0001\u00a3"+ + "\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0001\u00a4\u0001\u00a4"+ + "\u0001\u00a4\u0001\u00a4\u0001\u00a4\u0001\u00a5\u0001\u00a5\u0001\u00a5"+ + "\u0001\u00a5\u0001\u00a6\u0001\u00a6\u0001\u00a6\u0001\u00a6\u0001\u00a7"+ + "\u0001\u00a7\u0001\u00a7\u0001\u00a7\u0001\u00a8\u0001\u00a8\u0001\u00a8"+ + "\u0001\u00a8\u0001\u00a8\u0001\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00a9"+ + "\u0001\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00a9"+ + "\u0001\u00aa\u0001\u00aa\u0001\u00aa\u0001\u00aa\u0001\u00ab\u0001\u00ab"+ + "\u0001\u00ab\u0001\u00ab\u0001\u00ac\u0001\u00ac\u0001\u00ac\u0001\u00ac"+ + "\u0001\u00ad\u0001\u00ad\u0001\u00ad\u0001\u00ad\u0001\u00ad\u0001\u00ae"+ + "\u0001\u00ae\u0001\u00af\u0001\u00af\u0001\u00af\u0001\u00af\u0001\u00af"+ + "\u0004\u00af\u054f\b\u00af\u000b\u00af\f\u00af\u0550\u0001\u00b0\u0001"+ + "\u00b0\u0001\u00b0\u0001\u00b0\u0001\u00b1\u0001\u00b1\u0001\u00b1\u0001"+ + "\u00b1\u0001\u00b2\u0001\u00b2\u0001\u00b2\u0001\u00b2\u0001\u00b3\u0001"+ + "\u00b3\u0001\u00b3\u0001\u00b3\u0001\u00b3\u0001\u00b4\u0001\u00b4\u0001"+ + "\u00b4\u0001\u00b4\u0001\u00b4\u0001\u00b4\u0001\u00b5\u0001\u00b5\u0001"+ + "\u00b5\u0001\u00b5\u0001\u00b5\u0001\u00b5\u0001\u00b6\u0001\u00b6\u0001"+ + "\u00b6\u0001\u00b6\u0001\u00b7\u0001\u00b7\u0001\u00b7\u0001\u00b7\u0001"+ + "\u00b8\u0001\u00b8\u0001\u00b8\u0001\u00b8\u0001\u00b9\u0001\u00b9\u0001"+ + "\u00b9\u0001\u00b9\u0001\u00b9\u0001\u00b9\u0001\u00ba\u0001\u00ba\u0001"+ + "\u00ba\u0001\u00ba\u0001\u00ba\u0001\u00ba\u0001\u00bb\u0001\u00bb\u0001"+ + "\u00bb\u0001\u00bb\u0001\u00bc\u0001\u00bc\u0001\u00bc\u0001\u00bc\u0001"+ + "\u00bd\u0001\u00bd\u0001\u00bd\u0001\u00bd\u0001\u00be\u0001\u00be\u0001"+ + "\u00be\u0001\u00be\u0001\u00be\u0001\u00be\u0001\u00bf\u0001\u00bf\u0001"+ + "\u00bf\u0001\u00bf\u0001\u00bf\u0001\u00bf\u0001\u00c0\u0001\u00c0\u0001"+ + "\u00c0\u0001\u00c0\u0001\u00c0\u0001\u00c0\u0001\u00c1\u0001\u00c1\u0001"+ + "\u00c1\u0001\u00c1\u0001\u00c1\u0002\u025c\u02c1\u0000\u00c2\u0010\u0001"+ + "\u0012\u0002\u0014\u0003\u0016\u0004\u0018\u0005\u001a\u0006\u001c\u0007"+ + "\u001e\b \t\"\n$\u000b&\f(\r*\u000e,\u000f.\u00100\u00112\u00124\u0013"+ + "6\u00148\u0015:\u0016<\u0017>\u0018@\u0000B\u0019D\u0000F\u0000H\u001a"+ + 
"J\u001bL\u001cN\u001dP\u0000R\u0000T\u0000V\u0000X\u0000Z\u0000\\\u0000"+ + "^\u0000`\u0000b\u0000d\u001ef\u001fh j!l\"n#p$r%t&v\'x(z)|*~+\u0080,\u0082"+ + "-\u0084.\u0086/\u00880\u008a1\u008c2\u008e3\u00904\u00925\u00946\u0096"+ + "7\u00988\u009a9\u009c:\u009e;\u00a0<\u00a2=\u00a4>\u00a6?\u00a8@\u00aa"+ + "A\u00acB\u00aeC\u00b0D\u00b2E\u00b4F\u00b6G\u00b8\u0000\u00baH\u00bcI"+ + "\u00beJ\u00c0K\u00c2\u0000\u00c4\u0000\u00c6\u0000\u00c8\u0000\u00ca\u0000"+ + "\u00cc\u0000\u00ceL\u00d0\u0000\u00d2\u0000\u00d4M\u00d6N\u00d8O\u00da"+ + "\u0000\u00dc\u0000\u00de\u0000\u00e0\u0000\u00e2\u0000\u00e4P\u00e6Q\u00e8"+ + "R\u00eaS\u00ec\u0000\u00ee\u0000\u00f0\u0000\u00f2\u0000\u00f4T\u00f6"+ + "\u0000\u00f8U\u00faV\u00fcW\u00fe\u0000\u0100\u0000\u0102X\u0104Y\u0106"+ + "\u0000\u0108Z\u010a\u0000\u010c[\u010e\\\u0110]\u0112\u0000\u0114\u0000"+ + "\u0116\u0000\u0118\u0000\u011a\u0000\u011c\u0000\u011e\u0000\u0120^\u0122"+ + "_\u0124`\u0126\u0000\u0128\u0000\u012a\u0000\u012c\u0000\u012e\u0000\u0130"+ + "\u0000\u0132\u0000\u0134a\u0136b\u0138c\u013a\u0000\u013c\u0000\u013e"+ + "\u0000\u0140\u0000\u0142d\u0144e\u0146f\u0148\u0000\u014a\u0000\u014c"+ + "\u0000\u014e\u0000\u0150g\u0152h\u0154i\u0156\u0000\u0158j\u015ak\u015c"+ + "l\u015em\u0160\u0000\u0162n\u0164o\u0166p\u0168q\u016a\u0000\u016cr\u016e"+ + "s\u0170t\u0172u\u0174v\u0176\u0000\u0178\u0000\u017a\u0000\u017cw\u017e"+ + "x\u0180y\u0182\u0000\u0184\u0000\u0186z\u0188{\u018a|\u018c\u0000\u018e"+ + "\u0000\u0190\u0000\u0192\u0000\u0010\u0000\u0001\u0002\u0003\u0004\u0005"+ "\u0006\u0007\b\t\n\u000b\f\r\u000e\u000f\r\u0006\u0000\t\n\r\r //[[]"+ - "]\u0002\u0000\n\n\r\r\u0003\u0000\t\n\r\r \n\u0000\t\n\r\r ,,//==[["+ - "]]``||\u0002\u0000**//\u0001\u000009\u0002\u0000AZaz\u0005\u0000\"\"\\"+ - "\\nnrrtt\u0004\u0000\n\n\r\r\"\"\\\\\u0002\u0000EEee\u0002\u0000++--\u0001"+ - "\u0000``\u000b\u0000\t\n\r\r \"#,,//::<<>?\\\\||\u05a8\u0000\u0010\u0001"+ - "\u0000\u0000\u0000\u0000\u0012\u0001\u0000\u0000\u0000\u0000\u0014\u0001"+ - "\u0000\u0000\u0000\u0000\u0016\u0001\u0000\u0000\u0000\u0000\u0018\u0001"+ - "\u0000\u0000\u0000\u0000\u001a\u0001\u0000\u0000\u0000\u0000\u001c\u0001"+ - "\u0000\u0000\u0000\u0000\u001e\u0001\u0000\u0000\u0000\u0000 \u0001\u0000"+ - "\u0000\u0000\u0000\"\u0001\u0000\u0000\u0000\u0000$\u0001\u0000\u0000"+ - "\u0000\u0000&\u0001\u0000\u0000\u0000\u0000(\u0001\u0000\u0000\u0000\u0000"+ - "*\u0001\u0000\u0000\u0000\u0000,\u0001\u0000\u0000\u0000\u0000.\u0001"+ - "\u0000\u0000\u0000\u00000\u0001\u0000\u0000\u0000\u00002\u0001\u0000\u0000"+ - "\u0000\u00004\u0001\u0000\u0000\u0000\u00006\u0001\u0000\u0000\u0000\u0000"+ - "8\u0001\u0000\u0000\u0000\u0000:\u0001\u0000\u0000\u0000\u0000<\u0001"+ - "\u0000\u0000\u0000\u0000>\u0001\u0000\u0000\u0000\u0000B\u0001\u0000\u0000"+ - "\u0000\u0001D\u0001\u0000\u0000\u0000\u0001F\u0001\u0000\u0000\u0000\u0001"+ - "H\u0001\u0000\u0000\u0000\u0001J\u0001\u0000\u0000\u0000\u0001L\u0001"+ - "\u0000\u0000\u0000\u0002N\u0001\u0000\u0000\u0000\u0002d\u0001\u0000\u0000"+ - "\u0000\u0002f\u0001\u0000\u0000\u0000\u0002h\u0001\u0000\u0000\u0000\u0002"+ - "j\u0001\u0000\u0000\u0000\u0002l\u0001\u0000\u0000\u0000\u0002n\u0001"+ - "\u0000\u0000\u0000\u0002p\u0001\u0000\u0000\u0000\u0002r\u0001\u0000\u0000"+ - "\u0000\u0002t\u0001\u0000\u0000\u0000\u0002v\u0001\u0000\u0000\u0000\u0002"+ - "x\u0001\u0000\u0000\u0000\u0002z\u0001\u0000\u0000\u0000\u0002|\u0001"+ - "\u0000\u0000\u0000\u0002~\u0001\u0000\u0000\u0000\u0002\u0080\u0001\u0000"+ - 
"\u0000\u0000\u0002\u0082\u0001\u0000\u0000\u0000\u0002\u0084\u0001\u0000"+ - "\u0000\u0000\u0002\u0086\u0001\u0000\u0000\u0000\u0002\u0088\u0001\u0000"+ - "\u0000\u0000\u0002\u008a\u0001\u0000\u0000\u0000\u0002\u008c\u0001\u0000"+ - "\u0000\u0000\u0002\u008e\u0001\u0000\u0000\u0000\u0002\u0090\u0001\u0000"+ - "\u0000\u0000\u0002\u0092\u0001\u0000\u0000\u0000\u0002\u0094\u0001\u0000"+ - "\u0000\u0000\u0002\u0096\u0001\u0000\u0000\u0000\u0002\u0098\u0001\u0000"+ - "\u0000\u0000\u0002\u009a\u0001\u0000\u0000\u0000\u0002\u009c\u0001\u0000"+ - "\u0000\u0000\u0002\u009e\u0001\u0000\u0000\u0000\u0002\u00a0\u0001\u0000"+ - "\u0000\u0000\u0002\u00a2\u0001\u0000\u0000\u0000\u0002\u00a4\u0001\u0000"+ - "\u0000\u0000\u0002\u00a6\u0001\u0000\u0000\u0000\u0002\u00a8\u0001\u0000"+ - "\u0000\u0000\u0002\u00aa\u0001\u0000\u0000\u0000\u0002\u00ac\u0001\u0000"+ - "\u0000\u0000\u0002\u00ae\u0001\u0000\u0000\u0000\u0002\u00b0\u0001\u0000"+ - "\u0000\u0000\u0002\u00b2\u0001\u0000\u0000\u0000\u0002\u00b4\u0001\u0000"+ - "\u0000\u0000\u0002\u00b6\u0001\u0000\u0000\u0000\u0002\u00ba\u0001\u0000"+ - "\u0000\u0000\u0002\u00bc\u0001\u0000\u0000\u0000\u0002\u00be\u0001\u0000"+ - "\u0000\u0000\u0002\u00c0\u0001\u0000\u0000\u0000\u0003\u00c2\u0001\u0000"+ - "\u0000\u0000\u0003\u00c4\u0001\u0000\u0000\u0000\u0003\u00c6\u0001\u0000"+ - "\u0000\u0000\u0003\u00c8\u0001\u0000\u0000\u0000\u0003\u00ca\u0001\u0000"+ - "\u0000\u0000\u0003\u00cc\u0001\u0000\u0000\u0000\u0003\u00ce\u0001\u0000"+ - "\u0000\u0000\u0003\u00d0\u0001\u0000\u0000\u0000\u0003\u00d2\u0001\u0000"+ - "\u0000\u0000\u0003\u00d4\u0001\u0000\u0000\u0000\u0003\u00d6\u0001\u0000"+ - "\u0000\u0000\u0004\u00d8\u0001\u0000\u0000\u0000\u0004\u00da\u0001\u0000"+ - "\u0000\u0000\u0004\u00dc\u0001\u0000\u0000\u0000\u0004\u00e2\u0001\u0000"+ - "\u0000\u0000\u0004\u00e4\u0001\u0000\u0000\u0000\u0004\u00e6\u0001\u0000"+ - "\u0000\u0000\u0004\u00e8\u0001\u0000\u0000\u0000\u0005\u00ea\u0001\u0000"+ - "\u0000\u0000\u0005\u00ec\u0001\u0000\u0000\u0000\u0005\u00ee\u0001\u0000"+ - "\u0000\u0000\u0005\u00f0\u0001\u0000\u0000\u0000\u0005\u00f2\u0001\u0000"+ - "\u0000\u0000\u0005\u00f4\u0001\u0000\u0000\u0000\u0005\u00f6\u0001\u0000"+ - "\u0000\u0000\u0005\u00f8\u0001\u0000\u0000\u0000\u0005\u00fa\u0001\u0000"+ - "\u0000\u0000\u0006\u00fc\u0001\u0000\u0000\u0000\u0006\u00fe\u0001\u0000"+ - "\u0000\u0000\u0006\u0100\u0001\u0000\u0000\u0000\u0006\u0102\u0001\u0000"+ - "\u0000\u0000\u0006\u0106\u0001\u0000\u0000\u0000\u0006\u0108\u0001\u0000"+ - "\u0000\u0000\u0006\u010a\u0001\u0000\u0000\u0000\u0006\u010c\u0001\u0000"+ - "\u0000\u0000\u0006\u010e\u0001\u0000\u0000\u0000\u0006\u0110\u0001\u0000"+ - "\u0000\u0000\u0007\u0112\u0001\u0000\u0000\u0000\u0007\u0114\u0001\u0000"+ - "\u0000\u0000\u0007\u0116\u0001\u0000\u0000\u0000\u0007\u0118\u0001\u0000"+ - "\u0000\u0000\u0007\u011a\u0001\u0000\u0000\u0000\u0007\u011c\u0001\u0000"+ - "\u0000\u0000\u0007\u011e\u0001\u0000\u0000\u0000\u0007\u0120\u0001\u0000"+ - "\u0000\u0000\u0007\u0122\u0001\u0000\u0000\u0000\u0007\u0124\u0001\u0000"+ - "\u0000\u0000\b\u0126\u0001\u0000\u0000\u0000\b\u0128\u0001\u0000\u0000"+ - "\u0000\b\u012a\u0001\u0000\u0000\u0000\b\u012c\u0001\u0000\u0000\u0000"+ - "\b\u012e\u0001\u0000\u0000\u0000\b\u0130\u0001\u0000\u0000\u0000\b\u0132"+ - "\u0001\u0000\u0000\u0000\b\u0134\u0001\u0000\u0000\u0000\t\u0136\u0001"+ - "\u0000\u0000\u0000\t\u0138\u0001\u0000\u0000\u0000\t\u013a\u0001\u0000"+ - "\u0000\u0000\t\u013c\u0001\u0000\u0000\u0000\t\u013e\u0001\u0000\u0000"+ - 
"\u0000\t\u0140\u0001\u0000\u0000\u0000\t\u0142\u0001\u0000\u0000\u0000"+ - "\n\u0144\u0001\u0000\u0000\u0000\n\u0146\u0001\u0000\u0000\u0000\n\u0148"+ - "\u0001\u0000\u0000\u0000\n\u014a\u0001\u0000\u0000\u0000\n\u014c\u0001"+ - "\u0000\u0000\u0000\n\u014e\u0001\u0000\u0000\u0000\n\u0150\u0001\u0000"+ - "\u0000\u0000\u000b\u0152\u0001\u0000\u0000\u0000\u000b\u0154\u0001\u0000"+ + "]\u0002\u0000\n\n\r\r\u0003\u0000\t\n\r\r \u000b\u0000\t\n\r\r \"\""+ + ",,//::==[[]]||\u0002\u0000**//\u0001\u000009\u0002\u0000AZaz\u0005\u0000"+ + "\"\"\\\\nnrrtt\u0004\u0000\n\n\r\r\"\"\\\\\u0002\u0000EEee\u0002\u0000"+ + "++--\u0001\u0000``\u000b\u0000\t\n\r\r \"#,,//::<<>?\\\\||\u05c4\u0000"+ + "\u0010\u0001\u0000\u0000\u0000\u0000\u0012\u0001\u0000\u0000\u0000\u0000"+ + "\u0014\u0001\u0000\u0000\u0000\u0000\u0016\u0001\u0000\u0000\u0000\u0000"+ + "\u0018\u0001\u0000\u0000\u0000\u0000\u001a\u0001\u0000\u0000\u0000\u0000"+ + "\u001c\u0001\u0000\u0000\u0000\u0000\u001e\u0001\u0000\u0000\u0000\u0000"+ + " \u0001\u0000\u0000\u0000\u0000\"\u0001\u0000\u0000\u0000\u0000$\u0001"+ + "\u0000\u0000\u0000\u0000&\u0001\u0000\u0000\u0000\u0000(\u0001\u0000\u0000"+ + "\u0000\u0000*\u0001\u0000\u0000\u0000\u0000,\u0001\u0000\u0000\u0000\u0000"+ + ".\u0001\u0000\u0000\u0000\u00000\u0001\u0000\u0000\u0000\u00002\u0001"+ + "\u0000\u0000\u0000\u00004\u0001\u0000\u0000\u0000\u00006\u0001\u0000\u0000"+ + "\u0000\u00008\u0001\u0000\u0000\u0000\u0000:\u0001\u0000\u0000\u0000\u0000"+ + "<\u0001\u0000\u0000\u0000\u0000>\u0001\u0000\u0000\u0000\u0000B\u0001"+ + "\u0000\u0000\u0000\u0001D\u0001\u0000\u0000\u0000\u0001F\u0001\u0000\u0000"+ + "\u0000\u0001H\u0001\u0000\u0000\u0000\u0001J\u0001\u0000\u0000\u0000\u0001"+ + "L\u0001\u0000\u0000\u0000\u0002N\u0001\u0000\u0000\u0000\u0002d\u0001"+ + "\u0000\u0000\u0000\u0002f\u0001\u0000\u0000\u0000\u0002h\u0001\u0000\u0000"+ + "\u0000\u0002j\u0001\u0000\u0000\u0000\u0002l\u0001\u0000\u0000\u0000\u0002"+ + "n\u0001\u0000\u0000\u0000\u0002p\u0001\u0000\u0000\u0000\u0002r\u0001"+ + "\u0000\u0000\u0000\u0002t\u0001\u0000\u0000\u0000\u0002v\u0001\u0000\u0000"+ + "\u0000\u0002x\u0001\u0000\u0000\u0000\u0002z\u0001\u0000\u0000\u0000\u0002"+ + "|\u0001\u0000\u0000\u0000\u0002~\u0001\u0000\u0000\u0000\u0002\u0080\u0001"+ + "\u0000\u0000\u0000\u0002\u0082\u0001\u0000\u0000\u0000\u0002\u0084\u0001"+ + "\u0000\u0000\u0000\u0002\u0086\u0001\u0000\u0000\u0000\u0002\u0088\u0001"+ + "\u0000\u0000\u0000\u0002\u008a\u0001\u0000\u0000\u0000\u0002\u008c\u0001"+ + "\u0000\u0000\u0000\u0002\u008e\u0001\u0000\u0000\u0000\u0002\u0090\u0001"+ + "\u0000\u0000\u0000\u0002\u0092\u0001\u0000\u0000\u0000\u0002\u0094\u0001"+ + "\u0000\u0000\u0000\u0002\u0096\u0001\u0000\u0000\u0000\u0002\u0098\u0001"+ + "\u0000\u0000\u0000\u0002\u009a\u0001\u0000\u0000\u0000\u0002\u009c\u0001"+ + "\u0000\u0000\u0000\u0002\u009e\u0001\u0000\u0000\u0000\u0002\u00a0\u0001"+ + "\u0000\u0000\u0000\u0002\u00a2\u0001\u0000\u0000\u0000\u0002\u00a4\u0001"+ + "\u0000\u0000\u0000\u0002\u00a6\u0001\u0000\u0000\u0000\u0002\u00a8\u0001"+ + "\u0000\u0000\u0000\u0002\u00aa\u0001\u0000\u0000\u0000\u0002\u00ac\u0001"+ + "\u0000\u0000\u0000\u0002\u00ae\u0001\u0000\u0000\u0000\u0002\u00b0\u0001"+ + "\u0000\u0000\u0000\u0002\u00b2\u0001\u0000\u0000\u0000\u0002\u00b4\u0001"+ + "\u0000\u0000\u0000\u0002\u00b6\u0001\u0000\u0000\u0000\u0002\u00ba\u0001"+ + "\u0000\u0000\u0000\u0002\u00bc\u0001\u0000\u0000\u0000\u0002\u00be\u0001"+ + "\u0000\u0000\u0000\u0002\u00c0\u0001\u0000\u0000\u0000\u0003\u00c2\u0001"+ + 
"\u0000\u0000\u0000\u0003\u00c4\u0001\u0000\u0000\u0000\u0003\u00c6\u0001"+ + "\u0000\u0000\u0000\u0003\u00c8\u0001\u0000\u0000\u0000\u0003\u00ca\u0001"+ + "\u0000\u0000\u0000\u0003\u00cc\u0001\u0000\u0000\u0000\u0003\u00ce\u0001"+ + "\u0000\u0000\u0000\u0003\u00d0\u0001\u0000\u0000\u0000\u0003\u00d2\u0001"+ + "\u0000\u0000\u0000\u0003\u00d4\u0001\u0000\u0000\u0000\u0003\u00d6\u0001"+ + "\u0000\u0000\u0000\u0003\u00d8\u0001\u0000\u0000\u0000\u0004\u00da\u0001"+ + "\u0000\u0000\u0000\u0004\u00dc\u0001\u0000\u0000\u0000\u0004\u00de\u0001"+ + "\u0000\u0000\u0000\u0004\u00e4\u0001\u0000\u0000\u0000\u0004\u00e6\u0001"+ + "\u0000\u0000\u0000\u0004\u00e8\u0001\u0000\u0000\u0000\u0004\u00ea\u0001"+ + "\u0000\u0000\u0000\u0005\u00ec\u0001\u0000\u0000\u0000\u0005\u00ee\u0001"+ + "\u0000\u0000\u0000\u0005\u00f0\u0001\u0000\u0000\u0000\u0005\u00f2\u0001"+ + "\u0000\u0000\u0000\u0005\u00f4\u0001\u0000\u0000\u0000\u0005\u00f6\u0001"+ + "\u0000\u0000\u0000\u0005\u00f8\u0001\u0000\u0000\u0000\u0005\u00fa\u0001"+ + "\u0000\u0000\u0000\u0005\u00fc\u0001\u0000\u0000\u0000\u0006\u00fe\u0001"+ + "\u0000\u0000\u0000\u0006\u0100\u0001\u0000\u0000\u0000\u0006\u0102\u0001"+ + "\u0000\u0000\u0000\u0006\u0104\u0001\u0000\u0000\u0000\u0006\u0108\u0001"+ + "\u0000\u0000\u0000\u0006\u010a\u0001\u0000\u0000\u0000\u0006\u010c\u0001"+ + "\u0000\u0000\u0000\u0006\u010e\u0001\u0000\u0000\u0000\u0006\u0110\u0001"+ + "\u0000\u0000\u0000\u0007\u0112\u0001\u0000\u0000\u0000\u0007\u0114\u0001"+ + "\u0000\u0000\u0000\u0007\u0116\u0001\u0000\u0000\u0000\u0007\u0118\u0001"+ + "\u0000\u0000\u0000\u0007\u011a\u0001\u0000\u0000\u0000\u0007\u011c\u0001"+ + "\u0000\u0000\u0000\u0007\u011e\u0001\u0000\u0000\u0000\u0007\u0120\u0001"+ + "\u0000\u0000\u0000\u0007\u0122\u0001\u0000\u0000\u0000\u0007\u0124\u0001"+ + "\u0000\u0000\u0000\b\u0126\u0001\u0000\u0000\u0000\b\u0128\u0001\u0000"+ + "\u0000\u0000\b\u012a\u0001\u0000\u0000\u0000\b\u012c\u0001\u0000\u0000"+ + "\u0000\b\u012e\u0001\u0000\u0000\u0000\b\u0130\u0001\u0000\u0000\u0000"+ + "\b\u0132\u0001\u0000\u0000\u0000\b\u0134\u0001\u0000\u0000\u0000\b\u0136"+ + "\u0001\u0000\u0000\u0000\b\u0138\u0001\u0000\u0000\u0000\t\u013a\u0001"+ + "\u0000\u0000\u0000\t\u013c\u0001\u0000\u0000\u0000\t\u013e\u0001\u0000"+ + "\u0000\u0000\t\u0140\u0001\u0000\u0000\u0000\t\u0142\u0001\u0000\u0000"+ + "\u0000\t\u0144\u0001\u0000\u0000\u0000\t\u0146\u0001\u0000\u0000\u0000"+ + "\n\u0148\u0001\u0000\u0000\u0000\n\u014a\u0001\u0000\u0000\u0000\n\u014c"+ + "\u0001\u0000\u0000\u0000\n\u014e\u0001\u0000\u0000\u0000\n\u0150\u0001"+ + "\u0000\u0000\u0000\n\u0152\u0001\u0000\u0000\u0000\n\u0154\u0001\u0000"+ "\u0000\u0000\u000b\u0156\u0001\u0000\u0000\u0000\u000b\u0158\u0001\u0000"+ - "\u0000\u0000\u000b\u015a\u0001\u0000\u0000\u0000\f\u015c\u0001\u0000\u0000"+ - "\u0000\f\u015e\u0001\u0000\u0000\u0000\f\u0160\u0001\u0000\u0000\u0000"+ - "\f\u0162\u0001\u0000\u0000\u0000\f\u0164\u0001\u0000\u0000\u0000\r\u0166"+ - "\u0001\u0000\u0000\u0000\r\u0168\u0001\u0000\u0000\u0000\r\u016a\u0001"+ - "\u0000\u0000\u0000\r\u016c\u0001\u0000\u0000\u0000\r\u016e\u0001\u0000"+ - "\u0000\u0000\r\u0170\u0001\u0000\u0000\u0000\u000e\u0172\u0001\u0000\u0000"+ - "\u0000\u000e\u0174\u0001\u0000\u0000\u0000\u000e\u0176\u0001\u0000\u0000"+ + "\u0000\u0000\u000b\u015a\u0001\u0000\u0000\u0000\u000b\u015c\u0001\u0000"+ + "\u0000\u0000\u000b\u015e\u0001\u0000\u0000\u0000\f\u0160\u0001\u0000\u0000"+ + "\u0000\f\u0162\u0001\u0000\u0000\u0000\f\u0164\u0001\u0000\u0000\u0000"+ + 
"\f\u0166\u0001\u0000\u0000\u0000\f\u0168\u0001\u0000\u0000\u0000\r\u016a"+ + "\u0001\u0000\u0000\u0000\r\u016c\u0001\u0000\u0000\u0000\r\u016e\u0001"+ + "\u0000\u0000\u0000\r\u0170\u0001\u0000\u0000\u0000\r\u0172\u0001\u0000"+ + "\u0000\u0000\r\u0174\u0001\u0000\u0000\u0000\u000e\u0176\u0001\u0000\u0000"+ "\u0000\u000e\u0178\u0001\u0000\u0000\u0000\u000e\u017a\u0001\u0000\u0000"+ - "\u0000\u000f\u017c\u0001\u0000\u0000\u0000\u000f\u017e\u0001\u0000\u0000"+ - "\u0000\u000f\u0180\u0001\u0000\u0000\u0000\u000f\u0182\u0001\u0000\u0000"+ + "\u0000\u000e\u017c\u0001\u0000\u0000\u0000\u000e\u017e\u0001\u0000\u0000"+ + "\u0000\u000e\u0180\u0001\u0000\u0000\u0000\u000f\u0182\u0001\u0000\u0000"+ "\u0000\u000f\u0184\u0001\u0000\u0000\u0000\u000f\u0186\u0001\u0000\u0000"+ "\u0000\u000f\u0188\u0001\u0000\u0000\u0000\u000f\u018a\u0001\u0000\u0000"+ - "\u0000\u0010\u018c\u0001\u0000\u0000\u0000\u0012\u0196\u0001\u0000\u0000"+ - "\u0000\u0014\u019d\u0001\u0000\u0000\u0000\u0016\u01a6\u0001\u0000\u0000"+ - "\u0000\u0018\u01ad\u0001\u0000\u0000\u0000\u001a\u01b7\u0001\u0000\u0000"+ - "\u0000\u001c\u01be\u0001\u0000\u0000\u0000\u001e\u01c5\u0001\u0000\u0000"+ - "\u0000 \u01d3\u0001\u0000\u0000\u0000\"\u01da\u0001\u0000\u0000\u0000"+ - "$\u01e2\u0001\u0000\u0000\u0000&\u01eb\u0001\u0000\u0000\u0000(\u01f2"+ - "\u0001\u0000\u0000\u0000*\u01fc\u0001\u0000\u0000\u0000,\u0208\u0001\u0000"+ - "\u0000\u0000.\u0211\u0001\u0000\u0000\u00000\u0217\u0001\u0000\u0000\u0000"+ - "2\u021e\u0001\u0000\u0000\u00004\u0225\u0001\u0000\u0000\u00006\u022d"+ - "\u0001\u0000\u0000\u00008\u0236\u0001\u0000\u0000\u0000:\u023c\u0001\u0000"+ - "\u0000\u0000<\u024d\u0001\u0000\u0000\u0000>\u025d\u0001\u0000\u0000\u0000"+ - "@\u0266\u0001\u0000\u0000\u0000B\u0269\u0001\u0000\u0000\u0000D\u026d"+ - "\u0001\u0000\u0000\u0000F\u0272\u0001\u0000\u0000\u0000H\u0277\u0001\u0000"+ - "\u0000\u0000J\u027b\u0001\u0000\u0000\u0000L\u027f\u0001\u0000\u0000\u0000"+ - "N\u0283\u0001\u0000\u0000\u0000P\u0287\u0001\u0000\u0000\u0000R\u0289"+ - "\u0001\u0000\u0000\u0000T\u028b\u0001\u0000\u0000\u0000V\u028e\u0001\u0000"+ - "\u0000\u0000X\u0290\u0001\u0000\u0000\u0000Z\u0299\u0001\u0000\u0000\u0000"+ - "\\\u029b\u0001\u0000\u0000\u0000^\u02a0\u0001\u0000\u0000\u0000`\u02a2"+ - "\u0001\u0000\u0000\u0000b\u02a7\u0001\u0000\u0000\u0000d\u02c6\u0001\u0000"+ - "\u0000\u0000f\u02c9\u0001\u0000\u0000\u0000h\u02f7\u0001\u0000\u0000\u0000"+ - "j\u02f9\u0001\u0000\u0000\u0000l\u02fc\u0001\u0000\u0000\u0000n\u0300"+ - "\u0001\u0000\u0000\u0000p\u0304\u0001\u0000\u0000\u0000r\u0306\u0001\u0000"+ - "\u0000\u0000t\u0309\u0001\u0000\u0000\u0000v\u030b\u0001\u0000\u0000\u0000"+ - "x\u0310\u0001\u0000\u0000\u0000z\u0312\u0001\u0000\u0000\u0000|\u0318"+ - "\u0001\u0000\u0000\u0000~\u031e\u0001\u0000\u0000\u0000\u0080\u0323\u0001"+ - "\u0000\u0000\u0000\u0082\u0325\u0001\u0000\u0000\u0000\u0084\u0328\u0001"+ - "\u0000\u0000\u0000\u0086\u032b\u0001\u0000\u0000\u0000\u0088\u0330\u0001"+ - "\u0000\u0000\u0000\u008a\u0334\u0001\u0000\u0000\u0000\u008c\u0339\u0001"+ - "\u0000\u0000\u0000\u008e\u033f\u0001\u0000\u0000\u0000\u0090\u0342\u0001"+ - "\u0000\u0000\u0000\u0092\u0344\u0001\u0000\u0000\u0000\u0094\u034a\u0001"+ - "\u0000\u0000\u0000\u0096\u034c\u0001\u0000\u0000\u0000\u0098\u0351\u0001"+ - "\u0000\u0000\u0000\u009a\u0354\u0001\u0000\u0000\u0000\u009c\u0357\u0001"+ - "\u0000\u0000\u0000\u009e\u035a\u0001\u0000\u0000\u0000\u00a0\u035c\u0001"+ - "\u0000\u0000\u0000\u00a2\u035f\u0001\u0000\u0000\u0000\u00a4\u0361\u0001"+ - 
"\u0000\u0000\u0000\u00a6\u0364\u0001\u0000\u0000\u0000\u00a8\u0366\u0001"+ - "\u0000\u0000\u0000\u00aa\u0368\u0001\u0000\u0000\u0000\u00ac\u036a\u0001"+ - "\u0000\u0000\u0000\u00ae\u036c\u0001\u0000\u0000\u0000\u00b0\u037c\u0001"+ - "\u0000\u0000\u0000\u00b2\u037e\u0001\u0000\u0000\u0000\u00b4\u0383\u0001"+ - "\u0000\u0000\u0000\u00b6\u0398\u0001\u0000\u0000\u0000\u00b8\u039a\u0001"+ - "\u0000\u0000\u0000\u00ba\u03a2\u0001\u0000\u0000\u0000\u00bc\u03a4\u0001"+ - "\u0000\u0000\u0000\u00be\u03a8\u0001\u0000\u0000\u0000\u00c0\u03ac\u0001"+ - "\u0000\u0000\u0000\u00c2\u03b0\u0001\u0000\u0000\u0000\u00c4\u03b5\u0001"+ - "\u0000\u0000\u0000\u00c6\u03b9\u0001\u0000\u0000\u0000\u00c8\u03bd\u0001"+ - "\u0000\u0000\u0000\u00ca\u03c1\u0001\u0000\u0000\u0000\u00cc\u03c5\u0001"+ - "\u0000\u0000\u0000\u00ce\u03c9\u0001\u0000\u0000\u0000\u00d0\u03d2\u0001"+ - "\u0000\u0000\u0000\u00d2\u03d6\u0001\u0000\u0000\u0000\u00d4\u03da\u0001"+ - "\u0000\u0000\u0000\u00d6\u03de\u0001\u0000\u0000\u0000\u00d8\u03e2\u0001"+ - "\u0000\u0000\u0000\u00da\u03e7\u0001\u0000\u0000\u0000\u00dc\u03eb\u0001"+ - "\u0000\u0000\u0000\u00de\u03f3\u0001\u0000\u0000\u0000\u00e0\u0408\u0001"+ - "\u0000\u0000\u0000\u00e2\u040c\u0001\u0000\u0000\u0000\u00e4\u0410\u0001"+ - "\u0000\u0000\u0000\u00e6\u0414\u0001\u0000\u0000\u0000\u00e8\u0418\u0001"+ - "\u0000\u0000\u0000\u00ea\u041c\u0001\u0000\u0000\u0000\u00ec\u0421\u0001"+ - "\u0000\u0000\u0000\u00ee\u0425\u0001\u0000\u0000\u0000\u00f0\u0429\u0001"+ - "\u0000\u0000\u0000\u00f2\u042d\u0001\u0000\u0000\u0000\u00f4\u0430\u0001"+ - "\u0000\u0000\u0000\u00f6\u0434\u0001\u0000\u0000\u0000\u00f8\u0438\u0001"+ - "\u0000\u0000\u0000\u00fa\u043c\u0001\u0000\u0000\u0000\u00fc\u0440\u0001"+ - "\u0000\u0000\u0000\u00fe\u0445\u0001\u0000\u0000\u0000\u0100\u044a\u0001"+ - "\u0000\u0000\u0000\u0102\u044f\u0001\u0000\u0000\u0000\u0104\u0456\u0001"+ - "\u0000\u0000\u0000\u0106\u045f\u0001\u0000\u0000\u0000\u0108\u0466\u0001"+ - "\u0000\u0000\u0000\u010a\u046a\u0001\u0000\u0000\u0000\u010c\u046e\u0001"+ - "\u0000\u0000\u0000\u010e\u0472\u0001\u0000\u0000\u0000\u0110\u0476\u0001"+ - "\u0000\u0000\u0000\u0112\u047a\u0001\u0000\u0000\u0000\u0114\u0480\u0001"+ - "\u0000\u0000\u0000\u0116\u0484\u0001\u0000\u0000\u0000\u0118\u0488\u0001"+ - "\u0000\u0000\u0000\u011a\u048c\u0001\u0000\u0000\u0000\u011c\u0490\u0001"+ - "\u0000\u0000\u0000\u011e\u0494\u0001\u0000\u0000\u0000\u0120\u0498\u0001"+ - "\u0000\u0000\u0000\u0122\u049c\u0001\u0000\u0000\u0000\u0124\u04a0\u0001"+ - "\u0000\u0000\u0000\u0126\u04a4\u0001\u0000\u0000\u0000\u0128\u04a9\u0001"+ - "\u0000\u0000\u0000\u012a\u04ad\u0001\u0000\u0000\u0000\u012c\u04b1\u0001"+ - "\u0000\u0000\u0000\u012e\u04b6\u0001\u0000\u0000\u0000\u0130\u04ba\u0001"+ - "\u0000\u0000\u0000\u0132\u04be\u0001\u0000\u0000\u0000\u0134\u04c2\u0001"+ - "\u0000\u0000\u0000\u0136\u04c6\u0001\u0000\u0000\u0000\u0138\u04cc\u0001"+ - "\u0000\u0000\u0000\u013a\u04d0\u0001\u0000\u0000\u0000\u013c\u04d4\u0001"+ - "\u0000\u0000\u0000\u013e\u04d8\u0001\u0000\u0000\u0000\u0140\u04dc\u0001"+ - "\u0000\u0000\u0000\u0142\u04e0\u0001\u0000\u0000\u0000\u0144\u04e4\u0001"+ - "\u0000\u0000\u0000\u0146\u04e9\u0001\u0000\u0000\u0000\u0148\u04ed\u0001"+ - "\u0000\u0000\u0000\u014a\u04f1\u0001\u0000\u0000\u0000\u014c\u04f5\u0001"+ - "\u0000\u0000\u0000\u014e\u04f9\u0001\u0000\u0000\u0000\u0150\u04fd\u0001"+ - "\u0000\u0000\u0000\u0152\u0501\u0001\u0000\u0000\u0000\u0154\u0506\u0001"+ - "\u0000\u0000\u0000\u0156\u050b\u0001\u0000\u0000\u0000\u0158\u050f\u0001"+ - 
"\u0000\u0000\u0000\u015a\u0513\u0001\u0000\u0000\u0000\u015c\u0517\u0001"+ - "\u0000\u0000\u0000\u015e\u051c\u0001\u0000\u0000\u0000\u0160\u0526\u0001"+ - "\u0000\u0000\u0000\u0162\u052a\u0001\u0000\u0000\u0000\u0164\u052e\u0001"+ - "\u0000\u0000\u0000\u0166\u0532\u0001\u0000\u0000\u0000\u0168\u0537\u0001"+ - "\u0000\u0000\u0000\u016a\u053e\u0001\u0000\u0000\u0000\u016c\u0542\u0001"+ - "\u0000\u0000\u0000\u016e\u0546\u0001\u0000\u0000\u0000\u0170\u054a\u0001"+ - "\u0000\u0000\u0000\u0172\u054e\u0001\u0000\u0000\u0000\u0174\u0553\u0001"+ - "\u0000\u0000\u0000\u0176\u0559\u0001\u0000\u0000\u0000\u0178\u055d\u0001"+ - "\u0000\u0000\u0000\u017a\u0561\u0001\u0000\u0000\u0000\u017c\u0565\u0001"+ - "\u0000\u0000\u0000\u017e\u056b\u0001\u0000\u0000\u0000\u0180\u056f\u0001"+ - "\u0000\u0000\u0000\u0182\u0573\u0001\u0000\u0000\u0000\u0184\u0577\u0001"+ - "\u0000\u0000\u0000\u0186\u057d\u0001\u0000\u0000\u0000\u0188\u0583\u0001"+ - "\u0000\u0000\u0000\u018a\u0589\u0001\u0000\u0000\u0000\u018c\u018d\u0005"+ - "d\u0000\u0000\u018d\u018e\u0005i\u0000\u0000\u018e\u018f\u0005s\u0000"+ - "\u0000\u018f\u0190\u0005s\u0000\u0000\u0190\u0191\u0005e\u0000\u0000\u0191"+ - "\u0192\u0005c\u0000\u0000\u0192\u0193\u0005t\u0000\u0000\u0193\u0194\u0001"+ - "\u0000\u0000\u0000\u0194\u0195\u0006\u0000\u0000\u0000\u0195\u0011\u0001"+ - "\u0000\u0000\u0000\u0196\u0197\u0005d\u0000\u0000\u0197\u0198\u0005r\u0000"+ - "\u0000\u0198\u0199\u0005o\u0000\u0000\u0199\u019a\u0005p\u0000\u0000\u019a"+ - "\u019b\u0001\u0000\u0000\u0000\u019b\u019c\u0006\u0001\u0001\u0000\u019c"+ - "\u0013\u0001\u0000\u0000\u0000\u019d\u019e\u0005e\u0000\u0000\u019e\u019f"+ - "\u0005n\u0000\u0000\u019f\u01a0\u0005r\u0000\u0000\u01a0\u01a1\u0005i"+ - "\u0000\u0000\u01a1\u01a2\u0005c\u0000\u0000\u01a2\u01a3\u0005h\u0000\u0000"+ - "\u01a3\u01a4\u0001\u0000\u0000\u0000\u01a4\u01a5\u0006\u0002\u0002\u0000"+ - "\u01a5\u0015\u0001\u0000\u0000\u0000\u01a6\u01a7\u0005e\u0000\u0000\u01a7"+ - "\u01a8\u0005v\u0000\u0000\u01a8\u01a9\u0005a\u0000\u0000\u01a9\u01aa\u0005"+ - "l\u0000\u0000\u01aa\u01ab\u0001\u0000\u0000\u0000\u01ab\u01ac\u0006\u0003"+ - "\u0000\u0000\u01ac\u0017\u0001\u0000\u0000\u0000\u01ad\u01ae\u0005e\u0000"+ - "\u0000\u01ae\u01af\u0005x\u0000\u0000\u01af\u01b0\u0005p\u0000\u0000\u01b0"+ - "\u01b1\u0005l\u0000\u0000\u01b1\u01b2\u0005a\u0000\u0000\u01b2\u01b3\u0005"+ - "i\u0000\u0000\u01b3\u01b4\u0005n\u0000\u0000\u01b4\u01b5\u0001\u0000\u0000"+ - "\u0000\u01b5\u01b6\u0006\u0004\u0003\u0000\u01b6\u0019\u0001\u0000\u0000"+ - "\u0000\u01b7\u01b8\u0005f\u0000\u0000\u01b8\u01b9\u0005r\u0000\u0000\u01b9"+ - "\u01ba\u0005o\u0000\u0000\u01ba\u01bb\u0005m\u0000\u0000\u01bb\u01bc\u0001"+ - "\u0000\u0000\u0000\u01bc\u01bd\u0006\u0005\u0004\u0000\u01bd\u001b\u0001"+ - "\u0000\u0000\u0000\u01be\u01bf\u0005g\u0000\u0000\u01bf\u01c0\u0005r\u0000"+ - "\u0000\u01c0\u01c1\u0005o\u0000\u0000\u01c1\u01c2\u0005k\u0000\u0000\u01c2"+ - "\u01c3\u0001\u0000\u0000\u0000\u01c3\u01c4\u0006\u0006\u0000\u0000\u01c4"+ - "\u001d\u0001\u0000\u0000\u0000\u01c5\u01c6\u0005i\u0000\u0000\u01c6\u01c7"+ - "\u0005n\u0000\u0000\u01c7\u01c8\u0005l\u0000\u0000\u01c8\u01c9\u0005i"+ - "\u0000\u0000\u01c9\u01ca\u0005n\u0000\u0000\u01ca\u01cb\u0005e\u0000\u0000"+ - "\u01cb\u01cc\u0005s\u0000\u0000\u01cc\u01cd\u0005t\u0000\u0000\u01cd\u01ce"+ - "\u0005a\u0000\u0000\u01ce\u01cf\u0005t\u0000\u0000\u01cf\u01d0\u0005s"+ - "\u0000\u0000\u01d0\u01d1\u0001\u0000\u0000\u0000\u01d1\u01d2\u0006\u0007"+ - "\u0000\u0000\u01d2\u001f\u0001\u0000\u0000\u0000\u01d3\u01d4\u0005k\u0000"+ - 
"\u0000\u01d4\u01d5\u0005e\u0000\u0000\u01d5\u01d6\u0005e\u0000\u0000\u01d6"+ - "\u01d7\u0005p\u0000\u0000\u01d7\u01d8\u0001\u0000\u0000\u0000\u01d8\u01d9"+ - "\u0006\b\u0001\u0000\u01d9!\u0001\u0000\u0000\u0000\u01da\u01db\u0005"+ - "l\u0000\u0000\u01db\u01dc\u0005i\u0000\u0000\u01dc\u01dd\u0005m\u0000"+ - "\u0000\u01dd\u01de\u0005i\u0000\u0000\u01de\u01df\u0005t\u0000\u0000\u01df"+ - "\u01e0\u0001\u0000\u0000\u0000\u01e0\u01e1\u0006\t\u0000\u0000\u01e1#"+ - "\u0001\u0000\u0000\u0000\u01e2\u01e3\u0005l\u0000\u0000\u01e3\u01e4\u0005"+ - "o\u0000\u0000\u01e4\u01e5\u0005o\u0000\u0000\u01e5\u01e6\u0005k\u0000"+ - "\u0000\u01e6\u01e7\u0005u\u0000\u0000\u01e7\u01e8\u0005p\u0000\u0000\u01e8"+ - "\u01e9\u0001\u0000\u0000\u0000\u01e9\u01ea\u0006\n\u0005\u0000\u01ea%"+ - "\u0001\u0000\u0000\u0000\u01eb\u01ec\u0005m\u0000\u0000\u01ec\u01ed\u0005"+ - "e\u0000\u0000\u01ed\u01ee\u0005t\u0000\u0000\u01ee\u01ef\u0005a\u0000"+ - "\u0000\u01ef\u01f0\u0001\u0000\u0000\u0000\u01f0\u01f1\u0006\u000b\u0006"+ - "\u0000\u01f1\'\u0001\u0000\u0000\u0000\u01f2\u01f3\u0005m\u0000\u0000"+ - "\u01f3\u01f4\u0005e\u0000\u0000\u01f4\u01f5\u0005t\u0000\u0000\u01f5\u01f6"+ - "\u0005r\u0000\u0000\u01f6\u01f7\u0005i\u0000\u0000\u01f7\u01f8\u0005c"+ - "\u0000\u0000\u01f8\u01f9\u0005s\u0000\u0000\u01f9\u01fa\u0001\u0000\u0000"+ - "\u0000\u01fa\u01fb\u0006\f\u0007\u0000\u01fb)\u0001\u0000\u0000\u0000"+ - "\u01fc\u01fd\u0005m\u0000\u0000\u01fd\u01fe\u0005v\u0000\u0000\u01fe\u01ff"+ - "\u0005_\u0000\u0000\u01ff\u0200\u0005e\u0000\u0000\u0200\u0201\u0005x"+ - "\u0000\u0000\u0201\u0202\u0005p\u0000\u0000\u0202\u0203\u0005a\u0000\u0000"+ - "\u0203\u0204\u0005n\u0000\u0000\u0204\u0205\u0005d\u0000\u0000\u0205\u0206"+ - "\u0001\u0000\u0000\u0000\u0206\u0207\u0006\r\b\u0000\u0207+\u0001\u0000"+ - "\u0000\u0000\u0208\u0209\u0005r\u0000\u0000\u0209\u020a\u0005e\u0000\u0000"+ - "\u020a\u020b\u0005n\u0000\u0000\u020b\u020c\u0005a\u0000\u0000\u020c\u020d"+ - "\u0005m\u0000\u0000\u020d\u020e\u0005e\u0000\u0000\u020e\u020f\u0001\u0000"+ - "\u0000\u0000\u020f\u0210\u0006\u000e\t\u0000\u0210-\u0001\u0000\u0000"+ - "\u0000\u0211\u0212\u0005r\u0000\u0000\u0212\u0213\u0005o\u0000\u0000\u0213"+ - "\u0214\u0005w\u0000\u0000\u0214\u0215\u0001\u0000\u0000\u0000\u0215\u0216"+ - "\u0006\u000f\u0000\u0000\u0216/\u0001\u0000\u0000\u0000\u0217\u0218\u0005"+ - "s\u0000\u0000\u0218\u0219\u0005h\u0000\u0000\u0219\u021a\u0005o\u0000"+ - "\u0000\u021a\u021b\u0005w\u0000\u0000\u021b\u021c\u0001\u0000\u0000\u0000"+ - "\u021c\u021d\u0006\u0010\n\u0000\u021d1\u0001\u0000\u0000\u0000\u021e"+ - "\u021f\u0005s\u0000\u0000\u021f\u0220\u0005o\u0000\u0000\u0220\u0221\u0005"+ - "r\u0000\u0000\u0221\u0222\u0005t\u0000\u0000\u0222\u0223\u0001\u0000\u0000"+ - "\u0000\u0223\u0224\u0006\u0011\u0000\u0000\u02243\u0001\u0000\u0000\u0000"+ - "\u0225\u0226\u0005s\u0000\u0000\u0226\u0227\u0005t\u0000\u0000\u0227\u0228"+ - "\u0005a\u0000\u0000\u0228\u0229\u0005t\u0000\u0000\u0229\u022a\u0005s"+ - "\u0000\u0000\u022a\u022b\u0001\u0000\u0000\u0000\u022b\u022c\u0006\u0012"+ - "\u0000\u0000\u022c5\u0001\u0000\u0000\u0000\u022d\u022e\u0005w\u0000\u0000"+ - "\u022e\u022f\u0005h\u0000\u0000\u022f\u0230\u0005e\u0000\u0000\u0230\u0231"+ - "\u0005r\u0000\u0000\u0231\u0232\u0005e\u0000\u0000\u0232\u0233\u0001\u0000"+ - "\u0000\u0000\u0233\u0234\u0006\u0013\u0000\u0000\u02347\u0001\u0000\u0000"+ - "\u0000\u0235\u0237\b\u0000\u0000\u0000\u0236\u0235\u0001\u0000\u0000\u0000"+ - "\u0237\u0238\u0001\u0000\u0000\u0000\u0238\u0236\u0001\u0000\u0000\u0000"+ - 
"\u0238\u0239\u0001\u0000\u0000\u0000\u0239\u023a\u0001\u0000\u0000\u0000"+ - "\u023a\u023b\u0006\u0014\u0000\u0000\u023b9\u0001\u0000\u0000\u0000\u023c"+ - "\u023d\u0005/\u0000\u0000\u023d\u023e\u0005/\u0000\u0000\u023e\u0242\u0001"+ - "\u0000\u0000\u0000\u023f\u0241\b\u0001\u0000\u0000\u0240\u023f\u0001\u0000"+ - "\u0000\u0000\u0241\u0244\u0001\u0000\u0000\u0000\u0242\u0240\u0001\u0000"+ - "\u0000\u0000\u0242\u0243\u0001\u0000\u0000\u0000\u0243\u0246\u0001\u0000"+ - "\u0000\u0000\u0244\u0242\u0001\u0000\u0000\u0000\u0245\u0247\u0005\r\u0000"+ - "\u0000\u0246\u0245\u0001\u0000\u0000\u0000\u0246\u0247\u0001\u0000\u0000"+ - "\u0000\u0247\u0249\u0001\u0000\u0000\u0000\u0248\u024a\u0005\n\u0000\u0000"+ - "\u0249\u0248\u0001\u0000\u0000\u0000\u0249\u024a\u0001\u0000\u0000\u0000"+ - "\u024a\u024b\u0001\u0000\u0000\u0000\u024b\u024c\u0006\u0015\u000b\u0000"+ - "\u024c;\u0001\u0000\u0000\u0000\u024d\u024e\u0005/\u0000\u0000\u024e\u024f"+ - "\u0005*\u0000\u0000\u024f\u0254\u0001\u0000\u0000\u0000\u0250\u0253\u0003"+ - "<\u0016\u0000\u0251\u0253\t\u0000\u0000\u0000\u0252\u0250\u0001\u0000"+ - "\u0000\u0000\u0252\u0251\u0001\u0000\u0000\u0000\u0253\u0256\u0001\u0000"+ - "\u0000\u0000\u0254\u0255\u0001\u0000\u0000\u0000\u0254\u0252\u0001\u0000"+ - "\u0000\u0000\u0255\u0257\u0001\u0000\u0000\u0000\u0256\u0254\u0001\u0000"+ - "\u0000\u0000\u0257\u0258\u0005*\u0000\u0000\u0258\u0259\u0005/\u0000\u0000"+ - "\u0259\u025a\u0001\u0000\u0000\u0000\u025a\u025b\u0006\u0016\u000b\u0000"+ - "\u025b=\u0001\u0000\u0000\u0000\u025c\u025e\u0007\u0002\u0000\u0000\u025d"+ - "\u025c\u0001\u0000\u0000\u0000\u025e\u025f\u0001\u0000\u0000\u0000\u025f"+ - "\u025d\u0001\u0000\u0000\u0000\u025f\u0260\u0001\u0000\u0000\u0000\u0260"+ - "\u0261\u0001\u0000\u0000\u0000\u0261\u0262\u0006\u0017\u000b\u0000\u0262"+ - "?\u0001\u0000\u0000\u0000\u0263\u0267\b\u0003\u0000\u0000\u0264\u0265"+ - "\u0005/\u0000\u0000\u0265\u0267\b\u0004\u0000\u0000\u0266\u0263\u0001"+ - "\u0000\u0000\u0000\u0266\u0264\u0001\u0000\u0000\u0000\u0267A\u0001\u0000"+ - "\u0000\u0000\u0268\u026a\u0003@\u0018\u0000\u0269\u0268\u0001\u0000\u0000"+ - "\u0000\u026a\u026b\u0001\u0000\u0000\u0000\u026b\u0269\u0001\u0000\u0000"+ - "\u0000\u026b\u026c\u0001\u0000\u0000\u0000\u026cC\u0001\u0000\u0000\u0000"+ - "\u026d\u026e\u0003\u00b2Q\u0000\u026e\u026f\u0001\u0000\u0000\u0000\u026f"+ - "\u0270\u0006\u001a\f\u0000\u0270\u0271\u0006\u001a\r\u0000\u0271E\u0001"+ - "\u0000\u0000\u0000\u0272\u0273\u0003N\u001f\u0000\u0273\u0274\u0001\u0000"+ - "\u0000\u0000\u0274\u0275\u0006\u001b\u000e\u0000\u0275\u0276\u0006\u001b"+ - "\u000f\u0000\u0276G\u0001\u0000\u0000\u0000\u0277\u0278\u0003>\u0017\u0000"+ - "\u0278\u0279\u0001\u0000\u0000\u0000\u0279\u027a\u0006\u001c\u000b\u0000"+ - "\u027aI\u0001\u0000\u0000\u0000\u027b\u027c\u0003:\u0015\u0000\u027c\u027d"+ - "\u0001\u0000\u0000\u0000\u027d\u027e\u0006\u001d\u000b\u0000\u027eK\u0001"+ - "\u0000\u0000\u0000\u027f\u0280\u0003<\u0016\u0000\u0280\u0281\u0001\u0000"+ - "\u0000\u0000\u0281\u0282\u0006\u001e\u000b\u0000\u0282M\u0001\u0000\u0000"+ - "\u0000\u0283\u0284\u0005|\u0000\u0000\u0284\u0285\u0001\u0000\u0000\u0000"+ - "\u0285\u0286\u0006\u001f\u000f\u0000\u0286O\u0001\u0000\u0000\u0000\u0287"+ - "\u0288\u0007\u0005\u0000\u0000\u0288Q\u0001\u0000\u0000\u0000\u0289\u028a"+ - "\u0007\u0006\u0000\u0000\u028aS\u0001\u0000\u0000\u0000\u028b\u028c\u0005"+ - "\\\u0000\u0000\u028c\u028d\u0007\u0007\u0000\u0000\u028dU\u0001\u0000"+ - "\u0000\u0000\u028e\u028f\b\b\u0000\u0000\u028fW\u0001\u0000\u0000\u0000"+ - 
"\u0290\u0292\u0007\t\u0000\u0000\u0291\u0293\u0007\n\u0000\u0000\u0292"+ - "\u0291\u0001\u0000\u0000\u0000\u0292\u0293\u0001\u0000\u0000\u0000\u0293"+ - "\u0295\u0001\u0000\u0000\u0000\u0294\u0296\u0003P \u0000\u0295\u0294\u0001"+ - "\u0000\u0000\u0000\u0296\u0297\u0001\u0000\u0000\u0000\u0297\u0295\u0001"+ - "\u0000\u0000\u0000\u0297\u0298\u0001\u0000\u0000\u0000\u0298Y\u0001\u0000"+ - "\u0000\u0000\u0299\u029a\u0005@\u0000\u0000\u029a[\u0001\u0000\u0000\u0000"+ - "\u029b\u029c\u0005`\u0000\u0000\u029c]\u0001\u0000\u0000\u0000\u029d\u02a1"+ - "\b\u000b\u0000\u0000\u029e\u029f\u0005`\u0000\u0000\u029f\u02a1\u0005"+ - "`\u0000\u0000\u02a0\u029d\u0001\u0000\u0000\u0000\u02a0\u029e\u0001\u0000"+ - "\u0000\u0000\u02a1_\u0001\u0000\u0000\u0000\u02a2\u02a3\u0005_\u0000\u0000"+ - "\u02a3a\u0001\u0000\u0000\u0000\u02a4\u02a8\u0003R!\u0000\u02a5\u02a8"+ - "\u0003P \u0000\u02a6\u02a8\u0003`(\u0000\u02a7\u02a4\u0001\u0000\u0000"+ - "\u0000\u02a7\u02a5\u0001\u0000\u0000\u0000\u02a7\u02a6\u0001\u0000\u0000"+ - "\u0000\u02a8c\u0001\u0000\u0000\u0000\u02a9\u02ae\u0005\"\u0000\u0000"+ - "\u02aa\u02ad\u0003T\"\u0000\u02ab\u02ad\u0003V#\u0000\u02ac\u02aa\u0001"+ - "\u0000\u0000\u0000\u02ac\u02ab\u0001\u0000\u0000\u0000\u02ad\u02b0\u0001"+ - "\u0000\u0000\u0000\u02ae\u02ac\u0001\u0000\u0000\u0000\u02ae\u02af\u0001"+ - "\u0000\u0000\u0000\u02af\u02b1\u0001\u0000\u0000\u0000\u02b0\u02ae\u0001"+ - "\u0000\u0000\u0000\u02b1\u02c7\u0005\"\u0000\u0000\u02b2\u02b3\u0005\""+ - "\u0000\u0000\u02b3\u02b4\u0005\"\u0000\u0000\u02b4\u02b5\u0005\"\u0000"+ - "\u0000\u02b5\u02b9\u0001\u0000\u0000\u0000\u02b6\u02b8\b\u0001\u0000\u0000"+ - "\u02b7\u02b6\u0001\u0000\u0000\u0000\u02b8\u02bb\u0001\u0000\u0000\u0000"+ - "\u02b9\u02ba\u0001\u0000\u0000\u0000\u02b9\u02b7\u0001\u0000\u0000\u0000"+ - "\u02ba\u02bc\u0001\u0000\u0000\u0000\u02bb\u02b9\u0001\u0000\u0000\u0000"+ - "\u02bc\u02bd\u0005\"\u0000\u0000\u02bd\u02be\u0005\"\u0000\u0000\u02be"+ - "\u02bf\u0005\"\u0000\u0000\u02bf\u02c1\u0001\u0000\u0000\u0000\u02c0\u02c2"+ - "\u0005\"\u0000\u0000\u02c1\u02c0\u0001\u0000\u0000\u0000\u02c1\u02c2\u0001"+ - "\u0000\u0000\u0000\u02c2\u02c4\u0001\u0000\u0000\u0000\u02c3\u02c5\u0005"+ - "\"\u0000\u0000\u02c4\u02c3\u0001\u0000\u0000\u0000\u02c4\u02c5\u0001\u0000"+ - "\u0000\u0000\u02c5\u02c7\u0001\u0000\u0000\u0000\u02c6\u02a9\u0001\u0000"+ - "\u0000\u0000\u02c6\u02b2\u0001\u0000\u0000\u0000\u02c7e\u0001\u0000\u0000"+ - "\u0000\u02c8\u02ca\u0003P \u0000\u02c9\u02c8\u0001\u0000\u0000\u0000\u02ca"+ - "\u02cb\u0001\u0000\u0000\u0000\u02cb\u02c9\u0001\u0000\u0000\u0000\u02cb"+ - "\u02cc\u0001\u0000\u0000\u0000\u02ccg\u0001\u0000\u0000\u0000\u02cd\u02cf"+ - "\u0003P \u0000\u02ce\u02cd\u0001\u0000\u0000\u0000\u02cf\u02d0\u0001\u0000"+ - "\u0000\u0000\u02d0\u02ce\u0001\u0000\u0000\u0000\u02d0\u02d1\u0001\u0000"+ - "\u0000\u0000\u02d1\u02d2\u0001\u0000\u0000\u0000\u02d2\u02d6\u0003x4\u0000"+ - "\u02d3\u02d5\u0003P \u0000\u02d4\u02d3\u0001\u0000\u0000\u0000\u02d5\u02d8"+ - "\u0001\u0000\u0000\u0000\u02d6\u02d4\u0001\u0000\u0000\u0000\u02d6\u02d7"+ - "\u0001\u0000\u0000\u0000\u02d7\u02f8\u0001\u0000\u0000\u0000\u02d8\u02d6"+ - "\u0001\u0000\u0000\u0000\u02d9\u02db\u0003x4\u0000\u02da\u02dc\u0003P"+ - " \u0000\u02db\u02da\u0001\u0000\u0000\u0000\u02dc\u02dd\u0001\u0000\u0000"+ - "\u0000\u02dd\u02db\u0001\u0000\u0000\u0000\u02dd\u02de\u0001\u0000\u0000"+ - "\u0000\u02de\u02f8\u0001\u0000\u0000\u0000\u02df\u02e1\u0003P \u0000\u02e0"+ - "\u02df\u0001\u0000\u0000\u0000\u02e1\u02e2\u0001\u0000\u0000\u0000\u02e2"+ - 
"\u02e0\u0001\u0000\u0000\u0000\u02e2\u02e3\u0001\u0000\u0000\u0000\u02e3"+ - "\u02eb\u0001\u0000\u0000\u0000\u02e4\u02e8\u0003x4\u0000\u02e5\u02e7\u0003"+ - "P \u0000\u02e6\u02e5\u0001\u0000\u0000\u0000\u02e7\u02ea\u0001\u0000\u0000"+ - "\u0000\u02e8\u02e6\u0001\u0000\u0000\u0000\u02e8\u02e9\u0001\u0000\u0000"+ - "\u0000\u02e9\u02ec\u0001\u0000\u0000\u0000\u02ea\u02e8\u0001\u0000\u0000"+ - "\u0000\u02eb\u02e4\u0001\u0000\u0000\u0000\u02eb\u02ec\u0001\u0000\u0000"+ - "\u0000\u02ec\u02ed\u0001\u0000\u0000\u0000\u02ed\u02ee\u0003X$\u0000\u02ee"+ - "\u02f8\u0001\u0000\u0000\u0000\u02ef\u02f1\u0003x4\u0000\u02f0\u02f2\u0003"+ - "P \u0000\u02f1\u02f0\u0001\u0000\u0000\u0000\u02f2\u02f3\u0001\u0000\u0000"+ - "\u0000\u02f3\u02f1\u0001\u0000\u0000\u0000\u02f3\u02f4\u0001\u0000\u0000"+ + "\u0000\u000f\u018c\u0001\u0000\u0000\u0000\u000f\u018e\u0001\u0000\u0000"+ + "\u0000\u000f\u0190\u0001\u0000\u0000\u0000\u000f\u0192\u0001\u0000\u0000"+ + "\u0000\u0010\u0194\u0001\u0000\u0000\u0000\u0012\u019e\u0001\u0000\u0000"+ + "\u0000\u0014\u01a5\u0001\u0000\u0000\u0000\u0016\u01ae\u0001\u0000\u0000"+ + "\u0000\u0018\u01b5\u0001\u0000\u0000\u0000\u001a\u01bf\u0001\u0000\u0000"+ + "\u0000\u001c\u01c6\u0001\u0000\u0000\u0000\u001e\u01cd\u0001\u0000\u0000"+ + "\u0000 \u01db\u0001\u0000\u0000\u0000\"\u01e2\u0001\u0000\u0000\u0000"+ + "$\u01ea\u0001\u0000\u0000\u0000&\u01f3\u0001\u0000\u0000\u0000(\u01fa"+ + "\u0001\u0000\u0000\u0000*\u0204\u0001\u0000\u0000\u0000,\u0210\u0001\u0000"+ + "\u0000\u0000.\u0219\u0001\u0000\u0000\u00000\u021f\u0001\u0000\u0000\u0000"+ + "2\u0226\u0001\u0000\u0000\u00004\u022d\u0001\u0000\u0000\u00006\u0235"+ + "\u0001\u0000\u0000\u00008\u023e\u0001\u0000\u0000\u0000:\u0244\u0001\u0000"+ + "\u0000\u0000<\u0255\u0001\u0000\u0000\u0000>\u0265\u0001\u0000\u0000\u0000"+ + "@\u026e\u0001\u0000\u0000\u0000B\u0271\u0001\u0000\u0000\u0000D\u0275"+ + "\u0001\u0000\u0000\u0000F\u027a\u0001\u0000\u0000\u0000H\u027f\u0001\u0000"+ + "\u0000\u0000J\u0283\u0001\u0000\u0000\u0000L\u0287\u0001\u0000\u0000\u0000"+ + "N\u028b\u0001\u0000\u0000\u0000P\u028f\u0001\u0000\u0000\u0000R\u0291"+ + "\u0001\u0000\u0000\u0000T\u0293\u0001\u0000\u0000\u0000V\u0296\u0001\u0000"+ + "\u0000\u0000X\u0298\u0001\u0000\u0000\u0000Z\u02a1\u0001\u0000\u0000\u0000"+ + "\\\u02a3\u0001\u0000\u0000\u0000^\u02a8\u0001\u0000\u0000\u0000`\u02aa"+ + "\u0001\u0000\u0000\u0000b\u02af\u0001\u0000\u0000\u0000d\u02ce\u0001\u0000"+ + "\u0000\u0000f\u02d1\u0001\u0000\u0000\u0000h\u02ff\u0001\u0000\u0000\u0000"+ + "j\u0301\u0001\u0000\u0000\u0000l\u0304\u0001\u0000\u0000\u0000n\u0308"+ + "\u0001\u0000\u0000\u0000p\u030c\u0001\u0000\u0000\u0000r\u030e\u0001\u0000"+ + "\u0000\u0000t\u0311\u0001\u0000\u0000\u0000v\u0313\u0001\u0000\u0000\u0000"+ + "x\u0318\u0001\u0000\u0000\u0000z\u031a\u0001\u0000\u0000\u0000|\u0320"+ + "\u0001\u0000\u0000\u0000~\u0326\u0001\u0000\u0000\u0000\u0080\u032b\u0001"+ + "\u0000\u0000\u0000\u0082\u032d\u0001\u0000\u0000\u0000\u0084\u0330\u0001"+ + "\u0000\u0000\u0000\u0086\u0333\u0001\u0000\u0000\u0000\u0088\u0338\u0001"+ + "\u0000\u0000\u0000\u008a\u033c\u0001\u0000\u0000\u0000\u008c\u0341\u0001"+ + "\u0000\u0000\u0000\u008e\u0347\u0001\u0000\u0000\u0000\u0090\u034a\u0001"+ + "\u0000\u0000\u0000\u0092\u034c\u0001\u0000\u0000\u0000\u0094\u0352\u0001"+ + "\u0000\u0000\u0000\u0096\u0354\u0001\u0000\u0000\u0000\u0098\u0359\u0001"+ + "\u0000\u0000\u0000\u009a\u035c\u0001\u0000\u0000\u0000\u009c\u035f\u0001"+ + "\u0000\u0000\u0000\u009e\u0362\u0001\u0000\u0000\u0000\u00a0\u0364\u0001"+ + 
"\u0000\u0000\u0000\u00a2\u0367\u0001\u0000\u0000\u0000\u00a4\u0369\u0001"+ + "\u0000\u0000\u0000\u00a6\u036c\u0001\u0000\u0000\u0000\u00a8\u036e\u0001"+ + "\u0000\u0000\u0000\u00aa\u0370\u0001\u0000\u0000\u0000\u00ac\u0372\u0001"+ + "\u0000\u0000\u0000\u00ae\u0374\u0001\u0000\u0000\u0000\u00b0\u0384\u0001"+ + "\u0000\u0000\u0000\u00b2\u0386\u0001\u0000\u0000\u0000\u00b4\u038b\u0001"+ + "\u0000\u0000\u0000\u00b6\u03a0\u0001\u0000\u0000\u0000\u00b8\u03a2\u0001"+ + "\u0000\u0000\u0000\u00ba\u03aa\u0001\u0000\u0000\u0000\u00bc\u03ac\u0001"+ + "\u0000\u0000\u0000\u00be\u03b0\u0001\u0000\u0000\u0000\u00c0\u03b4\u0001"+ + "\u0000\u0000\u0000\u00c2\u03b8\u0001\u0000\u0000\u0000\u00c4\u03bd\u0001"+ + "\u0000\u0000\u0000\u00c6\u03c1\u0001\u0000\u0000\u0000\u00c8\u03c5\u0001"+ + "\u0000\u0000\u0000\u00ca\u03c9\u0001\u0000\u0000\u0000\u00cc\u03cd\u0001"+ + "\u0000\u0000\u0000\u00ce\u03d1\u0001\u0000\u0000\u0000\u00d0\u03da\u0001"+ + "\u0000\u0000\u0000\u00d2\u03de\u0001\u0000\u0000\u0000\u00d4\u03e2\u0001"+ + "\u0000\u0000\u0000\u00d6\u03e6\u0001\u0000\u0000\u0000\u00d8\u03ea\u0001"+ + "\u0000\u0000\u0000\u00da\u03ee\u0001\u0000\u0000\u0000\u00dc\u03f3\u0001"+ + "\u0000\u0000\u0000\u00de\u03f7\u0001\u0000\u0000\u0000\u00e0\u03ff\u0001"+ + "\u0000\u0000\u0000\u00e2\u0414\u0001\u0000\u0000\u0000\u00e4\u0418\u0001"+ + "\u0000\u0000\u0000\u00e6\u041c\u0001\u0000\u0000\u0000\u00e8\u0420\u0001"+ + "\u0000\u0000\u0000\u00ea\u0424\u0001\u0000\u0000\u0000\u00ec\u0428\u0001"+ + "\u0000\u0000\u0000\u00ee\u042d\u0001\u0000\u0000\u0000\u00f0\u0431\u0001"+ + "\u0000\u0000\u0000\u00f2\u0435\u0001\u0000\u0000\u0000\u00f4\u0439\u0001"+ + "\u0000\u0000\u0000\u00f6\u043c\u0001\u0000\u0000\u0000\u00f8\u0440\u0001"+ + "\u0000\u0000\u0000\u00fa\u0444\u0001\u0000\u0000\u0000\u00fc\u0448\u0001"+ + "\u0000\u0000\u0000\u00fe\u044c\u0001\u0000\u0000\u0000\u0100\u0451\u0001"+ + "\u0000\u0000\u0000\u0102\u0456\u0001\u0000\u0000\u0000\u0104\u045b\u0001"+ + "\u0000\u0000\u0000\u0106\u0462\u0001\u0000\u0000\u0000\u0108\u046b\u0001"+ + "\u0000\u0000\u0000\u010a\u0472\u0001\u0000\u0000\u0000\u010c\u0476\u0001"+ + "\u0000\u0000\u0000\u010e\u047a\u0001\u0000\u0000\u0000\u0110\u047e\u0001"+ + "\u0000\u0000\u0000\u0112\u0482\u0001\u0000\u0000\u0000\u0114\u0488\u0001"+ + "\u0000\u0000\u0000\u0116\u048c\u0001\u0000\u0000\u0000\u0118\u0490\u0001"+ + "\u0000\u0000\u0000\u011a\u0494\u0001\u0000\u0000\u0000\u011c\u0498\u0001"+ + "\u0000\u0000\u0000\u011e\u049c\u0001\u0000\u0000\u0000\u0120\u04a0\u0001"+ + "\u0000\u0000\u0000\u0122\u04a4\u0001\u0000\u0000\u0000\u0124\u04a8\u0001"+ + "\u0000\u0000\u0000\u0126\u04ac\u0001\u0000\u0000\u0000\u0128\u04b1\u0001"+ + "\u0000\u0000\u0000\u012a\u04b5\u0001\u0000\u0000\u0000\u012c\u04b9\u0001"+ + "\u0000\u0000\u0000\u012e\u04bd\u0001\u0000\u0000\u0000\u0130\u04c2\u0001"+ + "\u0000\u0000\u0000\u0132\u04c6\u0001\u0000\u0000\u0000\u0134\u04ca\u0001"+ + "\u0000\u0000\u0000\u0136\u04ce\u0001\u0000\u0000\u0000\u0138\u04d2\u0001"+ + "\u0000\u0000\u0000\u013a\u04d6\u0001\u0000\u0000\u0000\u013c\u04dc\u0001"+ + "\u0000\u0000\u0000\u013e\u04e0\u0001\u0000\u0000\u0000\u0140\u04e4\u0001"+ + "\u0000\u0000\u0000\u0142\u04e8\u0001\u0000\u0000\u0000\u0144\u04ec\u0001"+ + "\u0000\u0000\u0000\u0146\u04f0\u0001\u0000\u0000\u0000\u0148\u04f4\u0001"+ + "\u0000\u0000\u0000\u014a\u04f9\u0001\u0000\u0000\u0000\u014c\u04fd\u0001"+ + "\u0000\u0000\u0000\u014e\u0501\u0001\u0000\u0000\u0000\u0150\u0505\u0001"+ + "\u0000\u0000\u0000\u0152\u0509\u0001\u0000\u0000\u0000\u0154\u050d\u0001"+ + 
"\u0000\u0000\u0000\u0156\u0511\u0001\u0000\u0000\u0000\u0158\u0516\u0001"+ + "\u0000\u0000\u0000\u015a\u051b\u0001\u0000\u0000\u0000\u015c\u051f\u0001"+ + "\u0000\u0000\u0000\u015e\u0523\u0001\u0000\u0000\u0000\u0160\u0527\u0001"+ + "\u0000\u0000\u0000\u0162\u052c\u0001\u0000\u0000\u0000\u0164\u0536\u0001"+ + "\u0000\u0000\u0000\u0166\u053a\u0001\u0000\u0000\u0000\u0168\u053e\u0001"+ + "\u0000\u0000\u0000\u016a\u0542\u0001\u0000\u0000\u0000\u016c\u0547\u0001"+ + "\u0000\u0000\u0000\u016e\u054e\u0001\u0000\u0000\u0000\u0170\u0552\u0001"+ + "\u0000\u0000\u0000\u0172\u0556\u0001\u0000\u0000\u0000\u0174\u055a\u0001"+ + "\u0000\u0000\u0000\u0176\u055e\u0001\u0000\u0000\u0000\u0178\u0563\u0001"+ + "\u0000\u0000\u0000\u017a\u0569\u0001\u0000\u0000\u0000\u017c\u056f\u0001"+ + "\u0000\u0000\u0000\u017e\u0573\u0001\u0000\u0000\u0000\u0180\u0577\u0001"+ + "\u0000\u0000\u0000\u0182\u057b\u0001\u0000\u0000\u0000\u0184\u0581\u0001"+ + "\u0000\u0000\u0000\u0186\u0587\u0001\u0000\u0000\u0000\u0188\u058b\u0001"+ + "\u0000\u0000\u0000\u018a\u058f\u0001\u0000\u0000\u0000\u018c\u0593\u0001"+ + "\u0000\u0000\u0000\u018e\u0599\u0001\u0000\u0000\u0000\u0190\u059f\u0001"+ + "\u0000\u0000\u0000\u0192\u05a5\u0001\u0000\u0000\u0000\u0194\u0195\u0005"+ + "d\u0000\u0000\u0195\u0196\u0005i\u0000\u0000\u0196\u0197\u0005s\u0000"+ + "\u0000\u0197\u0198\u0005s\u0000\u0000\u0198\u0199\u0005e\u0000\u0000\u0199"+ + "\u019a\u0005c\u0000\u0000\u019a\u019b\u0005t\u0000\u0000\u019b\u019c\u0001"+ + "\u0000\u0000\u0000\u019c\u019d\u0006\u0000\u0000\u0000\u019d\u0011\u0001"+ + "\u0000\u0000\u0000\u019e\u019f\u0005d\u0000\u0000\u019f\u01a0\u0005r\u0000"+ + "\u0000\u01a0\u01a1\u0005o\u0000\u0000\u01a1\u01a2\u0005p\u0000\u0000\u01a2"+ + "\u01a3\u0001\u0000\u0000\u0000\u01a3\u01a4\u0006\u0001\u0001\u0000\u01a4"+ + "\u0013\u0001\u0000\u0000\u0000\u01a5\u01a6\u0005e\u0000\u0000\u01a6\u01a7"+ + "\u0005n\u0000\u0000\u01a7\u01a8\u0005r\u0000\u0000\u01a8\u01a9\u0005i"+ + "\u0000\u0000\u01a9\u01aa\u0005c\u0000\u0000\u01aa\u01ab\u0005h\u0000\u0000"+ + "\u01ab\u01ac\u0001\u0000\u0000\u0000\u01ac\u01ad\u0006\u0002\u0002\u0000"+ + "\u01ad\u0015\u0001\u0000\u0000\u0000\u01ae\u01af\u0005e\u0000\u0000\u01af"+ + "\u01b0\u0005v\u0000\u0000\u01b0\u01b1\u0005a\u0000\u0000\u01b1\u01b2\u0005"+ + "l\u0000\u0000\u01b2\u01b3\u0001\u0000\u0000\u0000\u01b3\u01b4\u0006\u0003"+ + "\u0000\u0000\u01b4\u0017\u0001\u0000\u0000\u0000\u01b5\u01b6\u0005e\u0000"+ + "\u0000\u01b6\u01b7\u0005x\u0000\u0000\u01b7\u01b8\u0005p\u0000\u0000\u01b8"+ + "\u01b9\u0005l\u0000\u0000\u01b9\u01ba\u0005a\u0000\u0000\u01ba\u01bb\u0005"+ + "i\u0000\u0000\u01bb\u01bc\u0005n\u0000\u0000\u01bc\u01bd\u0001\u0000\u0000"+ + "\u0000\u01bd\u01be\u0006\u0004\u0003\u0000\u01be\u0019\u0001\u0000\u0000"+ + "\u0000\u01bf\u01c0\u0005f\u0000\u0000\u01c0\u01c1\u0005r\u0000\u0000\u01c1"+ + "\u01c2\u0005o\u0000\u0000\u01c2\u01c3\u0005m\u0000\u0000\u01c3\u01c4\u0001"+ + "\u0000\u0000\u0000\u01c4\u01c5\u0006\u0005\u0004\u0000\u01c5\u001b\u0001"+ + "\u0000\u0000\u0000\u01c6\u01c7\u0005g\u0000\u0000\u01c7\u01c8\u0005r\u0000"+ + "\u0000\u01c8\u01c9\u0005o\u0000\u0000\u01c9\u01ca\u0005k\u0000\u0000\u01ca"+ + "\u01cb\u0001\u0000\u0000\u0000\u01cb\u01cc\u0006\u0006\u0000\u0000\u01cc"+ + "\u001d\u0001\u0000\u0000\u0000\u01cd\u01ce\u0005i\u0000\u0000\u01ce\u01cf"+ + "\u0005n\u0000\u0000\u01cf\u01d0\u0005l\u0000\u0000\u01d0\u01d1\u0005i"+ + "\u0000\u0000\u01d1\u01d2\u0005n\u0000\u0000\u01d2\u01d3\u0005e\u0000\u0000"+ + "\u01d3\u01d4\u0005s\u0000\u0000\u01d4\u01d5\u0005t\u0000\u0000\u01d5\u01d6"+ + 
"\u0005a\u0000\u0000\u01d6\u01d7\u0005t\u0000\u0000\u01d7\u01d8\u0005s"+ + "\u0000\u0000\u01d8\u01d9\u0001\u0000\u0000\u0000\u01d9\u01da\u0006\u0007"+ + "\u0000\u0000\u01da\u001f\u0001\u0000\u0000\u0000\u01db\u01dc\u0005k\u0000"+ + "\u0000\u01dc\u01dd\u0005e\u0000\u0000\u01dd\u01de\u0005e\u0000\u0000\u01de"+ + "\u01df\u0005p\u0000\u0000\u01df\u01e0\u0001\u0000\u0000\u0000\u01e0\u01e1"+ + "\u0006\b\u0001\u0000\u01e1!\u0001\u0000\u0000\u0000\u01e2\u01e3\u0005"+ + "l\u0000\u0000\u01e3\u01e4\u0005i\u0000\u0000\u01e4\u01e5\u0005m\u0000"+ + "\u0000\u01e5\u01e6\u0005i\u0000\u0000\u01e6\u01e7\u0005t\u0000\u0000\u01e7"+ + "\u01e8\u0001\u0000\u0000\u0000\u01e8\u01e9\u0006\t\u0000\u0000\u01e9#"+ + "\u0001\u0000\u0000\u0000\u01ea\u01eb\u0005l\u0000\u0000\u01eb\u01ec\u0005"+ + "o\u0000\u0000\u01ec\u01ed\u0005o\u0000\u0000\u01ed\u01ee\u0005k\u0000"+ + "\u0000\u01ee\u01ef\u0005u\u0000\u0000\u01ef\u01f0\u0005p\u0000\u0000\u01f0"+ + "\u01f1\u0001\u0000\u0000\u0000\u01f1\u01f2\u0006\n\u0005\u0000\u01f2%"+ + "\u0001\u0000\u0000\u0000\u01f3\u01f4\u0005m\u0000\u0000\u01f4\u01f5\u0005"+ + "e\u0000\u0000\u01f5\u01f6\u0005t\u0000\u0000\u01f6\u01f7\u0005a\u0000"+ + "\u0000\u01f7\u01f8\u0001\u0000\u0000\u0000\u01f8\u01f9\u0006\u000b\u0006"+ + "\u0000\u01f9\'\u0001\u0000\u0000\u0000\u01fa\u01fb\u0005m\u0000\u0000"+ + "\u01fb\u01fc\u0005e\u0000\u0000\u01fc\u01fd\u0005t\u0000\u0000\u01fd\u01fe"+ + "\u0005r\u0000\u0000\u01fe\u01ff\u0005i\u0000\u0000\u01ff\u0200\u0005c"+ + "\u0000\u0000\u0200\u0201\u0005s\u0000\u0000\u0201\u0202\u0001\u0000\u0000"+ + "\u0000\u0202\u0203\u0006\f\u0007\u0000\u0203)\u0001\u0000\u0000\u0000"+ + "\u0204\u0205\u0005m\u0000\u0000\u0205\u0206\u0005v\u0000\u0000\u0206\u0207"+ + "\u0005_\u0000\u0000\u0207\u0208\u0005e\u0000\u0000\u0208\u0209\u0005x"+ + "\u0000\u0000\u0209\u020a\u0005p\u0000\u0000\u020a\u020b\u0005a\u0000\u0000"+ + "\u020b\u020c\u0005n\u0000\u0000\u020c\u020d\u0005d\u0000\u0000\u020d\u020e"+ + "\u0001\u0000\u0000\u0000\u020e\u020f\u0006\r\b\u0000\u020f+\u0001\u0000"+ + "\u0000\u0000\u0210\u0211\u0005r\u0000\u0000\u0211\u0212\u0005e\u0000\u0000"+ + "\u0212\u0213\u0005n\u0000\u0000\u0213\u0214\u0005a\u0000\u0000\u0214\u0215"+ + "\u0005m\u0000\u0000\u0215\u0216\u0005e\u0000\u0000\u0216\u0217\u0001\u0000"+ + "\u0000\u0000\u0217\u0218\u0006\u000e\t\u0000\u0218-\u0001\u0000\u0000"+ + "\u0000\u0219\u021a\u0005r\u0000\u0000\u021a\u021b\u0005o\u0000\u0000\u021b"+ + "\u021c\u0005w\u0000\u0000\u021c\u021d\u0001\u0000\u0000\u0000\u021d\u021e"+ + "\u0006\u000f\u0000\u0000\u021e/\u0001\u0000\u0000\u0000\u021f\u0220\u0005"+ + "s\u0000\u0000\u0220\u0221\u0005h\u0000\u0000\u0221\u0222\u0005o\u0000"+ + "\u0000\u0222\u0223\u0005w\u0000\u0000\u0223\u0224\u0001\u0000\u0000\u0000"+ + "\u0224\u0225\u0006\u0010\n\u0000\u02251\u0001\u0000\u0000\u0000\u0226"+ + "\u0227\u0005s\u0000\u0000\u0227\u0228\u0005o\u0000\u0000\u0228\u0229\u0005"+ + "r\u0000\u0000\u0229\u022a\u0005t\u0000\u0000\u022a\u022b\u0001\u0000\u0000"+ + "\u0000\u022b\u022c\u0006\u0011\u0000\u0000\u022c3\u0001\u0000\u0000\u0000"+ + "\u022d\u022e\u0005s\u0000\u0000\u022e\u022f\u0005t\u0000\u0000\u022f\u0230"+ + "\u0005a\u0000\u0000\u0230\u0231\u0005t\u0000\u0000\u0231\u0232\u0005s"+ + "\u0000\u0000\u0232\u0233\u0001\u0000\u0000\u0000\u0233\u0234\u0006\u0012"+ + "\u0000\u0000\u02345\u0001\u0000\u0000\u0000\u0235\u0236\u0005w\u0000\u0000"+ + "\u0236\u0237\u0005h\u0000\u0000\u0237\u0238\u0005e\u0000\u0000\u0238\u0239"+ + "\u0005r\u0000\u0000\u0239\u023a\u0005e\u0000\u0000\u023a\u023b\u0001\u0000"+ + 
"\u0000\u0000\u023b\u023c\u0006\u0013\u0000\u0000\u023c7\u0001\u0000\u0000"+ + "\u0000\u023d\u023f\b\u0000\u0000\u0000\u023e\u023d\u0001\u0000\u0000\u0000"+ + "\u023f\u0240\u0001\u0000\u0000\u0000\u0240\u023e\u0001\u0000\u0000\u0000"+ + "\u0240\u0241\u0001\u0000\u0000\u0000\u0241\u0242\u0001\u0000\u0000\u0000"+ + "\u0242\u0243\u0006\u0014\u0000\u0000\u02439\u0001\u0000\u0000\u0000\u0244"+ + "\u0245\u0005/\u0000\u0000\u0245\u0246\u0005/\u0000\u0000\u0246\u024a\u0001"+ + "\u0000\u0000\u0000\u0247\u0249\b\u0001\u0000\u0000\u0248\u0247\u0001\u0000"+ + "\u0000\u0000\u0249\u024c\u0001\u0000\u0000\u0000\u024a\u0248\u0001\u0000"+ + "\u0000\u0000\u024a\u024b\u0001\u0000\u0000\u0000\u024b\u024e\u0001\u0000"+ + "\u0000\u0000\u024c\u024a\u0001\u0000\u0000\u0000\u024d\u024f\u0005\r\u0000"+ + "\u0000\u024e\u024d\u0001\u0000\u0000\u0000\u024e\u024f\u0001\u0000\u0000"+ + "\u0000\u024f\u0251\u0001\u0000\u0000\u0000\u0250\u0252\u0005\n\u0000\u0000"+ + "\u0251\u0250\u0001\u0000\u0000\u0000\u0251\u0252\u0001\u0000\u0000\u0000"+ + "\u0252\u0253\u0001\u0000\u0000\u0000\u0253\u0254\u0006\u0015\u000b\u0000"+ + "\u0254;\u0001\u0000\u0000\u0000\u0255\u0256\u0005/\u0000\u0000\u0256\u0257"+ + "\u0005*\u0000\u0000\u0257\u025c\u0001\u0000\u0000\u0000\u0258\u025b\u0003"+ + "<\u0016\u0000\u0259\u025b\t\u0000\u0000\u0000\u025a\u0258\u0001\u0000"+ + "\u0000\u0000\u025a\u0259\u0001\u0000\u0000\u0000\u025b\u025e\u0001\u0000"+ + "\u0000\u0000\u025c\u025d\u0001\u0000\u0000\u0000\u025c\u025a\u0001\u0000"+ + "\u0000\u0000\u025d\u025f\u0001\u0000\u0000\u0000\u025e\u025c\u0001\u0000"+ + "\u0000\u0000\u025f\u0260\u0005*\u0000\u0000\u0260\u0261\u0005/\u0000\u0000"+ + "\u0261\u0262\u0001\u0000\u0000\u0000\u0262\u0263\u0006\u0016\u000b\u0000"+ + "\u0263=\u0001\u0000\u0000\u0000\u0264\u0266\u0007\u0002\u0000\u0000\u0265"+ + "\u0264\u0001\u0000\u0000\u0000\u0266\u0267\u0001\u0000\u0000\u0000\u0267"+ + "\u0265\u0001\u0000\u0000\u0000\u0267\u0268\u0001\u0000\u0000\u0000\u0268"+ + "\u0269\u0001\u0000\u0000\u0000\u0269\u026a\u0006\u0017\u000b\u0000\u026a"+ + "?\u0001\u0000\u0000\u0000\u026b\u026f\b\u0003\u0000\u0000\u026c\u026d"+ + "\u0005/\u0000\u0000\u026d\u026f\b\u0004\u0000\u0000\u026e\u026b\u0001"+ + "\u0000\u0000\u0000\u026e\u026c\u0001\u0000\u0000\u0000\u026fA\u0001\u0000"+ + "\u0000\u0000\u0270\u0272\u0003@\u0018\u0000\u0271\u0270\u0001\u0000\u0000"+ + "\u0000\u0272\u0273\u0001\u0000\u0000\u0000\u0273\u0271\u0001\u0000\u0000"+ + "\u0000\u0273\u0274\u0001\u0000\u0000\u0000\u0274C\u0001\u0000\u0000\u0000"+ + "\u0275\u0276\u0003\u00b2Q\u0000\u0276\u0277\u0001\u0000\u0000\u0000\u0277"+ + "\u0278\u0006\u001a\f\u0000\u0278\u0279\u0006\u001a\r\u0000\u0279E\u0001"+ + "\u0000\u0000\u0000\u027a\u027b\u0003N\u001f\u0000\u027b\u027c\u0001\u0000"+ + "\u0000\u0000\u027c\u027d\u0006\u001b\u000e\u0000\u027d\u027e\u0006\u001b"+ + "\u000f\u0000\u027eG\u0001\u0000\u0000\u0000\u027f\u0280\u0003>\u0017\u0000"+ + "\u0280\u0281\u0001\u0000\u0000\u0000\u0281\u0282\u0006\u001c\u000b\u0000"+ + "\u0282I\u0001\u0000\u0000\u0000\u0283\u0284\u0003:\u0015\u0000\u0284\u0285"+ + "\u0001\u0000\u0000\u0000\u0285\u0286\u0006\u001d\u000b\u0000\u0286K\u0001"+ + "\u0000\u0000\u0000\u0287\u0288\u0003<\u0016\u0000\u0288\u0289\u0001\u0000"+ + "\u0000\u0000\u0289\u028a\u0006\u001e\u000b\u0000\u028aM\u0001\u0000\u0000"+ + "\u0000\u028b\u028c\u0005|\u0000\u0000\u028c\u028d\u0001\u0000\u0000\u0000"+ + "\u028d\u028e\u0006\u001f\u000f\u0000\u028eO\u0001\u0000\u0000\u0000\u028f"+ + "\u0290\u0007\u0005\u0000\u0000\u0290Q\u0001\u0000\u0000\u0000\u0291\u0292"+ + 
"\u0007\u0006\u0000\u0000\u0292S\u0001\u0000\u0000\u0000\u0293\u0294\u0005"+ + "\\\u0000\u0000\u0294\u0295\u0007\u0007\u0000\u0000\u0295U\u0001\u0000"+ + "\u0000\u0000\u0296\u0297\b\b\u0000\u0000\u0297W\u0001\u0000\u0000\u0000"+ + "\u0298\u029a\u0007\t\u0000\u0000\u0299\u029b\u0007\n\u0000\u0000\u029a"+ + "\u0299\u0001\u0000\u0000\u0000\u029a\u029b\u0001\u0000\u0000\u0000\u029b"+ + "\u029d\u0001\u0000\u0000\u0000\u029c\u029e\u0003P \u0000\u029d\u029c\u0001"+ + "\u0000\u0000\u0000\u029e\u029f\u0001\u0000\u0000\u0000\u029f\u029d\u0001"+ + "\u0000\u0000\u0000\u029f\u02a0\u0001\u0000\u0000\u0000\u02a0Y\u0001\u0000"+ + "\u0000\u0000\u02a1\u02a2\u0005@\u0000\u0000\u02a2[\u0001\u0000\u0000\u0000"+ + "\u02a3\u02a4\u0005`\u0000\u0000\u02a4]\u0001\u0000\u0000\u0000\u02a5\u02a9"+ + "\b\u000b\u0000\u0000\u02a6\u02a7\u0005`\u0000\u0000\u02a7\u02a9\u0005"+ + "`\u0000\u0000\u02a8\u02a5\u0001\u0000\u0000\u0000\u02a8\u02a6\u0001\u0000"+ + "\u0000\u0000\u02a9_\u0001\u0000\u0000\u0000\u02aa\u02ab\u0005_\u0000\u0000"+ + "\u02aba\u0001\u0000\u0000\u0000\u02ac\u02b0\u0003R!\u0000\u02ad\u02b0"+ + "\u0003P \u0000\u02ae\u02b0\u0003`(\u0000\u02af\u02ac\u0001\u0000\u0000"+ + "\u0000\u02af\u02ad\u0001\u0000\u0000\u0000\u02af\u02ae\u0001\u0000\u0000"+ + "\u0000\u02b0c\u0001\u0000\u0000\u0000\u02b1\u02b6\u0005\"\u0000\u0000"+ + "\u02b2\u02b5\u0003T\"\u0000\u02b3\u02b5\u0003V#\u0000\u02b4\u02b2\u0001"+ + "\u0000\u0000\u0000\u02b4\u02b3\u0001\u0000\u0000\u0000\u02b5\u02b8\u0001"+ + "\u0000\u0000\u0000\u02b6\u02b4\u0001\u0000\u0000\u0000\u02b6\u02b7\u0001"+ + "\u0000\u0000\u0000\u02b7\u02b9\u0001\u0000\u0000\u0000\u02b8\u02b6\u0001"+ + "\u0000\u0000\u0000\u02b9\u02cf\u0005\"\u0000\u0000\u02ba\u02bb\u0005\""+ + "\u0000\u0000\u02bb\u02bc\u0005\"\u0000\u0000\u02bc\u02bd\u0005\"\u0000"+ + "\u0000\u02bd\u02c1\u0001\u0000\u0000\u0000\u02be\u02c0\b\u0001\u0000\u0000"+ + "\u02bf\u02be\u0001\u0000\u0000\u0000\u02c0\u02c3\u0001\u0000\u0000\u0000"+ + "\u02c1\u02c2\u0001\u0000\u0000\u0000\u02c1\u02bf\u0001\u0000\u0000\u0000"+ + "\u02c2\u02c4\u0001\u0000\u0000\u0000\u02c3\u02c1\u0001\u0000\u0000\u0000"+ + "\u02c4\u02c5\u0005\"\u0000\u0000\u02c5\u02c6\u0005\"\u0000\u0000\u02c6"+ + "\u02c7\u0005\"\u0000\u0000\u02c7\u02c9\u0001\u0000\u0000\u0000\u02c8\u02ca"+ + "\u0005\"\u0000\u0000\u02c9\u02c8\u0001\u0000\u0000\u0000\u02c9\u02ca\u0001"+ + "\u0000\u0000\u0000\u02ca\u02cc\u0001\u0000\u0000\u0000\u02cb\u02cd\u0005"+ + "\"\u0000\u0000\u02cc\u02cb\u0001\u0000\u0000\u0000\u02cc\u02cd\u0001\u0000"+ + "\u0000\u0000\u02cd\u02cf\u0001\u0000\u0000\u0000\u02ce\u02b1\u0001\u0000"+ + "\u0000\u0000\u02ce\u02ba\u0001\u0000\u0000\u0000\u02cfe\u0001\u0000\u0000"+ + "\u0000\u02d0\u02d2\u0003P \u0000\u02d1\u02d0\u0001\u0000\u0000\u0000\u02d2"+ + "\u02d3\u0001\u0000\u0000\u0000\u02d3\u02d1\u0001\u0000\u0000\u0000\u02d3"+ + "\u02d4\u0001\u0000\u0000\u0000\u02d4g\u0001\u0000\u0000\u0000\u02d5\u02d7"+ + "\u0003P \u0000\u02d6\u02d5\u0001\u0000\u0000\u0000\u02d7\u02d8\u0001\u0000"+ + "\u0000\u0000\u02d8\u02d6\u0001\u0000\u0000\u0000\u02d8\u02d9\u0001\u0000"+ + "\u0000\u0000\u02d9\u02da\u0001\u0000\u0000\u0000\u02da\u02de\u0003x4\u0000"+ + "\u02db\u02dd\u0003P \u0000\u02dc\u02db\u0001\u0000\u0000\u0000\u02dd\u02e0"+ + "\u0001\u0000\u0000\u0000\u02de\u02dc\u0001\u0000\u0000\u0000\u02de\u02df"+ + "\u0001\u0000\u0000\u0000\u02df\u0300\u0001\u0000\u0000\u0000\u02e0\u02de"+ + "\u0001\u0000\u0000\u0000\u02e1\u02e3\u0003x4\u0000\u02e2\u02e4\u0003P"+ + " \u0000\u02e3\u02e2\u0001\u0000\u0000\u0000\u02e4\u02e5\u0001\u0000\u0000"+ + 
"\u0000\u02e5\u02e3\u0001\u0000\u0000\u0000\u02e5\u02e6\u0001\u0000\u0000"+ + "\u0000\u02e6\u0300\u0001\u0000\u0000\u0000\u02e7\u02e9\u0003P \u0000\u02e8"+ + "\u02e7\u0001\u0000\u0000\u0000\u02e9\u02ea\u0001\u0000\u0000\u0000\u02ea"+ + "\u02e8\u0001\u0000\u0000\u0000\u02ea\u02eb\u0001\u0000\u0000\u0000\u02eb"+ + "\u02f3\u0001\u0000\u0000\u0000\u02ec\u02f0\u0003x4\u0000\u02ed\u02ef\u0003"+ + "P \u0000\u02ee\u02ed\u0001\u0000\u0000\u0000\u02ef\u02f2\u0001\u0000\u0000"+ + "\u0000\u02f0\u02ee\u0001\u0000\u0000\u0000\u02f0\u02f1\u0001\u0000\u0000"+ + "\u0000\u02f1\u02f4\u0001\u0000\u0000\u0000\u02f2\u02f0\u0001\u0000\u0000"+ + "\u0000\u02f3\u02ec\u0001\u0000\u0000\u0000\u02f3\u02f4\u0001\u0000\u0000"+ "\u0000\u02f4\u02f5\u0001\u0000\u0000\u0000\u02f5\u02f6\u0003X$\u0000\u02f6"+ - "\u02f8\u0001\u0000\u0000\u0000\u02f7\u02ce\u0001\u0000\u0000\u0000\u02f7"+ - "\u02d9\u0001\u0000\u0000\u0000\u02f7\u02e0\u0001\u0000\u0000\u0000\u02f7"+ - "\u02ef\u0001\u0000\u0000\u0000\u02f8i\u0001\u0000\u0000\u0000\u02f9\u02fa"+ - "\u0005b\u0000\u0000\u02fa\u02fb\u0005y\u0000\u0000\u02fbk\u0001\u0000"+ - "\u0000\u0000\u02fc\u02fd\u0005a\u0000\u0000\u02fd\u02fe\u0005n\u0000\u0000"+ - "\u02fe\u02ff\u0005d\u0000\u0000\u02ffm\u0001\u0000\u0000\u0000\u0300\u0301"+ - "\u0005a\u0000\u0000\u0301\u0302\u0005s\u0000\u0000\u0302\u0303\u0005c"+ - "\u0000\u0000\u0303o\u0001\u0000\u0000\u0000\u0304\u0305\u0005=\u0000\u0000"+ - "\u0305q\u0001\u0000\u0000\u0000\u0306\u0307\u0005:\u0000\u0000\u0307\u0308"+ - "\u0005:\u0000\u0000\u0308s\u0001\u0000\u0000\u0000\u0309\u030a\u0005,"+ - "\u0000\u0000\u030au\u0001\u0000\u0000\u0000\u030b\u030c\u0005d\u0000\u0000"+ - "\u030c\u030d\u0005e\u0000\u0000\u030d\u030e\u0005s\u0000\u0000\u030e\u030f"+ - "\u0005c\u0000\u0000\u030fw\u0001\u0000\u0000\u0000\u0310\u0311\u0005."+ - "\u0000\u0000\u0311y\u0001\u0000\u0000\u0000\u0312\u0313\u0005f\u0000\u0000"+ - "\u0313\u0314\u0005a\u0000\u0000\u0314\u0315\u0005l\u0000\u0000\u0315\u0316"+ - "\u0005s\u0000\u0000\u0316\u0317\u0005e\u0000\u0000\u0317{\u0001\u0000"+ - "\u0000\u0000\u0318\u0319\u0005f\u0000\u0000\u0319\u031a\u0005i\u0000\u0000"+ - "\u031a\u031b\u0005r\u0000\u0000\u031b\u031c\u0005s\u0000\u0000\u031c\u031d"+ - "\u0005t\u0000\u0000\u031d}\u0001\u0000\u0000\u0000\u031e\u031f\u0005l"+ - "\u0000\u0000\u031f\u0320\u0005a\u0000\u0000\u0320\u0321\u0005s\u0000\u0000"+ - "\u0321\u0322\u0005t\u0000\u0000\u0322\u007f\u0001\u0000\u0000\u0000\u0323"+ - "\u0324\u0005(\u0000\u0000\u0324\u0081\u0001\u0000\u0000\u0000\u0325\u0326"+ - "\u0005i\u0000\u0000\u0326\u0327\u0005n\u0000\u0000\u0327\u0083\u0001\u0000"+ - "\u0000\u0000\u0328\u0329\u0005i\u0000\u0000\u0329\u032a\u0005s\u0000\u0000"+ - "\u032a\u0085\u0001\u0000\u0000\u0000\u032b\u032c\u0005l\u0000\u0000\u032c"+ - "\u032d\u0005i\u0000\u0000\u032d\u032e\u0005k\u0000\u0000\u032e\u032f\u0005"+ - "e\u0000\u0000\u032f\u0087\u0001\u0000\u0000\u0000\u0330\u0331\u0005n\u0000"+ - "\u0000\u0331\u0332\u0005o\u0000\u0000\u0332\u0333\u0005t\u0000\u0000\u0333"+ - "\u0089\u0001\u0000\u0000\u0000\u0334\u0335\u0005n\u0000\u0000\u0335\u0336"+ - "\u0005u\u0000\u0000\u0336\u0337\u0005l\u0000\u0000\u0337\u0338\u0005l"+ - "\u0000\u0000\u0338\u008b\u0001\u0000\u0000\u0000\u0339\u033a\u0005n\u0000"+ - "\u0000\u033a\u033b\u0005u\u0000\u0000\u033b\u033c\u0005l\u0000\u0000\u033c"+ - "\u033d\u0005l\u0000\u0000\u033d\u033e\u0005s\u0000\u0000\u033e\u008d\u0001"+ - "\u0000\u0000\u0000\u033f\u0340\u0005o\u0000\u0000\u0340\u0341\u0005r\u0000"+ - "\u0000\u0341\u008f\u0001\u0000\u0000\u0000\u0342\u0343\u0005?\u0000\u0000"+ - 
"\u0343\u0091\u0001\u0000\u0000\u0000\u0344\u0345\u0005r\u0000\u0000\u0345"+ - "\u0346\u0005l\u0000\u0000\u0346\u0347\u0005i\u0000\u0000\u0347\u0348\u0005"+ - "k\u0000\u0000\u0348\u0349\u0005e\u0000\u0000\u0349\u0093\u0001\u0000\u0000"+ - "\u0000\u034a\u034b\u0005)\u0000\u0000\u034b\u0095\u0001\u0000\u0000\u0000"+ - "\u034c\u034d\u0005t\u0000\u0000\u034d\u034e\u0005r\u0000\u0000\u034e\u034f"+ - "\u0005u\u0000\u0000\u034f\u0350\u0005e\u0000\u0000\u0350\u0097\u0001\u0000"+ - "\u0000\u0000\u0351\u0352\u0005=\u0000\u0000\u0352\u0353\u0005=\u0000\u0000"+ - "\u0353\u0099\u0001\u0000\u0000\u0000\u0354\u0355\u0005=\u0000\u0000\u0355"+ - "\u0356\u0005~\u0000\u0000\u0356\u009b\u0001\u0000\u0000\u0000\u0357\u0358"+ - "\u0005!\u0000\u0000\u0358\u0359\u0005=\u0000\u0000\u0359\u009d\u0001\u0000"+ - "\u0000\u0000\u035a\u035b\u0005<\u0000\u0000\u035b\u009f\u0001\u0000\u0000"+ - "\u0000\u035c\u035d\u0005<\u0000\u0000\u035d\u035e\u0005=\u0000\u0000\u035e"+ - "\u00a1\u0001\u0000\u0000\u0000\u035f\u0360\u0005>\u0000\u0000\u0360\u00a3"+ - "\u0001\u0000\u0000\u0000\u0361\u0362\u0005>\u0000\u0000\u0362\u0363\u0005"+ - "=\u0000\u0000\u0363\u00a5\u0001\u0000\u0000\u0000\u0364\u0365\u0005+\u0000"+ - "\u0000\u0365\u00a7\u0001\u0000\u0000\u0000\u0366\u0367\u0005-\u0000\u0000"+ - "\u0367\u00a9\u0001\u0000\u0000\u0000\u0368\u0369\u0005*\u0000\u0000\u0369"+ - "\u00ab\u0001\u0000\u0000\u0000\u036a\u036b\u0005/\u0000\u0000\u036b\u00ad"+ - "\u0001\u0000\u0000\u0000\u036c\u036d\u0005%\u0000\u0000\u036d\u00af\u0001"+ - "\u0000\u0000\u0000\u036e\u036f\u0003\u0090@\u0000\u036f\u0373\u0003R!"+ - "\u0000\u0370\u0372\u0003b)\u0000\u0371\u0370\u0001\u0000\u0000\u0000\u0372"+ - "\u0375\u0001\u0000\u0000\u0000\u0373\u0371\u0001\u0000\u0000\u0000\u0373"+ - "\u0374\u0001\u0000\u0000\u0000\u0374\u037d\u0001\u0000\u0000\u0000\u0375"+ - "\u0373\u0001\u0000\u0000\u0000\u0376\u0378\u0003\u0090@\u0000\u0377\u0379"+ - "\u0003P \u0000\u0378\u0377\u0001\u0000\u0000\u0000\u0379\u037a\u0001\u0000"+ - "\u0000\u0000\u037a\u0378\u0001\u0000\u0000\u0000\u037a\u037b\u0001\u0000"+ - "\u0000\u0000\u037b\u037d\u0001\u0000\u0000\u0000\u037c\u036e\u0001\u0000"+ - "\u0000\u0000\u037c\u0376\u0001\u0000\u0000\u0000\u037d\u00b1\u0001\u0000"+ - "\u0000\u0000\u037e\u037f\u0005[\u0000\u0000\u037f\u0380\u0001\u0000\u0000"+ - "\u0000\u0380\u0381\u0006Q\u0000\u0000\u0381\u0382\u0006Q\u0000\u0000\u0382"+ - "\u00b3\u0001\u0000\u0000\u0000\u0383\u0384\u0005]\u0000\u0000\u0384\u0385"+ - "\u0001\u0000\u0000\u0000\u0385\u0386\u0006R\u000f\u0000\u0386\u0387\u0006"+ - "R\u000f\u0000\u0387\u00b5\u0001\u0000\u0000\u0000\u0388\u038c\u0003R!"+ - "\u0000\u0389\u038b\u0003b)\u0000\u038a\u0389\u0001\u0000\u0000\u0000\u038b"+ - "\u038e\u0001\u0000\u0000\u0000\u038c\u038a\u0001\u0000\u0000\u0000\u038c"+ - "\u038d\u0001\u0000\u0000\u0000\u038d\u0399\u0001\u0000\u0000\u0000\u038e"+ - "\u038c\u0001\u0000\u0000\u0000\u038f\u0392\u0003`(\u0000\u0390\u0392\u0003"+ - "Z%\u0000\u0391\u038f\u0001\u0000\u0000\u0000\u0391\u0390\u0001\u0000\u0000"+ - "\u0000\u0392\u0394\u0001\u0000\u0000\u0000\u0393\u0395\u0003b)\u0000\u0394"+ - "\u0393\u0001\u0000\u0000\u0000\u0395\u0396\u0001\u0000\u0000\u0000\u0396"+ - "\u0394\u0001\u0000\u0000\u0000\u0396\u0397\u0001\u0000\u0000\u0000\u0397"+ - "\u0399\u0001\u0000\u0000\u0000\u0398\u0388\u0001\u0000\u0000\u0000\u0398"+ - "\u0391\u0001\u0000\u0000\u0000\u0399\u00b7\u0001\u0000\u0000\u0000\u039a"+ - "\u039c\u0003\\&\u0000\u039b\u039d\u0003^\'\u0000\u039c\u039b\u0001\u0000"+ - "\u0000\u0000\u039d\u039e\u0001\u0000\u0000\u0000\u039e\u039c\u0001\u0000"+ - 
"\u0000\u0000\u039e\u039f\u0001\u0000\u0000\u0000\u039f\u03a0\u0001\u0000"+ - "\u0000\u0000\u03a0\u03a1\u0003\\&\u0000\u03a1\u00b9\u0001\u0000\u0000"+ - "\u0000\u03a2\u03a3\u0003\u00b8T\u0000\u03a3\u00bb\u0001\u0000\u0000\u0000"+ - "\u03a4\u03a5\u0003:\u0015\u0000\u03a5\u03a6\u0001\u0000\u0000\u0000\u03a6"+ - "\u03a7\u0006V\u000b\u0000\u03a7\u00bd\u0001\u0000\u0000\u0000\u03a8\u03a9"+ - "\u0003<\u0016\u0000\u03a9\u03aa\u0001\u0000\u0000\u0000\u03aa\u03ab\u0006"+ - "W\u000b\u0000\u03ab\u00bf\u0001\u0000\u0000\u0000\u03ac\u03ad\u0003>\u0017"+ - "\u0000\u03ad\u03ae\u0001\u0000\u0000\u0000\u03ae\u03af\u0006X\u000b\u0000"+ - "\u03af\u00c1\u0001\u0000\u0000\u0000\u03b0\u03b1\u0003N\u001f\u0000\u03b1"+ - "\u03b2\u0001\u0000\u0000\u0000\u03b2\u03b3\u0006Y\u000e\u0000\u03b3\u03b4"+ - "\u0006Y\u000f\u0000\u03b4\u00c3\u0001\u0000\u0000\u0000\u03b5\u03b6\u0003"+ - "\u00b2Q\u0000\u03b6\u03b7\u0001\u0000\u0000\u0000\u03b7\u03b8\u0006Z\f"+ - "\u0000\u03b8\u00c5\u0001\u0000\u0000\u0000\u03b9\u03ba\u0003\u00b4R\u0000"+ - "\u03ba\u03bb\u0001\u0000\u0000\u0000\u03bb\u03bc\u0006[\u0010\u0000\u03bc"+ - "\u00c7\u0001\u0000\u0000\u0000\u03bd\u03be\u0003t2\u0000\u03be\u03bf\u0001"+ - "\u0000\u0000\u0000\u03bf\u03c0\u0006\\\u0011\u0000\u03c0\u00c9\u0001\u0000"+ - "\u0000\u0000\u03c1\u03c2\u0003p0\u0000\u03c2\u03c3\u0001\u0000\u0000\u0000"+ - "\u03c3\u03c4\u0006]\u0012\u0000\u03c4\u00cb\u0001\u0000\u0000\u0000\u03c5"+ - "\u03c6\u0003d*\u0000\u03c6\u03c7\u0001\u0000\u0000\u0000\u03c7\u03c8\u0006"+ - "^\u0013\u0000\u03c8\u00cd\u0001\u0000\u0000\u0000\u03c9\u03ca\u0005m\u0000"+ - "\u0000\u03ca\u03cb\u0005e\u0000\u0000\u03cb\u03cc\u0005t\u0000\u0000\u03cc"+ - "\u03cd\u0005a\u0000\u0000\u03cd\u03ce\u0005d\u0000\u0000\u03ce\u03cf\u0005"+ - "a\u0000\u0000\u03cf\u03d0\u0005t\u0000\u0000\u03d0\u03d1\u0005a\u0000"+ - "\u0000\u03d1\u00cf\u0001\u0000\u0000\u0000\u03d2\u03d3\u0003B\u0019\u0000"+ - "\u03d3\u03d4\u0001\u0000\u0000\u0000\u03d4\u03d5\u0006`\u0014\u0000\u03d5"+ - "\u00d1\u0001\u0000\u0000\u0000\u03d6\u03d7\u0003:\u0015\u0000\u03d7\u03d8"+ - "\u0001\u0000\u0000\u0000\u03d8\u03d9\u0006a\u000b\u0000\u03d9\u00d3\u0001"+ - "\u0000\u0000\u0000\u03da\u03db\u0003<\u0016\u0000\u03db\u03dc\u0001\u0000"+ - "\u0000\u0000\u03dc\u03dd\u0006b\u000b\u0000\u03dd\u00d5\u0001\u0000\u0000"+ - "\u0000\u03de\u03df\u0003>\u0017\u0000\u03df\u03e0\u0001\u0000\u0000\u0000"+ - "\u03e0\u03e1\u0006c\u000b\u0000\u03e1\u00d7\u0001\u0000\u0000\u0000\u03e2"+ - "\u03e3\u0003N\u001f\u0000\u03e3\u03e4\u0001\u0000\u0000\u0000\u03e4\u03e5"+ - "\u0006d\u000e\u0000\u03e5\u03e6\u0006d\u000f\u0000\u03e6\u00d9\u0001\u0000"+ - "\u0000\u0000\u03e7\u03e8\u0003x4\u0000\u03e8\u03e9\u0001\u0000\u0000\u0000"+ - "\u03e9\u03ea\u0006e\u0015\u0000\u03ea\u00db\u0001\u0000\u0000\u0000\u03eb"+ - "\u03ec\u0003t2\u0000\u03ec\u03ed\u0001\u0000\u0000\u0000\u03ed\u03ee\u0006"+ - "f\u0011\u0000\u03ee\u00dd\u0001\u0000\u0000\u0000\u03ef\u03f4\u0003R!"+ - "\u0000\u03f0\u03f4\u0003P \u0000\u03f1\u03f4\u0003`(\u0000\u03f2\u03f4"+ - "\u0003\u00aaM\u0000\u03f3\u03ef\u0001\u0000\u0000\u0000\u03f3\u03f0\u0001"+ - "\u0000\u0000\u0000\u03f3\u03f1\u0001\u0000\u0000\u0000\u03f3\u03f2\u0001"+ - "\u0000\u0000\u0000\u03f4\u00df\u0001\u0000\u0000\u0000\u03f5\u03f8\u0003"+ - "R!\u0000\u03f6\u03f8\u0003\u00aaM\u0000\u03f7\u03f5\u0001\u0000\u0000"+ - "\u0000\u03f7\u03f6\u0001\u0000\u0000\u0000\u03f8\u03fc\u0001\u0000\u0000"+ - "\u0000\u03f9\u03fb\u0003\u00deg\u0000\u03fa\u03f9\u0001\u0000\u0000\u0000"+ - "\u03fb\u03fe\u0001\u0000\u0000\u0000\u03fc\u03fa\u0001\u0000\u0000\u0000"+ - 
"\u03fc\u03fd\u0001\u0000\u0000\u0000\u03fd\u0409\u0001\u0000\u0000\u0000"+ - "\u03fe\u03fc\u0001\u0000\u0000\u0000\u03ff\u0402\u0003`(\u0000\u0400\u0402"+ - "\u0003Z%\u0000\u0401\u03ff\u0001\u0000\u0000\u0000\u0401\u0400\u0001\u0000"+ - "\u0000\u0000\u0402\u0404\u0001\u0000\u0000\u0000\u0403\u0405\u0003\u00de"+ - "g\u0000\u0404\u0403\u0001\u0000\u0000\u0000\u0405\u0406\u0001\u0000\u0000"+ - "\u0000\u0406\u0404\u0001\u0000\u0000\u0000\u0406\u0407\u0001\u0000\u0000"+ - "\u0000\u0407\u0409\u0001\u0000\u0000\u0000\u0408\u03f7\u0001\u0000\u0000"+ - "\u0000\u0408\u0401\u0001\u0000\u0000\u0000\u0409\u00e1\u0001\u0000\u0000"+ - "\u0000\u040a\u040d\u0003\u00e0h\u0000\u040b\u040d\u0003\u00b8T\u0000\u040c"+ - "\u040a\u0001\u0000\u0000\u0000\u040c\u040b\u0001\u0000\u0000\u0000\u040d"+ - "\u040e\u0001\u0000\u0000\u0000\u040e\u040c\u0001\u0000\u0000\u0000\u040e"+ - "\u040f\u0001\u0000\u0000\u0000\u040f\u00e3\u0001\u0000\u0000\u0000\u0410"+ - "\u0411\u0003:\u0015\u0000\u0411\u0412\u0001\u0000\u0000\u0000\u0412\u0413"+ - "\u0006j\u000b\u0000\u0413\u00e5\u0001\u0000\u0000\u0000\u0414\u0415\u0003"+ - "<\u0016\u0000\u0415\u0416\u0001\u0000\u0000\u0000\u0416\u0417\u0006k\u000b"+ - "\u0000\u0417\u00e7\u0001\u0000\u0000\u0000\u0418\u0419\u0003>\u0017\u0000"+ - "\u0419\u041a\u0001\u0000\u0000\u0000\u041a\u041b\u0006l\u000b\u0000\u041b"+ - "\u00e9\u0001\u0000\u0000\u0000\u041c\u041d\u0003N\u001f\u0000\u041d\u041e"+ - "\u0001\u0000\u0000\u0000\u041e\u041f\u0006m\u000e\u0000\u041f\u0420\u0006"+ - "m\u000f\u0000\u0420\u00eb\u0001\u0000\u0000\u0000\u0421\u0422\u0003p0"+ - "\u0000\u0422\u0423\u0001\u0000\u0000\u0000\u0423\u0424\u0006n\u0012\u0000"+ - "\u0424\u00ed\u0001\u0000\u0000\u0000\u0425\u0426\u0003t2\u0000\u0426\u0427"+ - "\u0001\u0000\u0000\u0000\u0427\u0428\u0006o\u0011\u0000\u0428\u00ef\u0001"+ - "\u0000\u0000\u0000\u0429\u042a\u0003x4\u0000\u042a\u042b\u0001\u0000\u0000"+ - "\u0000\u042b\u042c\u0006p\u0015\u0000\u042c\u00f1\u0001\u0000\u0000\u0000"+ - "\u042d\u042e\u0005a\u0000\u0000\u042e\u042f\u0005s\u0000\u0000\u042f\u00f3"+ - "\u0001\u0000\u0000\u0000\u0430\u0431\u0003\u00e2i\u0000\u0431\u0432\u0001"+ - "\u0000\u0000\u0000\u0432\u0433\u0006r\u0016\u0000\u0433\u00f5\u0001\u0000"+ - "\u0000\u0000\u0434\u0435\u0003:\u0015\u0000\u0435\u0436\u0001\u0000\u0000"+ - "\u0000\u0436\u0437\u0006s\u000b\u0000\u0437\u00f7\u0001\u0000\u0000\u0000"+ - "\u0438\u0439\u0003<\u0016\u0000\u0439\u043a\u0001\u0000\u0000\u0000\u043a"+ - "\u043b\u0006t\u000b\u0000\u043b\u00f9\u0001\u0000\u0000\u0000\u043c\u043d"+ - "\u0003>\u0017\u0000\u043d\u043e\u0001\u0000\u0000\u0000\u043e\u043f\u0006"+ - "u\u000b\u0000\u043f\u00fb\u0001\u0000\u0000\u0000\u0440\u0441\u0003N\u001f"+ - "\u0000\u0441\u0442\u0001\u0000\u0000\u0000\u0442\u0443\u0006v\u000e\u0000"+ - "\u0443\u0444\u0006v\u000f\u0000\u0444\u00fd\u0001\u0000\u0000\u0000\u0445"+ - "\u0446\u0003\u00b2Q\u0000\u0446\u0447\u0001\u0000\u0000\u0000\u0447\u0448"+ - "\u0006w\f\u0000\u0448\u0449\u0006w\u0017\u0000\u0449\u00ff\u0001\u0000"+ - "\u0000\u0000\u044a\u044b\u0005o\u0000\u0000\u044b\u044c\u0005n\u0000\u0000"+ - "\u044c\u044d\u0001\u0000\u0000\u0000\u044d\u044e\u0006x\u0018\u0000\u044e"+ - "\u0101\u0001\u0000\u0000\u0000\u044f\u0450\u0005w\u0000\u0000\u0450\u0451"+ - "\u0005i\u0000\u0000\u0451\u0452\u0005t\u0000\u0000\u0452\u0453\u0005h"+ - "\u0000\u0000\u0453\u0454\u0001\u0000\u0000\u0000\u0454\u0455\u0006y\u0018"+ - "\u0000\u0455\u0103\u0001\u0000\u0000\u0000\u0456\u0457\b\f\u0000\u0000"+ - "\u0457\u0105\u0001\u0000\u0000\u0000\u0458\u045a\u0003\u0104z\u0000\u0459"+ - 
"\u0458\u0001\u0000\u0000\u0000\u045a\u045b\u0001\u0000\u0000\u0000\u045b"+ - "\u0459\u0001\u0000\u0000\u0000\u045b\u045c\u0001\u0000\u0000\u0000\u045c"+ - "\u045d\u0001\u0000\u0000\u0000\u045d\u045e\u0003\u0168\u00ac\u0000\u045e"+ - "\u0460\u0001\u0000\u0000\u0000\u045f\u0459\u0001\u0000\u0000\u0000\u045f"+ - "\u0460\u0001\u0000\u0000\u0000\u0460\u0462\u0001\u0000\u0000\u0000\u0461"+ - "\u0463\u0003\u0104z\u0000\u0462\u0461\u0001\u0000\u0000\u0000\u0463\u0464"+ - "\u0001\u0000\u0000\u0000\u0464\u0462\u0001\u0000\u0000\u0000\u0464\u0465"+ - "\u0001\u0000\u0000\u0000\u0465\u0107\u0001\u0000\u0000\u0000\u0466\u0467"+ - "\u0003\u00baU\u0000\u0467\u0468\u0001\u0000\u0000\u0000\u0468\u0469\u0006"+ - "|\u0019\u0000\u0469\u0109\u0001\u0000\u0000\u0000\u046a\u046b\u0003\u0106"+ - "{\u0000\u046b\u046c\u0001\u0000\u0000\u0000\u046c\u046d\u0006}\u001a\u0000"+ - "\u046d\u010b\u0001\u0000\u0000\u0000\u046e\u046f\u0003:\u0015\u0000\u046f"+ - "\u0470\u0001\u0000\u0000\u0000\u0470\u0471\u0006~\u000b\u0000\u0471\u010d"+ - "\u0001\u0000\u0000\u0000\u0472\u0473\u0003<\u0016\u0000\u0473\u0474\u0001"+ - "\u0000\u0000\u0000\u0474\u0475\u0006\u007f\u000b\u0000\u0475\u010f\u0001"+ - "\u0000\u0000\u0000\u0476\u0477\u0003>\u0017\u0000\u0477\u0478\u0001\u0000"+ - "\u0000\u0000\u0478\u0479\u0006\u0080\u000b\u0000\u0479\u0111\u0001\u0000"+ - "\u0000\u0000\u047a\u047b\u0003N\u001f\u0000\u047b\u047c\u0001\u0000\u0000"+ - "\u0000\u047c\u047d\u0006\u0081\u000e\u0000\u047d\u047e\u0006\u0081\u000f"+ - "\u0000\u047e\u047f\u0006\u0081\u000f\u0000\u047f\u0113\u0001\u0000\u0000"+ - "\u0000\u0480\u0481\u0003p0\u0000\u0481\u0482\u0001\u0000\u0000\u0000\u0482"+ - "\u0483\u0006\u0082\u0012\u0000\u0483\u0115\u0001\u0000\u0000\u0000\u0484"+ - "\u0485\u0003t2\u0000\u0485\u0486\u0001\u0000\u0000\u0000\u0486\u0487\u0006"+ - "\u0083\u0011\u0000\u0487\u0117\u0001\u0000\u0000\u0000\u0488\u0489\u0003"+ - "x4\u0000\u0489\u048a\u0001\u0000\u0000\u0000\u048a\u048b\u0006\u0084\u0015"+ - "\u0000\u048b\u0119\u0001\u0000\u0000\u0000\u048c\u048d\u0003\u0102y\u0000"+ - "\u048d\u048e\u0001\u0000\u0000\u0000\u048e\u048f\u0006\u0085\u001b\u0000"+ - "\u048f\u011b\u0001\u0000\u0000\u0000\u0490\u0491\u0003\u00e2i\u0000\u0491"+ - "\u0492\u0001\u0000\u0000\u0000\u0492\u0493\u0006\u0086\u0016\u0000\u0493"+ - "\u011d\u0001\u0000\u0000\u0000\u0494\u0495\u0003\u00baU\u0000\u0495\u0496"+ - "\u0001\u0000\u0000\u0000\u0496\u0497\u0006\u0087\u0019\u0000\u0497\u011f"+ - "\u0001\u0000\u0000\u0000\u0498\u0499\u0003:\u0015\u0000\u0499\u049a\u0001"+ - "\u0000\u0000\u0000\u049a\u049b\u0006\u0088\u000b\u0000\u049b\u0121\u0001"+ - "\u0000\u0000\u0000\u049c\u049d\u0003<\u0016\u0000\u049d\u049e\u0001\u0000"+ - "\u0000\u0000\u049e\u049f\u0006\u0089\u000b\u0000\u049f\u0123\u0001\u0000"+ - "\u0000\u0000\u04a0\u04a1\u0003>\u0017\u0000\u04a1\u04a2\u0001\u0000\u0000"+ - "\u0000\u04a2\u04a3\u0006\u008a\u000b\u0000\u04a3\u0125\u0001\u0000\u0000"+ - "\u0000\u04a4\u04a5\u0003N\u001f\u0000\u04a5\u04a6\u0001\u0000\u0000\u0000"+ - "\u04a6\u04a7\u0006\u008b\u000e\u0000\u04a7\u04a8\u0006\u008b\u000f\u0000"+ - "\u04a8\u0127\u0001\u0000\u0000\u0000\u04a9\u04aa\u0003t2\u0000\u04aa\u04ab"+ - "\u0001\u0000\u0000\u0000\u04ab\u04ac\u0006\u008c\u0011\u0000\u04ac\u0129"+ - "\u0001\u0000\u0000\u0000\u04ad\u04ae\u0003x4\u0000\u04ae\u04af\u0001\u0000"+ - "\u0000\u0000\u04af\u04b0\u0006\u008d\u0015\u0000\u04b0\u012b\u0001\u0000"+ - "\u0000\u0000\u04b1\u04b2\u0003\u0100x\u0000\u04b2\u04b3\u0001\u0000\u0000"+ - "\u0000\u04b3\u04b4\u0006\u008e\u001c\u0000\u04b4\u04b5\u0006\u008e\u001d"+ - 
"\u0000\u04b5\u012d\u0001\u0000\u0000\u0000\u04b6\u04b7\u0003B\u0019\u0000"+ - "\u04b7\u04b8\u0001\u0000\u0000\u0000\u04b8\u04b9\u0006\u008f\u0014\u0000"+ - "\u04b9\u012f\u0001\u0000\u0000\u0000\u04ba\u04bb\u0003:\u0015\u0000\u04bb"+ - "\u04bc\u0001\u0000\u0000\u0000\u04bc\u04bd\u0006\u0090\u000b\u0000\u04bd"+ - "\u0131\u0001\u0000\u0000\u0000\u04be\u04bf\u0003<\u0016\u0000\u04bf\u04c0"+ - "\u0001\u0000\u0000\u0000\u04c0\u04c1\u0006\u0091\u000b\u0000\u04c1\u0133"+ - "\u0001\u0000\u0000\u0000\u04c2\u04c3\u0003>\u0017\u0000\u04c3\u04c4\u0001"+ - "\u0000\u0000\u0000\u04c4\u04c5\u0006\u0092\u000b\u0000\u04c5\u0135\u0001"+ - "\u0000\u0000\u0000\u04c6\u04c7\u0003N\u001f\u0000\u04c7\u04c8\u0001\u0000"+ - "\u0000\u0000\u04c8\u04c9\u0006\u0093\u000e\u0000\u04c9\u04ca\u0006\u0093"+ - "\u000f\u0000\u04ca\u04cb\u0006\u0093\u000f\u0000\u04cb\u0137\u0001\u0000"+ - "\u0000\u0000\u04cc\u04cd\u0003t2\u0000\u04cd\u04ce\u0001\u0000\u0000\u0000"+ - "\u04ce\u04cf\u0006\u0094\u0011\u0000\u04cf\u0139\u0001\u0000\u0000\u0000"+ - "\u04d0\u04d1\u0003x4\u0000\u04d1\u04d2\u0001\u0000\u0000\u0000\u04d2\u04d3"+ - "\u0006\u0095\u0015\u0000\u04d3\u013b\u0001\u0000\u0000\u0000\u04d4\u04d5"+ - "\u0003\u00e2i\u0000\u04d5\u04d6\u0001\u0000\u0000\u0000\u04d6\u04d7\u0006"+ - "\u0096\u0016\u0000\u04d7\u013d\u0001\u0000\u0000\u0000\u04d8\u04d9\u0003"+ - ":\u0015\u0000\u04d9\u04da\u0001\u0000\u0000\u0000\u04da\u04db\u0006\u0097"+ - "\u000b\u0000\u04db\u013f\u0001\u0000\u0000\u0000\u04dc\u04dd\u0003<\u0016"+ - "\u0000\u04dd\u04de\u0001\u0000\u0000\u0000\u04de\u04df\u0006\u0098\u000b"+ - "\u0000\u04df\u0141\u0001\u0000\u0000\u0000\u04e0\u04e1\u0003>\u0017\u0000"+ - "\u04e1\u04e2\u0001\u0000\u0000\u0000\u04e2\u04e3\u0006\u0099\u000b\u0000"+ - "\u04e3\u0143\u0001\u0000\u0000\u0000\u04e4\u04e5\u0003N\u001f\u0000\u04e5"+ - "\u04e6\u0001\u0000\u0000\u0000\u04e6\u04e7\u0006\u009a\u000e\u0000\u04e7"+ - "\u04e8\u0006\u009a\u000f\u0000\u04e8\u0145\u0001\u0000\u0000\u0000\u04e9"+ - "\u04ea\u0003x4\u0000\u04ea\u04eb\u0001\u0000\u0000\u0000\u04eb\u04ec\u0006"+ - "\u009b\u0015\u0000\u04ec\u0147\u0001\u0000\u0000\u0000\u04ed\u04ee\u0003"+ - "\u00baU\u0000\u04ee\u04ef\u0001\u0000\u0000\u0000\u04ef\u04f0\u0006\u009c"+ - "\u0019\u0000\u04f0\u0149\u0001\u0000\u0000\u0000\u04f1\u04f2\u0003\u00b6"+ - "S\u0000\u04f2\u04f3\u0001\u0000\u0000\u0000\u04f3\u04f4\u0006\u009d\u001e"+ - "\u0000\u04f4\u014b\u0001\u0000\u0000\u0000\u04f5\u04f6\u0003:\u0015\u0000"+ - "\u04f6\u04f7\u0001\u0000\u0000\u0000\u04f7\u04f8\u0006\u009e\u000b\u0000"+ - "\u04f8\u014d\u0001\u0000\u0000\u0000\u04f9\u04fa\u0003<\u0016\u0000\u04fa"+ - "\u04fb\u0001\u0000\u0000\u0000\u04fb\u04fc\u0006\u009f\u000b\u0000\u04fc"+ - "\u014f\u0001\u0000\u0000\u0000\u04fd\u04fe\u0003>\u0017\u0000\u04fe\u04ff"+ - "\u0001\u0000\u0000\u0000\u04ff\u0500\u0006\u00a0\u000b\u0000\u0500\u0151"+ - "\u0001\u0000\u0000\u0000\u0501\u0502\u0003N\u001f\u0000\u0502\u0503\u0001"+ - "\u0000\u0000\u0000\u0503\u0504\u0006\u00a1\u000e\u0000\u0504\u0505\u0006"+ - "\u00a1\u000f\u0000\u0505\u0153\u0001\u0000\u0000\u0000\u0506\u0507\u0005"+ - "i\u0000\u0000\u0507\u0508\u0005n\u0000\u0000\u0508\u0509\u0005f\u0000"+ - "\u0000\u0509\u050a\u0005o\u0000\u0000\u050a\u0155\u0001\u0000\u0000\u0000"+ - "\u050b\u050c\u0003:\u0015\u0000\u050c\u050d\u0001\u0000\u0000\u0000\u050d"+ - "\u050e\u0006\u00a3\u000b\u0000\u050e\u0157\u0001\u0000\u0000\u0000\u050f"+ - "\u0510\u0003<\u0016\u0000\u0510\u0511\u0001\u0000\u0000\u0000\u0511\u0512"+ - "\u0006\u00a4\u000b\u0000\u0512\u0159\u0001\u0000\u0000\u0000\u0513\u0514"+ - 
"\u0003>\u0017\u0000\u0514\u0515\u0001\u0000\u0000\u0000\u0515\u0516\u0006"+ - "\u00a5\u000b\u0000\u0516\u015b\u0001\u0000\u0000\u0000\u0517\u0518\u0003"+ - "N\u001f\u0000\u0518\u0519\u0001\u0000\u0000\u0000\u0519\u051a\u0006\u00a6"+ - "\u000e\u0000\u051a\u051b\u0006\u00a6\u000f\u0000\u051b\u015d\u0001\u0000"+ - "\u0000\u0000\u051c\u051d\u0005f\u0000\u0000\u051d\u051e\u0005u\u0000\u0000"+ - "\u051e\u051f\u0005n\u0000\u0000\u051f\u0520\u0005c\u0000\u0000\u0520\u0521"+ - "\u0005t\u0000\u0000\u0521\u0522\u0005i\u0000\u0000\u0522\u0523\u0005o"+ - "\u0000\u0000\u0523\u0524\u0005n\u0000\u0000\u0524\u0525\u0005s\u0000\u0000"+ - "\u0525\u015f\u0001\u0000\u0000\u0000\u0526\u0527\u0003:\u0015\u0000\u0527"+ - "\u0528\u0001\u0000\u0000\u0000\u0528\u0529\u0006\u00a8\u000b\u0000\u0529"+ - "\u0161\u0001\u0000\u0000\u0000\u052a\u052b\u0003<\u0016\u0000\u052b\u052c"+ - "\u0001\u0000\u0000\u0000\u052c\u052d\u0006\u00a9\u000b\u0000\u052d\u0163"+ - "\u0001\u0000\u0000\u0000\u052e\u052f\u0003>\u0017\u0000\u052f\u0530\u0001"+ - "\u0000\u0000\u0000\u0530\u0531\u0006\u00aa\u000b\u0000\u0531\u0165\u0001"+ - "\u0000\u0000\u0000\u0532\u0533\u0003\u00b4R\u0000\u0533\u0534\u0001\u0000"+ - "\u0000\u0000\u0534\u0535\u0006\u00ab\u0010\u0000\u0535\u0536\u0006\u00ab"+ - "\u000f\u0000\u0536\u0167\u0001\u0000\u0000\u0000\u0537\u0538\u0005:\u0000"+ - "\u0000\u0538\u0169\u0001\u0000\u0000\u0000\u0539\u053f\u0003Z%\u0000\u053a"+ - "\u053f\u0003P \u0000\u053b\u053f\u0003x4\u0000\u053c\u053f\u0003R!\u0000"+ - "\u053d\u053f\u0003`(\u0000\u053e\u0539\u0001\u0000\u0000\u0000\u053e\u053a"+ - "\u0001\u0000\u0000\u0000\u053e\u053b\u0001\u0000\u0000\u0000\u053e\u053c"+ - "\u0001\u0000\u0000\u0000\u053e\u053d\u0001\u0000\u0000\u0000\u053f\u0540"+ - "\u0001\u0000\u0000\u0000\u0540\u053e\u0001\u0000\u0000\u0000\u0540\u0541"+ - "\u0001\u0000\u0000\u0000\u0541\u016b\u0001\u0000\u0000\u0000\u0542\u0543"+ - "\u0003:\u0015\u0000\u0543\u0544\u0001\u0000\u0000\u0000\u0544\u0545\u0006"+ - "\u00ae\u000b\u0000\u0545\u016d\u0001\u0000\u0000\u0000\u0546\u0547\u0003"+ - "<\u0016\u0000\u0547\u0548\u0001\u0000\u0000\u0000\u0548\u0549\u0006\u00af"+ - "\u000b\u0000\u0549\u016f\u0001\u0000\u0000\u0000\u054a\u054b\u0003>\u0017"+ - "\u0000\u054b\u054c\u0001\u0000\u0000\u0000\u054c\u054d\u0006\u00b0\u000b"+ - "\u0000\u054d\u0171\u0001\u0000\u0000\u0000\u054e\u054f\u0003N\u001f\u0000"+ - "\u054f\u0550\u0001\u0000\u0000\u0000\u0550\u0551\u0006\u00b1\u000e\u0000"+ - "\u0551\u0552\u0006\u00b1\u000f\u0000\u0552\u0173\u0001\u0000\u0000\u0000"+ - "\u0553\u0554\u0003B\u0019\u0000\u0554\u0555\u0001\u0000\u0000\u0000\u0555"+ - "\u0556\u0006\u00b2\u0014\u0000\u0556\u0557\u0006\u00b2\u000f\u0000\u0557"+ - "\u0558\u0006\u00b2\u001f\u0000\u0558\u0175\u0001\u0000\u0000\u0000\u0559"+ - "\u055a\u0003:\u0015\u0000\u055a\u055b\u0001\u0000\u0000\u0000\u055b\u055c"+ - "\u0006\u00b3\u000b\u0000\u055c\u0177\u0001\u0000\u0000\u0000\u055d\u055e"+ - "\u0003<\u0016\u0000\u055e\u055f\u0001\u0000\u0000\u0000\u055f\u0560\u0006"+ - "\u00b4\u000b\u0000\u0560\u0179\u0001\u0000\u0000\u0000\u0561\u0562\u0003"+ - ">\u0017\u0000\u0562\u0563\u0001\u0000\u0000\u0000\u0563\u0564\u0006\u00b5"+ - "\u000b\u0000\u0564\u017b\u0001\u0000\u0000\u0000\u0565\u0566\u0003t2\u0000"+ - "\u0566\u0567\u0001\u0000\u0000\u0000\u0567\u0568\u0006\u00b6\u0011\u0000"+ - "\u0568\u0569\u0006\u00b6\u000f\u0000\u0569\u056a\u0006\u00b6\u0007\u0000"+ - "\u056a\u017d\u0001\u0000\u0000\u0000\u056b\u056c\u0003:\u0015\u0000\u056c"+ - "\u056d\u0001\u0000\u0000\u0000\u056d\u056e\u0006\u00b7\u000b\u0000\u056e"+ - 
"\u017f\u0001\u0000\u0000\u0000\u056f\u0570\u0003<\u0016\u0000\u0570\u0571"+ - "\u0001\u0000\u0000\u0000\u0571\u0572\u0006\u00b8\u000b\u0000\u0572\u0181"+ - "\u0001\u0000\u0000\u0000\u0573\u0574\u0003>\u0017\u0000\u0574\u0575\u0001"+ - "\u0000\u0000\u0000\u0575\u0576\u0006\u00b9\u000b\u0000\u0576\u0183\u0001"+ - "\u0000\u0000\u0000\u0577\u0578\u0003\u00baU\u0000\u0578\u0579\u0001\u0000"+ - "\u0000\u0000\u0579\u057a\u0006\u00ba\u000f\u0000\u057a\u057b\u0006\u00ba"+ - "\u0000\u0000\u057b\u057c\u0006\u00ba\u0019\u0000\u057c\u0185\u0001\u0000"+ - "\u0000\u0000\u057d\u057e\u0003\u00b6S\u0000\u057e\u057f\u0001\u0000\u0000"+ - "\u0000\u057f\u0580\u0006\u00bb\u000f\u0000\u0580\u0581\u0006\u00bb\u0000"+ - "\u0000\u0581\u0582\u0006\u00bb\u001e\u0000\u0582\u0187\u0001\u0000\u0000"+ - "\u0000\u0583\u0584\u0003j-\u0000\u0584\u0585\u0001\u0000\u0000\u0000\u0585"+ - "\u0586\u0006\u00bc\u000f\u0000\u0586\u0587\u0006\u00bc\u0000\u0000\u0587"+ - "\u0588\u0006\u00bc \u0000\u0588\u0189\u0001\u0000\u0000\u0000\u0589\u058a"+ - "\u0003N\u001f\u0000\u058a\u058b\u0001\u0000\u0000\u0000\u058b\u058c\u0006"+ - "\u00bd\u000e\u0000\u058c\u058d\u0006\u00bd\u000f\u0000\u058d\u018b\u0001"+ - "\u0000\u0000\u0000A\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007\b"+ - "\t\n\u000b\f\r\u000e\u000f\u0238\u0242\u0246\u0249\u0252\u0254\u025f\u0266"+ - "\u026b\u0292\u0297\u02a0\u02a7\u02ac\u02ae\u02b9\u02c1\u02c4\u02c6\u02cb"+ - "\u02d0\u02d6\u02dd\u02e2\u02e8\u02eb\u02f3\u02f7\u0373\u037a\u037c\u038c"+ - "\u0391\u0396\u0398\u039e\u03f3\u03f7\u03fc\u0401\u0406\u0408\u040c\u040e"+ - "\u045b\u045f\u0464\u053e\u0540!\u0005\u0002\u0000\u0005\u0004\u0000\u0005"+ - "\u0006\u0000\u0005\u0001\u0000\u0005\u0003\u0000\u0005\b\u0000\u0005\f"+ - "\u0000\u0005\u000e\u0000\u0005\n\u0000\u0005\u0005\u0000\u0005\u000b\u0000"+ - "\u0000\u0001\u0000\u0007E\u0000\u0005\u0000\u0000\u0007\u001d\u0000\u0004"+ - "\u0000\u0000\u0007F\u0000\u0007&\u0000\u0007$\u0000\u0007\u001e\u0000"+ - "\u0007\u0019\u0000\u0007(\u0000\u0007P\u0000\u0005\r\u0000\u0005\u0007"+ - "\u0000\u0007H\u0000\u0007Z\u0000\u0007Y\u0000\u0007X\u0000\u0005\t\u0000"+ - "\u0007G\u0000\u0005\u000f\u0000\u0007!\u0000"; + "\u0300\u0001\u0000\u0000\u0000\u02f7\u02f9\u0003x4\u0000\u02f8\u02fa\u0003"+ + "P \u0000\u02f9\u02f8\u0001\u0000\u0000\u0000\u02fa\u02fb\u0001\u0000\u0000"+ + "\u0000\u02fb\u02f9\u0001\u0000\u0000\u0000\u02fb\u02fc\u0001\u0000\u0000"+ + "\u0000\u02fc\u02fd\u0001\u0000\u0000\u0000\u02fd\u02fe\u0003X$\u0000\u02fe"+ + "\u0300\u0001\u0000\u0000\u0000\u02ff\u02d6\u0001\u0000\u0000\u0000\u02ff"+ + "\u02e1\u0001\u0000\u0000\u0000\u02ff\u02e8\u0001\u0000\u0000\u0000\u02ff"+ + "\u02f7\u0001\u0000\u0000\u0000\u0300i\u0001\u0000\u0000\u0000\u0301\u0302"+ + "\u0005b\u0000\u0000\u0302\u0303\u0005y\u0000\u0000\u0303k\u0001\u0000"+ + "\u0000\u0000\u0304\u0305\u0005a\u0000\u0000\u0305\u0306\u0005n\u0000\u0000"+ + "\u0306\u0307\u0005d\u0000\u0000\u0307m\u0001\u0000\u0000\u0000\u0308\u0309"+ + "\u0005a\u0000\u0000\u0309\u030a\u0005s\u0000\u0000\u030a\u030b\u0005c"+ + "\u0000\u0000\u030bo\u0001\u0000\u0000\u0000\u030c\u030d\u0005=\u0000\u0000"+ + "\u030dq\u0001\u0000\u0000\u0000\u030e\u030f\u0005:\u0000\u0000\u030f\u0310"+ + "\u0005:\u0000\u0000\u0310s\u0001\u0000\u0000\u0000\u0311\u0312\u0005,"+ + "\u0000\u0000\u0312u\u0001\u0000\u0000\u0000\u0313\u0314\u0005d\u0000\u0000"+ + "\u0314\u0315\u0005e\u0000\u0000\u0315\u0316\u0005s\u0000\u0000\u0316\u0317"+ + "\u0005c\u0000\u0000\u0317w\u0001\u0000\u0000\u0000\u0318\u0319\u0005."+ + 
"\u0000\u0000\u0319y\u0001\u0000\u0000\u0000\u031a\u031b\u0005f\u0000\u0000"+ + "\u031b\u031c\u0005a\u0000\u0000\u031c\u031d\u0005l\u0000\u0000\u031d\u031e"+ + "\u0005s\u0000\u0000\u031e\u031f\u0005e\u0000\u0000\u031f{\u0001\u0000"+ + "\u0000\u0000\u0320\u0321\u0005f\u0000\u0000\u0321\u0322\u0005i\u0000\u0000"+ + "\u0322\u0323\u0005r\u0000\u0000\u0323\u0324\u0005s\u0000\u0000\u0324\u0325"+ + "\u0005t\u0000\u0000\u0325}\u0001\u0000\u0000\u0000\u0326\u0327\u0005l"+ + "\u0000\u0000\u0327\u0328\u0005a\u0000\u0000\u0328\u0329\u0005s\u0000\u0000"+ + "\u0329\u032a\u0005t\u0000\u0000\u032a\u007f\u0001\u0000\u0000\u0000\u032b"+ + "\u032c\u0005(\u0000\u0000\u032c\u0081\u0001\u0000\u0000\u0000\u032d\u032e"+ + "\u0005i\u0000\u0000\u032e\u032f\u0005n\u0000\u0000\u032f\u0083\u0001\u0000"+ + "\u0000\u0000\u0330\u0331\u0005i\u0000\u0000\u0331\u0332\u0005s\u0000\u0000"+ + "\u0332\u0085\u0001\u0000\u0000\u0000\u0333\u0334\u0005l\u0000\u0000\u0334"+ + "\u0335\u0005i\u0000\u0000\u0335\u0336\u0005k\u0000\u0000\u0336\u0337\u0005"+ + "e\u0000\u0000\u0337\u0087\u0001\u0000\u0000\u0000\u0338\u0339\u0005n\u0000"+ + "\u0000\u0339\u033a\u0005o\u0000\u0000\u033a\u033b\u0005t\u0000\u0000\u033b"+ + "\u0089\u0001\u0000\u0000\u0000\u033c\u033d\u0005n\u0000\u0000\u033d\u033e"+ + "\u0005u\u0000\u0000\u033e\u033f\u0005l\u0000\u0000\u033f\u0340\u0005l"+ + "\u0000\u0000\u0340\u008b\u0001\u0000\u0000\u0000\u0341\u0342\u0005n\u0000"+ + "\u0000\u0342\u0343\u0005u\u0000\u0000\u0343\u0344\u0005l\u0000\u0000\u0344"+ + "\u0345\u0005l\u0000\u0000\u0345\u0346\u0005s\u0000\u0000\u0346\u008d\u0001"+ + "\u0000\u0000\u0000\u0347\u0348\u0005o\u0000\u0000\u0348\u0349\u0005r\u0000"+ + "\u0000\u0349\u008f\u0001\u0000\u0000\u0000\u034a\u034b\u0005?\u0000\u0000"+ + "\u034b\u0091\u0001\u0000\u0000\u0000\u034c\u034d\u0005r\u0000\u0000\u034d"+ + "\u034e\u0005l\u0000\u0000\u034e\u034f\u0005i\u0000\u0000\u034f\u0350\u0005"+ + "k\u0000\u0000\u0350\u0351\u0005e\u0000\u0000\u0351\u0093\u0001\u0000\u0000"+ + "\u0000\u0352\u0353\u0005)\u0000\u0000\u0353\u0095\u0001\u0000\u0000\u0000"+ + "\u0354\u0355\u0005t\u0000\u0000\u0355\u0356\u0005r\u0000\u0000\u0356\u0357"+ + "\u0005u\u0000\u0000\u0357\u0358\u0005e\u0000\u0000\u0358\u0097\u0001\u0000"+ + "\u0000\u0000\u0359\u035a\u0005=\u0000\u0000\u035a\u035b\u0005=\u0000\u0000"+ + "\u035b\u0099\u0001\u0000\u0000\u0000\u035c\u035d\u0005=\u0000\u0000\u035d"+ + "\u035e\u0005~\u0000\u0000\u035e\u009b\u0001\u0000\u0000\u0000\u035f\u0360"+ + "\u0005!\u0000\u0000\u0360\u0361\u0005=\u0000\u0000\u0361\u009d\u0001\u0000"+ + "\u0000\u0000\u0362\u0363\u0005<\u0000\u0000\u0363\u009f\u0001\u0000\u0000"+ + "\u0000\u0364\u0365\u0005<\u0000\u0000\u0365\u0366\u0005=\u0000\u0000\u0366"+ + "\u00a1\u0001\u0000\u0000\u0000\u0367\u0368\u0005>\u0000\u0000\u0368\u00a3"+ + "\u0001\u0000\u0000\u0000\u0369\u036a\u0005>\u0000\u0000\u036a\u036b\u0005"+ + "=\u0000\u0000\u036b\u00a5\u0001\u0000\u0000\u0000\u036c\u036d\u0005+\u0000"+ + "\u0000\u036d\u00a7\u0001\u0000\u0000\u0000\u036e\u036f\u0005-\u0000\u0000"+ + "\u036f\u00a9\u0001\u0000\u0000\u0000\u0370\u0371\u0005*\u0000\u0000\u0371"+ + "\u00ab\u0001\u0000\u0000\u0000\u0372\u0373\u0005/\u0000\u0000\u0373\u00ad"+ + "\u0001\u0000\u0000\u0000\u0374\u0375\u0005%\u0000\u0000\u0375\u00af\u0001"+ + "\u0000\u0000\u0000\u0376\u0377\u0003\u0090@\u0000\u0377\u037b\u0003R!"+ + "\u0000\u0378\u037a\u0003b)\u0000\u0379\u0378\u0001\u0000\u0000\u0000\u037a"+ + "\u037d\u0001\u0000\u0000\u0000\u037b\u0379\u0001\u0000\u0000\u0000\u037b"+ + 
"\u037c\u0001\u0000\u0000\u0000\u037c\u0385\u0001\u0000\u0000\u0000\u037d"+ + "\u037b\u0001\u0000\u0000\u0000\u037e\u0380\u0003\u0090@\u0000\u037f\u0381"+ + "\u0003P \u0000\u0380\u037f\u0001\u0000\u0000\u0000\u0381\u0382\u0001\u0000"+ + "\u0000\u0000\u0382\u0380\u0001\u0000\u0000\u0000\u0382\u0383\u0001\u0000"+ + "\u0000\u0000\u0383\u0385\u0001\u0000\u0000\u0000\u0384\u0376\u0001\u0000"+ + "\u0000\u0000\u0384\u037e\u0001\u0000\u0000\u0000\u0385\u00b1\u0001\u0000"+ + "\u0000\u0000\u0386\u0387\u0005[\u0000\u0000\u0387\u0388\u0001\u0000\u0000"+ + "\u0000\u0388\u0389\u0006Q\u0000\u0000\u0389\u038a\u0006Q\u0000\u0000\u038a"+ + "\u00b3\u0001\u0000\u0000\u0000\u038b\u038c\u0005]\u0000\u0000\u038c\u038d"+ + "\u0001\u0000\u0000\u0000\u038d\u038e\u0006R\u000f\u0000\u038e\u038f\u0006"+ + "R\u000f\u0000\u038f\u00b5\u0001\u0000\u0000\u0000\u0390\u0394\u0003R!"+ + "\u0000\u0391\u0393\u0003b)\u0000\u0392\u0391\u0001\u0000\u0000\u0000\u0393"+ + "\u0396\u0001\u0000\u0000\u0000\u0394\u0392\u0001\u0000\u0000\u0000\u0394"+ + "\u0395\u0001\u0000\u0000\u0000\u0395\u03a1\u0001\u0000\u0000\u0000\u0396"+ + "\u0394\u0001\u0000\u0000\u0000\u0397\u039a\u0003`(\u0000\u0398\u039a\u0003"+ + "Z%\u0000\u0399\u0397\u0001\u0000\u0000\u0000\u0399\u0398\u0001\u0000\u0000"+ + "\u0000\u039a\u039c\u0001\u0000\u0000\u0000\u039b\u039d\u0003b)\u0000\u039c"+ + "\u039b\u0001\u0000\u0000\u0000\u039d\u039e\u0001\u0000\u0000\u0000\u039e"+ + "\u039c\u0001\u0000\u0000\u0000\u039e\u039f\u0001\u0000\u0000\u0000\u039f"+ + "\u03a1\u0001\u0000\u0000\u0000\u03a0\u0390\u0001\u0000\u0000\u0000\u03a0"+ + "\u0399\u0001\u0000\u0000\u0000\u03a1\u00b7\u0001\u0000\u0000\u0000\u03a2"+ + "\u03a4\u0003\\&\u0000\u03a3\u03a5\u0003^\'\u0000\u03a4\u03a3\u0001\u0000"+ + "\u0000\u0000\u03a5\u03a6\u0001\u0000\u0000\u0000\u03a6\u03a4\u0001\u0000"+ + "\u0000\u0000\u03a6\u03a7\u0001\u0000\u0000\u0000\u03a7\u03a8\u0001\u0000"+ + "\u0000\u0000\u03a8\u03a9\u0003\\&\u0000\u03a9\u00b9\u0001\u0000\u0000"+ + "\u0000\u03aa\u03ab\u0003\u00b8T\u0000\u03ab\u00bb\u0001\u0000\u0000\u0000"+ + "\u03ac\u03ad\u0003:\u0015\u0000\u03ad\u03ae\u0001\u0000\u0000\u0000\u03ae"+ + "\u03af\u0006V\u000b\u0000\u03af\u00bd\u0001\u0000\u0000\u0000\u03b0\u03b1"+ + "\u0003<\u0016\u0000\u03b1\u03b2\u0001\u0000\u0000\u0000\u03b2\u03b3\u0006"+ + "W\u000b\u0000\u03b3\u00bf\u0001\u0000\u0000\u0000\u03b4\u03b5\u0003>\u0017"+ + "\u0000\u03b5\u03b6\u0001\u0000\u0000\u0000\u03b6\u03b7\u0006X\u000b\u0000"+ + "\u03b7\u00c1\u0001\u0000\u0000\u0000\u03b8\u03b9\u0003N\u001f\u0000\u03b9"+ + "\u03ba\u0001\u0000\u0000\u0000\u03ba\u03bb\u0006Y\u000e\u0000\u03bb\u03bc"+ + "\u0006Y\u000f\u0000\u03bc\u00c3\u0001\u0000\u0000\u0000\u03bd\u03be\u0003"+ + "\u00b2Q\u0000\u03be\u03bf\u0001\u0000\u0000\u0000\u03bf\u03c0\u0006Z\f"+ + "\u0000\u03c0\u00c5\u0001\u0000\u0000\u0000\u03c1\u03c2\u0003\u00b4R\u0000"+ + "\u03c2\u03c3\u0001\u0000\u0000\u0000\u03c3\u03c4\u0006[\u0010\u0000\u03c4"+ + "\u00c7\u0001\u0000\u0000\u0000\u03c5\u03c6\u0003\u016c\u00ae\u0000\u03c6"+ + "\u03c7\u0001\u0000\u0000\u0000\u03c7\u03c8\u0006\\\u0011\u0000\u03c8\u00c9"+ + "\u0001\u0000\u0000\u0000\u03c9\u03ca\u0003t2\u0000\u03ca\u03cb\u0001\u0000"+ + "\u0000\u0000\u03cb\u03cc\u0006]\u0012\u0000\u03cc\u00cb\u0001\u0000\u0000"+ + "\u0000\u03cd\u03ce\u0003p0\u0000\u03ce\u03cf\u0001\u0000\u0000\u0000\u03cf"+ + "\u03d0\u0006^\u0013\u0000\u03d0\u00cd\u0001\u0000\u0000\u0000\u03d1\u03d2"+ + "\u0005m\u0000\u0000\u03d2\u03d3\u0005e\u0000\u0000\u03d3\u03d4\u0005t"+ + "\u0000\u0000\u03d4\u03d5\u0005a\u0000\u0000\u03d5\u03d6\u0005d\u0000\u0000"+ + 
"\u03d6\u03d7\u0005a\u0000\u0000\u03d7\u03d8\u0005t\u0000\u0000\u03d8\u03d9"+ + "\u0005a\u0000\u0000\u03d9\u00cf\u0001\u0000\u0000\u0000\u03da\u03db\u0003"+ + "B\u0019\u0000\u03db\u03dc\u0001\u0000\u0000\u0000\u03dc\u03dd\u0006`\u0014"+ + "\u0000\u03dd\u00d1\u0001\u0000\u0000\u0000\u03de\u03df\u0003d*\u0000\u03df"+ + "\u03e0\u0001\u0000\u0000\u0000\u03e0\u03e1\u0006a\u0015\u0000\u03e1\u00d3"+ + "\u0001\u0000\u0000\u0000\u03e2\u03e3\u0003:\u0015\u0000\u03e3\u03e4\u0001"+ + "\u0000\u0000\u0000\u03e4\u03e5\u0006b\u000b\u0000\u03e5\u00d5\u0001\u0000"+ + "\u0000\u0000\u03e6\u03e7\u0003<\u0016\u0000\u03e7\u03e8\u0001\u0000\u0000"+ + "\u0000\u03e8\u03e9\u0006c\u000b\u0000\u03e9\u00d7\u0001\u0000\u0000\u0000"+ + "\u03ea\u03eb\u0003>\u0017\u0000\u03eb\u03ec\u0001\u0000\u0000\u0000\u03ec"+ + "\u03ed\u0006d\u000b\u0000\u03ed\u00d9\u0001\u0000\u0000\u0000\u03ee\u03ef"+ + "\u0003N\u001f\u0000\u03ef\u03f0\u0001\u0000\u0000\u0000\u03f0\u03f1\u0006"+ + "e\u000e\u0000\u03f1\u03f2\u0006e\u000f\u0000\u03f2\u00db\u0001\u0000\u0000"+ + "\u0000\u03f3\u03f4\u0003x4\u0000\u03f4\u03f5\u0001\u0000\u0000\u0000\u03f5"+ + "\u03f6\u0006f\u0016\u0000\u03f6\u00dd\u0001\u0000\u0000\u0000\u03f7\u03f8"+ + "\u0003t2\u0000\u03f8\u03f9\u0001\u0000\u0000\u0000\u03f9\u03fa\u0006g"+ + "\u0012\u0000\u03fa\u00df\u0001\u0000\u0000\u0000\u03fb\u0400\u0003R!\u0000"+ + "\u03fc\u0400\u0003P \u0000\u03fd\u0400\u0003`(\u0000\u03fe\u0400\u0003"+ + "\u00aaM\u0000\u03ff\u03fb\u0001\u0000\u0000\u0000\u03ff\u03fc\u0001\u0000"+ + "\u0000\u0000\u03ff\u03fd\u0001\u0000\u0000\u0000\u03ff\u03fe\u0001\u0000"+ + "\u0000\u0000\u0400\u00e1\u0001\u0000\u0000\u0000\u0401\u0404\u0003R!\u0000"+ + "\u0402\u0404\u0003\u00aaM\u0000\u0403\u0401\u0001\u0000\u0000\u0000\u0403"+ + "\u0402\u0001\u0000\u0000\u0000\u0404\u0408\u0001\u0000\u0000\u0000\u0405"+ + "\u0407\u0003\u00e0h\u0000\u0406\u0405\u0001\u0000\u0000\u0000\u0407\u040a"+ + "\u0001\u0000\u0000\u0000\u0408\u0406\u0001\u0000\u0000\u0000\u0408\u0409"+ + "\u0001\u0000\u0000\u0000\u0409\u0415\u0001\u0000\u0000\u0000\u040a\u0408"+ + "\u0001\u0000\u0000\u0000\u040b\u040e\u0003`(\u0000\u040c\u040e\u0003Z"+ + "%\u0000\u040d\u040b\u0001\u0000\u0000\u0000\u040d\u040c\u0001\u0000\u0000"+ + "\u0000\u040e\u0410\u0001\u0000\u0000\u0000\u040f\u0411\u0003\u00e0h\u0000"+ + "\u0410\u040f\u0001\u0000\u0000\u0000\u0411\u0412\u0001\u0000\u0000\u0000"+ + "\u0412\u0410\u0001\u0000\u0000\u0000\u0412\u0413\u0001\u0000\u0000\u0000"+ + "\u0413\u0415\u0001\u0000\u0000\u0000\u0414\u0403\u0001\u0000\u0000\u0000"+ + "\u0414\u040d\u0001\u0000\u0000\u0000\u0415\u00e3\u0001\u0000\u0000\u0000"+ + "\u0416\u0419\u0003\u00e2i\u0000\u0417\u0419\u0003\u00b8T\u0000\u0418\u0416"+ + "\u0001\u0000\u0000\u0000\u0418\u0417\u0001\u0000\u0000\u0000\u0419\u041a"+ + "\u0001\u0000\u0000\u0000\u041a\u0418\u0001\u0000\u0000\u0000\u041a\u041b"+ + "\u0001\u0000\u0000\u0000\u041b\u00e5\u0001\u0000\u0000\u0000\u041c\u041d"+ + "\u0003:\u0015\u0000\u041d\u041e\u0001\u0000\u0000\u0000\u041e\u041f\u0006"+ + "k\u000b\u0000\u041f\u00e7\u0001\u0000\u0000\u0000\u0420\u0421\u0003<\u0016"+ + "\u0000\u0421\u0422\u0001\u0000\u0000\u0000\u0422\u0423\u0006l\u000b\u0000"+ + "\u0423\u00e9\u0001\u0000\u0000\u0000\u0424\u0425\u0003>\u0017\u0000\u0425"+ + "\u0426\u0001\u0000\u0000\u0000\u0426\u0427\u0006m\u000b\u0000\u0427\u00eb"+ + "\u0001\u0000\u0000\u0000\u0428\u0429\u0003N\u001f\u0000\u0429\u042a\u0001"+ + "\u0000\u0000\u0000\u042a\u042b\u0006n\u000e\u0000\u042b\u042c\u0006n\u000f"+ + "\u0000\u042c\u00ed\u0001\u0000\u0000\u0000\u042d\u042e\u0003p0\u0000\u042e"+ + 
"\u042f\u0001\u0000\u0000\u0000\u042f\u0430\u0006o\u0013\u0000\u0430\u00ef"+ + "\u0001\u0000\u0000\u0000\u0431\u0432\u0003t2\u0000\u0432\u0433\u0001\u0000"+ + "\u0000\u0000\u0433\u0434\u0006p\u0012\u0000\u0434\u00f1\u0001\u0000\u0000"+ + "\u0000\u0435\u0436\u0003x4\u0000\u0436\u0437\u0001\u0000\u0000\u0000\u0437"+ + "\u0438\u0006q\u0016\u0000\u0438\u00f3\u0001\u0000\u0000\u0000\u0439\u043a"+ + "\u0005a\u0000\u0000\u043a\u043b\u0005s\u0000\u0000\u043b\u00f5\u0001\u0000"+ + "\u0000\u0000\u043c\u043d\u0003\u00e4j\u0000\u043d\u043e\u0001\u0000\u0000"+ + "\u0000\u043e\u043f\u0006s\u0017\u0000\u043f\u00f7\u0001\u0000\u0000\u0000"+ + "\u0440\u0441\u0003:\u0015\u0000\u0441\u0442\u0001\u0000\u0000\u0000\u0442"+ + "\u0443\u0006t\u000b\u0000\u0443\u00f9\u0001\u0000\u0000\u0000\u0444\u0445"+ + "\u0003<\u0016\u0000\u0445\u0446\u0001\u0000\u0000\u0000\u0446\u0447\u0006"+ + "u\u000b\u0000\u0447\u00fb\u0001\u0000\u0000\u0000\u0448\u0449\u0003>\u0017"+ + "\u0000\u0449\u044a\u0001\u0000\u0000\u0000\u044a\u044b\u0006v\u000b\u0000"+ + "\u044b\u00fd\u0001\u0000\u0000\u0000\u044c\u044d\u0003N\u001f\u0000\u044d"+ + "\u044e\u0001\u0000\u0000\u0000\u044e\u044f\u0006w\u000e\u0000\u044f\u0450"+ + "\u0006w\u000f\u0000\u0450\u00ff\u0001\u0000\u0000\u0000\u0451\u0452\u0003"+ + "\u00b2Q\u0000\u0452\u0453\u0001\u0000\u0000\u0000\u0453\u0454\u0006x\f"+ + "\u0000\u0454\u0455\u0006x\u0018\u0000\u0455\u0101\u0001\u0000\u0000\u0000"+ + "\u0456\u0457\u0005o\u0000\u0000\u0457\u0458\u0005n\u0000\u0000\u0458\u0459"+ + "\u0001\u0000\u0000\u0000\u0459\u045a\u0006y\u0019\u0000\u045a\u0103\u0001"+ + "\u0000\u0000\u0000\u045b\u045c\u0005w\u0000\u0000\u045c\u045d\u0005i\u0000"+ + "\u0000\u045d\u045e\u0005t\u0000\u0000\u045e\u045f\u0005h\u0000\u0000\u045f"+ + "\u0460\u0001\u0000\u0000\u0000\u0460\u0461\u0006z\u0019\u0000\u0461\u0105"+ + "\u0001\u0000\u0000\u0000\u0462\u0463\b\f\u0000\u0000\u0463\u0107\u0001"+ + "\u0000\u0000\u0000\u0464\u0466\u0003\u0106{\u0000\u0465\u0464\u0001\u0000"+ + "\u0000\u0000\u0466\u0467\u0001\u0000\u0000\u0000\u0467\u0465\u0001\u0000"+ + "\u0000\u0000\u0467\u0468\u0001\u0000\u0000\u0000\u0468\u0469\u0001\u0000"+ + "\u0000\u0000\u0469\u046a\u0003\u016c\u00ae\u0000\u046a\u046c\u0001\u0000"+ + "\u0000\u0000\u046b\u0465\u0001\u0000\u0000\u0000\u046b\u046c\u0001\u0000"+ + "\u0000\u0000\u046c\u046e\u0001\u0000\u0000\u0000\u046d\u046f\u0003\u0106"+ + "{\u0000\u046e\u046d\u0001\u0000\u0000\u0000\u046f\u0470\u0001\u0000\u0000"+ + "\u0000\u0470\u046e\u0001\u0000\u0000\u0000\u0470\u0471\u0001\u0000\u0000"+ + "\u0000\u0471\u0109\u0001\u0000\u0000\u0000\u0472\u0473\u0003\u0108|\u0000"+ + "\u0473\u0474\u0001\u0000\u0000\u0000\u0474\u0475\u0006}\u001a\u0000\u0475"+ + "\u010b\u0001\u0000\u0000\u0000\u0476\u0477\u0003:\u0015\u0000\u0477\u0478"+ + "\u0001\u0000\u0000\u0000\u0478\u0479\u0006~\u000b\u0000\u0479\u010d\u0001"+ + "\u0000\u0000\u0000\u047a\u047b\u0003<\u0016\u0000\u047b\u047c\u0001\u0000"+ + "\u0000\u0000\u047c\u047d\u0006\u007f\u000b\u0000\u047d\u010f\u0001\u0000"+ + "\u0000\u0000\u047e\u047f\u0003>\u0017\u0000\u047f\u0480\u0001\u0000\u0000"+ + "\u0000\u0480\u0481\u0006\u0080\u000b\u0000\u0481\u0111\u0001\u0000\u0000"+ + "\u0000\u0482\u0483\u0003N\u001f\u0000\u0483\u0484\u0001\u0000\u0000\u0000"+ + "\u0484\u0485\u0006\u0081\u000e\u0000\u0485\u0486\u0006\u0081\u000f\u0000"+ + "\u0486\u0487\u0006\u0081\u000f\u0000\u0487\u0113\u0001\u0000\u0000\u0000"+ + "\u0488\u0489\u0003p0\u0000\u0489\u048a\u0001\u0000\u0000\u0000\u048a\u048b"+ + "\u0006\u0082\u0013\u0000\u048b\u0115\u0001\u0000\u0000\u0000\u048c\u048d"+ + 
"\u0003t2\u0000\u048d\u048e\u0001\u0000\u0000\u0000\u048e\u048f\u0006\u0083"+ + "\u0012\u0000\u048f\u0117\u0001\u0000\u0000\u0000\u0490\u0491\u0003x4\u0000"+ + "\u0491\u0492\u0001\u0000\u0000\u0000\u0492\u0493\u0006\u0084\u0016\u0000"+ + "\u0493\u0119\u0001\u0000\u0000\u0000\u0494\u0495\u0003\u0104z\u0000\u0495"+ + "\u0496\u0001\u0000\u0000\u0000\u0496\u0497\u0006\u0085\u001b\u0000\u0497"+ + "\u011b\u0001\u0000\u0000\u0000\u0498\u0499\u0003\u00e4j\u0000\u0499\u049a"+ + "\u0001\u0000\u0000\u0000\u049a\u049b\u0006\u0086\u0017\u0000\u049b\u011d"+ + "\u0001\u0000\u0000\u0000\u049c\u049d\u0003\u00baU\u0000\u049d\u049e\u0001"+ + "\u0000\u0000\u0000\u049e\u049f\u0006\u0087\u001c\u0000\u049f\u011f\u0001"+ + "\u0000\u0000\u0000\u04a0\u04a1\u0003:\u0015\u0000\u04a1\u04a2\u0001\u0000"+ + "\u0000\u0000\u04a2\u04a3\u0006\u0088\u000b\u0000\u04a3\u0121\u0001\u0000"+ + "\u0000\u0000\u04a4\u04a5\u0003<\u0016\u0000\u04a5\u04a6\u0001\u0000\u0000"+ + "\u0000\u04a6\u04a7\u0006\u0089\u000b\u0000\u04a7\u0123\u0001\u0000\u0000"+ + "\u0000\u04a8\u04a9\u0003>\u0017\u0000\u04a9\u04aa\u0001\u0000\u0000\u0000"+ + "\u04aa\u04ab\u0006\u008a\u000b\u0000\u04ab\u0125\u0001\u0000\u0000\u0000"+ + "\u04ac\u04ad\u0003N\u001f\u0000\u04ad\u04ae\u0001\u0000\u0000\u0000\u04ae"+ + "\u04af\u0006\u008b\u000e\u0000\u04af\u04b0\u0006\u008b\u000f\u0000\u04b0"+ + "\u0127\u0001\u0000\u0000\u0000\u04b1\u04b2\u0003\u016c\u00ae\u0000\u04b2"+ + "\u04b3\u0001\u0000\u0000\u0000\u04b3\u04b4\u0006\u008c\u0011\u0000\u04b4"+ + "\u0129\u0001\u0000\u0000\u0000\u04b5\u04b6\u0003t2\u0000\u04b6\u04b7\u0001"+ + "\u0000\u0000\u0000\u04b7\u04b8\u0006\u008d\u0012\u0000\u04b8\u012b\u0001"+ + "\u0000\u0000\u0000\u04b9\u04ba\u0003x4\u0000\u04ba\u04bb\u0001\u0000\u0000"+ + "\u0000\u04bb\u04bc\u0006\u008e\u0016\u0000\u04bc\u012d\u0001\u0000\u0000"+ + "\u0000\u04bd\u04be\u0003\u0102y\u0000\u04be\u04bf\u0001\u0000\u0000\u0000"+ + "\u04bf\u04c0\u0006\u008f\u001d\u0000\u04c0\u04c1\u0006\u008f\u001e\u0000"+ + "\u04c1\u012f\u0001\u0000\u0000\u0000\u04c2\u04c3\u0003B\u0019\u0000\u04c3"+ + "\u04c4\u0001\u0000\u0000\u0000\u04c4\u04c5\u0006\u0090\u0014\u0000\u04c5"+ + "\u0131\u0001\u0000\u0000\u0000\u04c6\u04c7\u0003d*\u0000\u04c7\u04c8\u0001"+ + "\u0000\u0000\u0000\u04c8\u04c9\u0006\u0091\u0015\u0000\u04c9\u0133\u0001"+ + "\u0000\u0000\u0000\u04ca\u04cb\u0003:\u0015\u0000\u04cb\u04cc\u0001\u0000"+ + "\u0000\u0000\u04cc\u04cd\u0006\u0092\u000b\u0000\u04cd\u0135\u0001\u0000"+ + "\u0000\u0000\u04ce\u04cf\u0003<\u0016\u0000\u04cf\u04d0\u0001\u0000\u0000"+ + "\u0000\u04d0\u04d1\u0006\u0093\u000b\u0000\u04d1\u0137\u0001\u0000\u0000"+ + "\u0000\u04d2\u04d3\u0003>\u0017\u0000\u04d3\u04d4\u0001\u0000\u0000\u0000"+ + "\u04d4\u04d5\u0006\u0094\u000b\u0000\u04d5\u0139\u0001\u0000\u0000\u0000"+ + "\u04d6\u04d7\u0003N\u001f\u0000\u04d7\u04d8\u0001\u0000\u0000\u0000\u04d8"+ + "\u04d9\u0006\u0095\u000e\u0000\u04d9\u04da\u0006\u0095\u000f\u0000\u04da"+ + "\u04db\u0006\u0095\u000f\u0000\u04db\u013b\u0001\u0000\u0000\u0000\u04dc"+ + "\u04dd\u0003t2\u0000\u04dd\u04de\u0001\u0000\u0000\u0000\u04de\u04df\u0006"+ + "\u0096\u0012\u0000\u04df\u013d\u0001\u0000\u0000\u0000\u04e0\u04e1\u0003"+ + "x4\u0000\u04e1\u04e2\u0001\u0000\u0000\u0000\u04e2\u04e3\u0006\u0097\u0016"+ + "\u0000\u04e3\u013f\u0001\u0000\u0000\u0000\u04e4\u04e5\u0003\u00e4j\u0000"+ + "\u04e5\u04e6\u0001\u0000\u0000\u0000\u04e6\u04e7\u0006\u0098\u0017\u0000"+ + "\u04e7\u0141\u0001\u0000\u0000\u0000\u04e8\u04e9\u0003:\u0015\u0000\u04e9"+ + "\u04ea\u0001\u0000\u0000\u0000\u04ea\u04eb\u0006\u0099\u000b\u0000\u04eb"+ + 
"\u0143\u0001\u0000\u0000\u0000\u04ec\u04ed\u0003<\u0016\u0000\u04ed\u04ee"+ + "\u0001\u0000\u0000\u0000\u04ee\u04ef\u0006\u009a\u000b\u0000\u04ef\u0145"+ + "\u0001\u0000\u0000\u0000\u04f0\u04f1\u0003>\u0017\u0000\u04f1\u04f2\u0001"+ + "\u0000\u0000\u0000\u04f2\u04f3\u0006\u009b\u000b\u0000\u04f3\u0147\u0001"+ + "\u0000\u0000\u0000\u04f4\u04f5\u0003N\u001f\u0000\u04f5\u04f6\u0001\u0000"+ + "\u0000\u0000\u04f6\u04f7\u0006\u009c\u000e\u0000\u04f7\u04f8\u0006\u009c"+ + "\u000f\u0000\u04f8\u0149\u0001\u0000\u0000\u0000\u04f9\u04fa\u0003x4\u0000"+ + "\u04fa\u04fb\u0001\u0000\u0000\u0000\u04fb\u04fc\u0006\u009d\u0016\u0000"+ + "\u04fc\u014b\u0001\u0000\u0000\u0000\u04fd\u04fe\u0003\u00baU\u0000\u04fe"+ + "\u04ff\u0001\u0000\u0000\u0000\u04ff\u0500\u0006\u009e\u001c\u0000\u0500"+ + "\u014d\u0001\u0000\u0000\u0000\u0501\u0502\u0003\u00b6S\u0000\u0502\u0503"+ + "\u0001\u0000\u0000\u0000\u0503\u0504\u0006\u009f\u001f\u0000\u0504\u014f"+ + "\u0001\u0000\u0000\u0000\u0505\u0506\u0003:\u0015\u0000\u0506\u0507\u0001"+ + "\u0000\u0000\u0000\u0507\u0508\u0006\u00a0\u000b\u0000\u0508\u0151\u0001"+ + "\u0000\u0000\u0000\u0509\u050a\u0003<\u0016\u0000\u050a\u050b\u0001\u0000"+ + "\u0000\u0000\u050b\u050c\u0006\u00a1\u000b\u0000\u050c\u0153\u0001\u0000"+ + "\u0000\u0000\u050d\u050e\u0003>\u0017\u0000\u050e\u050f\u0001\u0000\u0000"+ + "\u0000\u050f\u0510\u0006\u00a2\u000b\u0000\u0510\u0155\u0001\u0000\u0000"+ + "\u0000\u0511\u0512\u0003N\u001f\u0000\u0512\u0513\u0001\u0000\u0000\u0000"+ + "\u0513\u0514\u0006\u00a3\u000e\u0000\u0514\u0515\u0006\u00a3\u000f\u0000"+ + "\u0515\u0157\u0001\u0000\u0000\u0000\u0516\u0517\u0005i\u0000\u0000\u0517"+ + "\u0518\u0005n\u0000\u0000\u0518\u0519\u0005f\u0000\u0000\u0519\u051a\u0005"+ + "o\u0000\u0000\u051a\u0159\u0001\u0000\u0000\u0000\u051b\u051c\u0003:\u0015"+ + "\u0000\u051c\u051d\u0001\u0000\u0000\u0000\u051d\u051e\u0006\u00a5\u000b"+ + "\u0000\u051e\u015b\u0001\u0000\u0000\u0000\u051f\u0520\u0003<\u0016\u0000"+ + "\u0520\u0521\u0001\u0000\u0000\u0000\u0521\u0522\u0006\u00a6\u000b\u0000"+ + "\u0522\u015d\u0001\u0000\u0000\u0000\u0523\u0524\u0003>\u0017\u0000\u0524"+ + "\u0525\u0001\u0000\u0000\u0000\u0525\u0526\u0006\u00a7\u000b\u0000\u0526"+ + "\u015f\u0001\u0000\u0000\u0000\u0527\u0528\u0003N\u001f\u0000\u0528\u0529"+ + "\u0001\u0000\u0000\u0000\u0529\u052a\u0006\u00a8\u000e\u0000\u052a\u052b"+ + "\u0006\u00a8\u000f\u0000\u052b\u0161\u0001\u0000\u0000\u0000\u052c\u052d"+ + "\u0005f\u0000\u0000\u052d\u052e\u0005u\u0000\u0000\u052e\u052f\u0005n"+ + "\u0000\u0000\u052f\u0530\u0005c\u0000\u0000\u0530\u0531\u0005t\u0000\u0000"+ + "\u0531\u0532\u0005i\u0000\u0000\u0532\u0533\u0005o\u0000\u0000\u0533\u0534"+ + "\u0005n\u0000\u0000\u0534\u0535\u0005s\u0000\u0000\u0535\u0163\u0001\u0000"+ + "\u0000\u0000\u0536\u0537\u0003:\u0015\u0000\u0537\u0538\u0001\u0000\u0000"+ + "\u0000\u0538\u0539\u0006\u00aa\u000b\u0000\u0539\u0165\u0001\u0000\u0000"+ + "\u0000\u053a\u053b\u0003<\u0016\u0000\u053b\u053c\u0001\u0000\u0000\u0000"+ + "\u053c\u053d\u0006\u00ab\u000b\u0000\u053d\u0167\u0001\u0000\u0000\u0000"+ + "\u053e\u053f\u0003>\u0017\u0000\u053f\u0540\u0001\u0000\u0000\u0000\u0540"+ + "\u0541\u0006\u00ac\u000b\u0000\u0541\u0169\u0001\u0000\u0000\u0000\u0542"+ + "\u0543\u0003\u00b4R\u0000\u0543\u0544\u0001\u0000\u0000\u0000\u0544\u0545"+ + "\u0006\u00ad\u0010\u0000\u0545\u0546\u0006\u00ad\u000f\u0000\u0546\u016b"+ + "\u0001\u0000\u0000\u0000\u0547\u0548\u0005:\u0000\u0000\u0548\u016d\u0001"+ + "\u0000\u0000\u0000\u0549\u054f\u0003Z%\u0000\u054a\u054f\u0003P \u0000"+ + 
"\u054b\u054f\u0003x4\u0000\u054c\u054f\u0003R!\u0000\u054d\u054f\u0003"+ + "`(\u0000\u054e\u0549\u0001\u0000\u0000\u0000\u054e\u054a\u0001\u0000\u0000"+ + "\u0000\u054e\u054b\u0001\u0000\u0000\u0000\u054e\u054c\u0001\u0000\u0000"+ + "\u0000\u054e\u054d\u0001\u0000\u0000\u0000\u054f\u0550\u0001\u0000\u0000"+ + "\u0000\u0550\u054e\u0001\u0000\u0000\u0000\u0550\u0551\u0001\u0000\u0000"+ + "\u0000\u0551\u016f\u0001\u0000\u0000\u0000\u0552\u0553\u0003:\u0015\u0000"+ + "\u0553\u0554\u0001\u0000\u0000\u0000\u0554\u0555\u0006\u00b0\u000b\u0000"+ + "\u0555\u0171\u0001\u0000\u0000\u0000\u0556\u0557\u0003<\u0016\u0000\u0557"+ + "\u0558\u0001\u0000\u0000\u0000\u0558\u0559\u0006\u00b1\u000b\u0000\u0559"+ + "\u0173\u0001\u0000\u0000\u0000\u055a\u055b\u0003>\u0017\u0000\u055b\u055c"+ + "\u0001\u0000\u0000\u0000\u055c\u055d\u0006\u00b2\u000b\u0000\u055d\u0175"+ + "\u0001\u0000\u0000\u0000\u055e\u055f\u0003N\u001f\u0000\u055f\u0560\u0001"+ + "\u0000\u0000\u0000\u0560\u0561\u0006\u00b3\u000e\u0000\u0561\u0562\u0006"+ + "\u00b3\u000f\u0000\u0562\u0177\u0001\u0000\u0000\u0000\u0563\u0564\u0003"+ + "B\u0019\u0000\u0564\u0565\u0001\u0000\u0000\u0000\u0565\u0566\u0006\u00b4"+ + "\u0014\u0000\u0566\u0567\u0006\u00b4\u000f\u0000\u0567\u0568\u0006\u00b4"+ + " \u0000\u0568\u0179\u0001\u0000\u0000\u0000\u0569\u056a\u0003d*\u0000"+ + "\u056a\u056b\u0001\u0000\u0000\u0000\u056b\u056c\u0006\u00b5\u0015\u0000"+ + "\u056c\u056d\u0006\u00b5\u000f\u0000\u056d\u056e\u0006\u00b5 \u0000\u056e"+ + "\u017b\u0001\u0000\u0000\u0000\u056f\u0570\u0003:\u0015\u0000\u0570\u0571"+ + "\u0001\u0000\u0000\u0000\u0571\u0572\u0006\u00b6\u000b\u0000\u0572\u017d"+ + "\u0001\u0000\u0000\u0000\u0573\u0574\u0003<\u0016\u0000\u0574\u0575\u0001"+ + "\u0000\u0000\u0000\u0575\u0576\u0006\u00b7\u000b\u0000\u0576\u017f\u0001"+ + "\u0000\u0000\u0000\u0577\u0578\u0003>\u0017\u0000\u0578\u0579\u0001\u0000"+ + "\u0000\u0000\u0579\u057a\u0006\u00b8\u000b\u0000\u057a\u0181\u0001\u0000"+ + "\u0000\u0000\u057b\u057c\u0003\u016c\u00ae\u0000\u057c\u057d\u0001\u0000"+ + "\u0000\u0000\u057d\u057e\u0006\u00b9\u0011\u0000\u057e\u057f\u0006\u00b9"+ + "\u000f\u0000\u057f\u0580\u0006\u00b9\u0007\u0000\u0580\u0183\u0001\u0000"+ + "\u0000\u0000\u0581\u0582\u0003t2\u0000\u0582\u0583\u0001\u0000\u0000\u0000"+ + "\u0583\u0584\u0006\u00ba\u0012\u0000\u0584\u0585\u0006\u00ba\u000f\u0000"+ + "\u0585\u0586\u0006\u00ba\u0007\u0000\u0586\u0185\u0001\u0000\u0000\u0000"+ + "\u0587\u0588\u0003:\u0015\u0000\u0588\u0589\u0001\u0000\u0000\u0000\u0589"+ + "\u058a\u0006\u00bb\u000b\u0000\u058a\u0187\u0001\u0000\u0000\u0000\u058b"+ + "\u058c\u0003<\u0016\u0000\u058c\u058d\u0001\u0000\u0000\u0000\u058d\u058e"+ + "\u0006\u00bc\u000b\u0000\u058e\u0189\u0001\u0000\u0000\u0000\u058f\u0590"+ + "\u0003>\u0017\u0000\u0590\u0591\u0001\u0000\u0000\u0000\u0591\u0592\u0006"+ + "\u00bd\u000b\u0000\u0592\u018b\u0001\u0000\u0000\u0000\u0593\u0594\u0003"+ + "\u00baU\u0000\u0594\u0595\u0001\u0000\u0000\u0000\u0595\u0596\u0006\u00be"+ + "\u000f\u0000\u0596\u0597\u0006\u00be\u0000\u0000\u0597\u0598\u0006\u00be"+ + "\u001c\u0000\u0598\u018d\u0001\u0000\u0000\u0000\u0599\u059a\u0003\u00b6"+ + "S\u0000\u059a\u059b\u0001\u0000\u0000\u0000\u059b\u059c\u0006\u00bf\u000f"+ + "\u0000\u059c\u059d\u0006\u00bf\u0000\u0000\u059d\u059e\u0006\u00bf\u001f"+ + "\u0000\u059e\u018f\u0001\u0000\u0000\u0000\u059f\u05a0\u0003j-\u0000\u05a0"+ + "\u05a1\u0001\u0000\u0000\u0000\u05a1\u05a2\u0006\u00c0\u000f\u0000\u05a2"+ + "\u05a3\u0006\u00c0\u0000\u0000\u05a3\u05a4\u0006\u00c0!\u0000\u05a4\u0191"+ + 
"\u0001\u0000\u0000\u0000\u05a5\u05a6\u0003N\u001f\u0000\u05a6\u05a7\u0001"+ + "\u0000\u0000\u0000\u05a7\u05a8\u0006\u00c1\u000e\u0000\u05a8\u05a9\u0006"+ + "\u00c1\u000f\u0000\u05a9\u0193\u0001\u0000\u0000\u0000A\u0000\u0001\u0002"+ + "\u0003\u0004\u0005\u0006\u0007\b\t\n\u000b\f\r\u000e\u000f\u0240\u024a"+ + "\u024e\u0251\u025a\u025c\u0267\u026e\u0273\u029a\u029f\u02a8\u02af\u02b4"+ + "\u02b6\u02c1\u02c9\u02cc\u02ce\u02d3\u02d8\u02de\u02e5\u02ea\u02f0\u02f3"+ + "\u02fb\u02ff\u037b\u0382\u0384\u0394\u0399\u039e\u03a0\u03a6\u03ff\u0403"+ + "\u0408\u040d\u0412\u0414\u0418\u041a\u0467\u046b\u0470\u054e\u0550\"\u0005"+ + "\u0002\u0000\u0005\u0004\u0000\u0005\u0006\u0000\u0005\u0001\u0000\u0005"+ + "\u0003\u0000\u0005\b\u0000\u0005\f\u0000\u0005\u000e\u0000\u0005\n\u0000"+ + "\u0005\u0005\u0000\u0005\u000b\u0000\u0000\u0001\u0000\u0007E\u0000\u0005"+ + "\u0000\u0000\u0007\u001d\u0000\u0004\u0000\u0000\u0007F\u0000\u0007r\u0000"+ + "\u0007&\u0000\u0007$\u0000\u0007\u0019\u0000\u0007\u001e\u0000\u0007("+ + "\u0000\u0007P\u0000\u0005\r\u0000\u0005\u0007\u0000\u0007Z\u0000\u0007"+ + "Y\u0000\u0007H\u0000\u0007X\u0000\u0005\t\u0000\u0007G\u0000\u0005\u000f"+ + "\u0000\u0007!\u0000"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.interp b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.interp index 5900020590110..6c5edef9e98f0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.interp +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.interp @@ -151,7 +151,7 @@ UNKNOWN_CMD LINE_COMMENT MULTILINE_COMMENT WS -INDEX_UNQUOTED_IDENTIFIER +UNQUOTED_SOURCE EXPLAIN_WS EXPLAIN_LINE_COMMENT EXPLAIN_MULTILINE_COMMENT @@ -269,7 +269,9 @@ rowCommand fields field fromCommand -indexIdentifier +indexPattern +clusterString +indexString metadata metadataOption deprecated_metadata @@ -312,4 +314,4 @@ lookupCommand atn: -[4, 1, 124, 554, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, 47, 7, 47, 2, 48, 7, 48, 2, 49, 7, 49, 2, 50, 7, 50, 2, 51, 7, 51, 2, 52, 7, 52, 2, 53, 7, 53, 2, 54, 7, 54, 2, 55, 7, 55, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 122, 8, 1, 10, 1, 12, 1, 125, 9, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 3, 2, 133, 8, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 3, 3, 149, 8, 3, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 3, 5, 161, 8, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 168, 8, 5, 10, 5, 12, 5, 171, 9, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 3, 5, 178, 8, 5, 1, 5, 1, 5, 3, 5, 182, 8, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 190, 8, 5, 10, 5, 12, 5, 193, 9, 5, 1, 6, 1, 6, 3, 6, 197, 8, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 3, 6, 204, 8, 6, 1, 6, 1, 6, 1, 6, 3, 6, 209, 8, 6, 1, 7, 1, 7, 
1, 7, 1, 7, 1, 7, 3, 7, 216, 8, 7, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, 222, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 5, 8, 230, 8, 8, 10, 8, 12, 8, 233, 9, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 3, 9, 243, 8, 9, 1, 9, 1, 9, 1, 9, 5, 9, 248, 8, 9, 10, 9, 12, 9, 251, 9, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 5, 10, 259, 8, 10, 10, 10, 12, 10, 262, 9, 10, 3, 10, 264, 8, 10, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 5, 13, 276, 8, 13, 10, 13, 12, 13, 279, 9, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 3, 14, 286, 8, 14, 1, 15, 1, 15, 1, 15, 1, 15, 5, 15, 292, 8, 15, 10, 15, 12, 15, 295, 9, 15, 1, 15, 3, 15, 298, 8, 15, 1, 16, 1, 16, 1, 17, 1, 17, 3, 17, 304, 8, 17, 1, 18, 1, 18, 1, 18, 1, 18, 5, 18, 310, 8, 18, 10, 18, 12, 18, 313, 9, 18, 1, 19, 1, 19, 1, 19, 1, 19, 1, 20, 1, 20, 1, 20, 1, 20, 5, 20, 323, 8, 20, 10, 20, 12, 20, 326, 9, 20, 1, 20, 3, 20, 329, 8, 20, 1, 20, 1, 20, 3, 20, 333, 8, 20, 1, 21, 1, 21, 1, 21, 1, 22, 1, 22, 3, 22, 340, 8, 22, 1, 22, 1, 22, 3, 22, 344, 8, 22, 1, 23, 1, 23, 1, 23, 1, 23, 3, 23, 350, 8, 23, 1, 24, 1, 24, 1, 24, 5, 24, 355, 8, 24, 10, 24, 12, 24, 358, 9, 24, 1, 25, 1, 25, 1, 25, 5, 25, 363, 8, 25, 10, 25, 12, 25, 366, 9, 25, 1, 26, 1, 26, 1, 26, 5, 26, 371, 8, 26, 10, 26, 12, 26, 374, 9, 26, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 5, 29, 393, 8, 29, 10, 29, 12, 29, 396, 9, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 5, 29, 404, 8, 29, 10, 29, 12, 29, 407, 9, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 5, 29, 415, 8, 29, 10, 29, 12, 29, 418, 9, 29, 1, 29, 1, 29, 3, 29, 422, 8, 29, 1, 30, 1, 30, 3, 30, 426, 8, 30, 1, 31, 1, 31, 1, 31, 1, 32, 1, 32, 1, 32, 1, 32, 5, 32, 435, 8, 32, 10, 32, 12, 32, 438, 9, 32, 1, 33, 1, 33, 3, 33, 442, 8, 33, 1, 33, 1, 33, 3, 33, 446, 8, 33, 1, 34, 1, 34, 1, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36, 5, 36, 458, 8, 36, 10, 36, 12, 36, 461, 9, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 3, 38, 471, 8, 38, 1, 39, 1, 39, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 41, 1, 41, 1, 41, 5, 41, 483, 8, 41, 10, 41, 12, 41, 486, 9, 41, 1, 42, 1, 42, 1, 42, 1, 42, 1, 43, 1, 43, 1, 44, 1, 44, 3, 44, 496, 8, 44, 1, 45, 3, 45, 499, 8, 45, 1, 45, 1, 45, 1, 46, 3, 46, 504, 8, 46, 1, 46, 1, 46, 1, 47, 1, 47, 1, 48, 1, 48, 1, 49, 1, 49, 1, 49, 1, 50, 1, 50, 1, 50, 1, 50, 1, 51, 1, 51, 1, 51, 1, 52, 1, 52, 1, 52, 1, 53, 1, 53, 1, 53, 1, 53, 3, 53, 529, 8, 53, 1, 53, 1, 53, 1, 53, 1, 53, 5, 53, 535, 8, 53, 10, 53, 12, 53, 538, 9, 53, 3, 53, 540, 8, 53, 1, 54, 1, 54, 1, 54, 3, 54, 545, 8, 54, 1, 54, 1, 54, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 0, 4, 2, 10, 16, 18, 56, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 0, 7, 1, 0, 63, 64, 1, 0, 65, 67, 1, 0, 71, 72, 2, 0, 35, 35, 39, 39, 1, 0, 42, 43, 2, 0, 41, 41, 55, 55, 2, 0, 56, 56, 58, 62, 580, 0, 112, 1, 0, 0, 0, 2, 115, 1, 0, 0, 0, 4, 132, 1, 0, 0, 0, 6, 148, 1, 0, 0, 0, 8, 150, 1, 0, 0, 0, 10, 181, 1, 0, 0, 0, 12, 208, 1, 0, 0, 0, 14, 215, 1, 0, 0, 0, 16, 221, 1, 0, 0, 0, 18, 242, 1, 0, 0, 0, 20, 252, 1, 0, 0, 0, 22, 267, 1, 0, 0, 0, 24, 269, 1, 0, 0, 0, 26, 272, 1, 0, 0, 0, 28, 285, 1, 0, 0, 0, 30, 287, 1, 0, 0, 0, 32, 299, 1, 0, 0, 0, 34, 303, 1, 0, 0, 0, 36, 305, 1, 0, 0, 0, 38, 314, 1, 0, 0, 0, 40, 318, 1, 0, 0, 0, 42, 334, 1, 0, 0, 0, 44, 337, 1, 0, 0, 0, 46, 345, 1, 0, 0, 0, 
48, 351, 1, 0, 0, 0, 50, 359, 1, 0, 0, 0, 52, 367, 1, 0, 0, 0, 54, 375, 1, 0, 0, 0, 56, 377, 1, 0, 0, 0, 58, 421, 1, 0, 0, 0, 60, 425, 1, 0, 0, 0, 62, 427, 1, 0, 0, 0, 64, 430, 1, 0, 0, 0, 66, 439, 1, 0, 0, 0, 68, 447, 1, 0, 0, 0, 70, 450, 1, 0, 0, 0, 72, 453, 1, 0, 0, 0, 74, 462, 1, 0, 0, 0, 76, 466, 1, 0, 0, 0, 78, 472, 1, 0, 0, 0, 80, 476, 1, 0, 0, 0, 82, 479, 1, 0, 0, 0, 84, 487, 1, 0, 0, 0, 86, 491, 1, 0, 0, 0, 88, 495, 1, 0, 0, 0, 90, 498, 1, 0, 0, 0, 92, 503, 1, 0, 0, 0, 94, 507, 1, 0, 0, 0, 96, 509, 1, 0, 0, 0, 98, 511, 1, 0, 0, 0, 100, 514, 1, 0, 0, 0, 102, 518, 1, 0, 0, 0, 104, 521, 1, 0, 0, 0, 106, 524, 1, 0, 0, 0, 108, 544, 1, 0, 0, 0, 110, 548, 1, 0, 0, 0, 112, 113, 3, 2, 1, 0, 113, 114, 5, 0, 0, 1, 114, 1, 1, 0, 0, 0, 115, 116, 6, 1, -1, 0, 116, 117, 3, 4, 2, 0, 117, 123, 1, 0, 0, 0, 118, 119, 10, 1, 0, 0, 119, 120, 5, 29, 0, 0, 120, 122, 3, 6, 3, 0, 121, 118, 1, 0, 0, 0, 122, 125, 1, 0, 0, 0, 123, 121, 1, 0, 0, 0, 123, 124, 1, 0, 0, 0, 124, 3, 1, 0, 0, 0, 125, 123, 1, 0, 0, 0, 126, 133, 3, 98, 49, 0, 127, 133, 3, 30, 15, 0, 128, 133, 3, 24, 12, 0, 129, 133, 3, 40, 20, 0, 130, 133, 3, 102, 51, 0, 131, 133, 3, 104, 52, 0, 132, 126, 1, 0, 0, 0, 132, 127, 1, 0, 0, 0, 132, 128, 1, 0, 0, 0, 132, 129, 1, 0, 0, 0, 132, 130, 1, 0, 0, 0, 132, 131, 1, 0, 0, 0, 133, 5, 1, 0, 0, 0, 134, 149, 3, 42, 21, 0, 135, 149, 3, 46, 23, 0, 136, 149, 3, 62, 31, 0, 137, 149, 3, 110, 55, 0, 138, 149, 3, 68, 34, 0, 139, 149, 3, 64, 32, 0, 140, 149, 3, 44, 22, 0, 141, 149, 3, 8, 4, 0, 142, 149, 3, 70, 35, 0, 143, 149, 3, 72, 36, 0, 144, 149, 3, 76, 38, 0, 145, 149, 3, 78, 39, 0, 146, 149, 3, 106, 53, 0, 147, 149, 3, 80, 40, 0, 148, 134, 1, 0, 0, 0, 148, 135, 1, 0, 0, 0, 148, 136, 1, 0, 0, 0, 148, 137, 1, 0, 0, 0, 148, 138, 1, 0, 0, 0, 148, 139, 1, 0, 0, 0, 148, 140, 1, 0, 0, 0, 148, 141, 1, 0, 0, 0, 148, 142, 1, 0, 0, 0, 148, 143, 1, 0, 0, 0, 148, 144, 1, 0, 0, 0, 148, 145, 1, 0, 0, 0, 148, 146, 1, 0, 0, 0, 148, 147, 1, 0, 0, 0, 149, 7, 1, 0, 0, 0, 150, 151, 5, 20, 0, 0, 151, 152, 3, 10, 5, 0, 152, 9, 1, 0, 0, 0, 153, 154, 6, 5, -1, 0, 154, 155, 5, 48, 0, 0, 155, 182, 3, 10, 5, 7, 156, 182, 3, 14, 7, 0, 157, 182, 3, 12, 6, 0, 158, 160, 3, 14, 7, 0, 159, 161, 5, 48, 0, 0, 160, 159, 1, 0, 0, 0, 160, 161, 1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 5, 45, 0, 0, 163, 164, 5, 44, 0, 0, 164, 169, 3, 14, 7, 0, 165, 166, 5, 38, 0, 0, 166, 168, 3, 14, 7, 0, 167, 165, 1, 0, 0, 0, 168, 171, 1, 0, 0, 0, 169, 167, 1, 0, 0, 0, 169, 170, 1, 0, 0, 0, 170, 172, 1, 0, 0, 0, 171, 169, 1, 0, 0, 0, 172, 173, 5, 54, 0, 0, 173, 182, 1, 0, 0, 0, 174, 175, 3, 14, 7, 0, 175, 177, 5, 46, 0, 0, 176, 178, 5, 48, 0, 0, 177, 176, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 179, 1, 0, 0, 0, 179, 180, 5, 49, 0, 0, 180, 182, 1, 0, 0, 0, 181, 153, 1, 0, 0, 0, 181, 156, 1, 0, 0, 0, 181, 157, 1, 0, 0, 0, 181, 158, 1, 0, 0, 0, 181, 174, 1, 0, 0, 0, 182, 191, 1, 0, 0, 0, 183, 184, 10, 4, 0, 0, 184, 185, 5, 34, 0, 0, 185, 190, 3, 10, 5, 5, 186, 187, 10, 3, 0, 0, 187, 188, 5, 51, 0, 0, 188, 190, 3, 10, 5, 4, 189, 183, 1, 0, 0, 0, 189, 186, 1, 0, 0, 0, 190, 193, 1, 0, 0, 0, 191, 189, 1, 0, 0, 0, 191, 192, 1, 0, 0, 0, 192, 11, 1, 0, 0, 0, 193, 191, 1, 0, 0, 0, 194, 196, 3, 14, 7, 0, 195, 197, 5, 48, 0, 0, 196, 195, 1, 0, 0, 0, 196, 197, 1, 0, 0, 0, 197, 198, 1, 0, 0, 0, 198, 199, 5, 47, 0, 0, 199, 200, 3, 94, 47, 0, 200, 209, 1, 0, 0, 0, 201, 203, 3, 14, 7, 0, 202, 204, 5, 48, 0, 0, 203, 202, 1, 0, 0, 0, 203, 204, 1, 0, 0, 0, 204, 205, 1, 0, 0, 0, 205, 206, 5, 53, 0, 0, 206, 207, 3, 94, 47, 0, 207, 209, 1, 0, 0, 0, 208, 194, 1, 0, 0, 0, 208, 201, 1, 
0, 0, 0, 209, 13, 1, 0, 0, 0, 210, 216, 3, 16, 8, 0, 211, 212, 3, 16, 8, 0, 212, 213, 3, 96, 48, 0, 213, 214, 3, 16, 8, 0, 214, 216, 1, 0, 0, 0, 215, 210, 1, 0, 0, 0, 215, 211, 1, 0, 0, 0, 216, 15, 1, 0, 0, 0, 217, 218, 6, 8, -1, 0, 218, 222, 3, 18, 9, 0, 219, 220, 7, 0, 0, 0, 220, 222, 3, 16, 8, 3, 221, 217, 1, 0, 0, 0, 221, 219, 1, 0, 0, 0, 222, 231, 1, 0, 0, 0, 223, 224, 10, 2, 0, 0, 224, 225, 7, 1, 0, 0, 225, 230, 3, 16, 8, 3, 226, 227, 10, 1, 0, 0, 227, 228, 7, 0, 0, 0, 228, 230, 3, 16, 8, 2, 229, 223, 1, 0, 0, 0, 229, 226, 1, 0, 0, 0, 230, 233, 1, 0, 0, 0, 231, 229, 1, 0, 0, 0, 231, 232, 1, 0, 0, 0, 232, 17, 1, 0, 0, 0, 233, 231, 1, 0, 0, 0, 234, 235, 6, 9, -1, 0, 235, 243, 3, 58, 29, 0, 236, 243, 3, 48, 24, 0, 237, 243, 3, 20, 10, 0, 238, 239, 5, 44, 0, 0, 239, 240, 3, 10, 5, 0, 240, 241, 5, 54, 0, 0, 241, 243, 1, 0, 0, 0, 242, 234, 1, 0, 0, 0, 242, 236, 1, 0, 0, 0, 242, 237, 1, 0, 0, 0, 242, 238, 1, 0, 0, 0, 243, 249, 1, 0, 0, 0, 244, 245, 10, 1, 0, 0, 245, 246, 5, 37, 0, 0, 246, 248, 3, 22, 11, 0, 247, 244, 1, 0, 0, 0, 248, 251, 1, 0, 0, 0, 249, 247, 1, 0, 0, 0, 249, 250, 1, 0, 0, 0, 250, 19, 1, 0, 0, 0, 251, 249, 1, 0, 0, 0, 252, 253, 3, 54, 27, 0, 253, 263, 5, 44, 0, 0, 254, 264, 5, 65, 0, 0, 255, 260, 3, 10, 5, 0, 256, 257, 5, 38, 0, 0, 257, 259, 3, 10, 5, 0, 258, 256, 1, 0, 0, 0, 259, 262, 1, 0, 0, 0, 260, 258, 1, 0, 0, 0, 260, 261, 1, 0, 0, 0, 261, 264, 1, 0, 0, 0, 262, 260, 1, 0, 0, 0, 263, 254, 1, 0, 0, 0, 263, 255, 1, 0, 0, 0, 263, 264, 1, 0, 0, 0, 264, 265, 1, 0, 0, 0, 265, 266, 5, 54, 0, 0, 266, 21, 1, 0, 0, 0, 267, 268, 3, 54, 27, 0, 268, 23, 1, 0, 0, 0, 269, 270, 5, 16, 0, 0, 270, 271, 3, 26, 13, 0, 271, 25, 1, 0, 0, 0, 272, 277, 3, 28, 14, 0, 273, 274, 5, 38, 0, 0, 274, 276, 3, 28, 14, 0, 275, 273, 1, 0, 0, 0, 276, 279, 1, 0, 0, 0, 277, 275, 1, 0, 0, 0, 277, 278, 1, 0, 0, 0, 278, 27, 1, 0, 0, 0, 279, 277, 1, 0, 0, 0, 280, 286, 3, 10, 5, 0, 281, 282, 3, 48, 24, 0, 282, 283, 5, 36, 0, 0, 283, 284, 3, 10, 5, 0, 284, 286, 1, 0, 0, 0, 285, 280, 1, 0, 0, 0, 285, 281, 1, 0, 0, 0, 286, 29, 1, 0, 0, 0, 287, 288, 5, 6, 0, 0, 288, 293, 3, 32, 16, 0, 289, 290, 5, 38, 0, 0, 290, 292, 3, 32, 16, 0, 291, 289, 1, 0, 0, 0, 292, 295, 1, 0, 0, 0, 293, 291, 1, 0, 0, 0, 293, 294, 1, 0, 0, 0, 294, 297, 1, 0, 0, 0, 295, 293, 1, 0, 0, 0, 296, 298, 3, 34, 17, 0, 297, 296, 1, 0, 0, 0, 297, 298, 1, 0, 0, 0, 298, 31, 1, 0, 0, 0, 299, 300, 5, 25, 0, 0, 300, 33, 1, 0, 0, 0, 301, 304, 3, 36, 18, 0, 302, 304, 3, 38, 19, 0, 303, 301, 1, 0, 0, 0, 303, 302, 1, 0, 0, 0, 304, 35, 1, 0, 0, 0, 305, 306, 5, 76, 0, 0, 306, 311, 3, 32, 16, 0, 307, 308, 5, 38, 0, 0, 308, 310, 3, 32, 16, 0, 309, 307, 1, 0, 0, 0, 310, 313, 1, 0, 0, 0, 311, 309, 1, 0, 0, 0, 311, 312, 1, 0, 0, 0, 312, 37, 1, 0, 0, 0, 313, 311, 1, 0, 0, 0, 314, 315, 5, 69, 0, 0, 315, 316, 3, 36, 18, 0, 316, 317, 5, 70, 0, 0, 317, 39, 1, 0, 0, 0, 318, 319, 5, 13, 0, 0, 319, 324, 3, 32, 16, 0, 320, 321, 5, 38, 0, 0, 321, 323, 3, 32, 16, 0, 322, 320, 1, 0, 0, 0, 323, 326, 1, 0, 0, 0, 324, 322, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 328, 1, 0, 0, 0, 326, 324, 1, 0, 0, 0, 327, 329, 3, 26, 13, 0, 328, 327, 1, 0, 0, 0, 328, 329, 1, 0, 0, 0, 329, 332, 1, 0, 0, 0, 330, 331, 5, 33, 0, 0, 331, 333, 3, 26, 13, 0, 332, 330, 1, 0, 0, 0, 332, 333, 1, 0, 0, 0, 333, 41, 1, 0, 0, 0, 334, 335, 5, 4, 0, 0, 335, 336, 3, 26, 13, 0, 336, 43, 1, 0, 0, 0, 337, 339, 5, 19, 0, 0, 338, 340, 3, 26, 13, 0, 339, 338, 1, 0, 0, 0, 339, 340, 1, 0, 0, 0, 340, 343, 1, 0, 0, 0, 341, 342, 5, 33, 0, 0, 342, 344, 3, 26, 13, 0, 343, 341, 1, 0, 0, 0, 343, 344, 1, 0, 0, 0, 344, 45, 1, 0, 0, 
0, 345, 346, 5, 8, 0, 0, 346, 349, 3, 26, 13, 0, 347, 348, 5, 33, 0, 0, 348, 350, 3, 26, 13, 0, 349, 347, 1, 0, 0, 0, 349, 350, 1, 0, 0, 0, 350, 47, 1, 0, 0, 0, 351, 356, 3, 54, 27, 0, 352, 353, 5, 40, 0, 0, 353, 355, 3, 54, 27, 0, 354, 352, 1, 0, 0, 0, 355, 358, 1, 0, 0, 0, 356, 354, 1, 0, 0, 0, 356, 357, 1, 0, 0, 0, 357, 49, 1, 0, 0, 0, 358, 356, 1, 0, 0, 0, 359, 364, 3, 56, 28, 0, 360, 361, 5, 40, 0, 0, 361, 363, 3, 56, 28, 0, 362, 360, 1, 0, 0, 0, 363, 366, 1, 0, 0, 0, 364, 362, 1, 0, 0, 0, 364, 365, 1, 0, 0, 0, 365, 51, 1, 0, 0, 0, 366, 364, 1, 0, 0, 0, 367, 372, 3, 50, 25, 0, 368, 369, 5, 38, 0, 0, 369, 371, 3, 50, 25, 0, 370, 368, 1, 0, 0, 0, 371, 374, 1, 0, 0, 0, 372, 370, 1, 0, 0, 0, 372, 373, 1, 0, 0, 0, 373, 53, 1, 0, 0, 0, 374, 372, 1, 0, 0, 0, 375, 376, 7, 2, 0, 0, 376, 55, 1, 0, 0, 0, 377, 378, 5, 80, 0, 0, 378, 57, 1, 0, 0, 0, 379, 422, 5, 49, 0, 0, 380, 381, 3, 92, 46, 0, 381, 382, 5, 71, 0, 0, 382, 422, 1, 0, 0, 0, 383, 422, 3, 90, 45, 0, 384, 422, 3, 92, 46, 0, 385, 422, 3, 86, 43, 0, 386, 422, 3, 60, 30, 0, 387, 422, 3, 94, 47, 0, 388, 389, 5, 69, 0, 0, 389, 394, 3, 88, 44, 0, 390, 391, 5, 38, 0, 0, 391, 393, 3, 88, 44, 0, 392, 390, 1, 0, 0, 0, 393, 396, 1, 0, 0, 0, 394, 392, 1, 0, 0, 0, 394, 395, 1, 0, 0, 0, 395, 397, 1, 0, 0, 0, 396, 394, 1, 0, 0, 0, 397, 398, 5, 70, 0, 0, 398, 422, 1, 0, 0, 0, 399, 400, 5, 69, 0, 0, 400, 405, 3, 86, 43, 0, 401, 402, 5, 38, 0, 0, 402, 404, 3, 86, 43, 0, 403, 401, 1, 0, 0, 0, 404, 407, 1, 0, 0, 0, 405, 403, 1, 0, 0, 0, 405, 406, 1, 0, 0, 0, 406, 408, 1, 0, 0, 0, 407, 405, 1, 0, 0, 0, 408, 409, 5, 70, 0, 0, 409, 422, 1, 0, 0, 0, 410, 411, 5, 69, 0, 0, 411, 416, 3, 94, 47, 0, 412, 413, 5, 38, 0, 0, 413, 415, 3, 94, 47, 0, 414, 412, 1, 0, 0, 0, 415, 418, 1, 0, 0, 0, 416, 414, 1, 0, 0, 0, 416, 417, 1, 0, 0, 0, 417, 419, 1, 0, 0, 0, 418, 416, 1, 0, 0, 0, 419, 420, 5, 70, 0, 0, 420, 422, 1, 0, 0, 0, 421, 379, 1, 0, 0, 0, 421, 380, 1, 0, 0, 0, 421, 383, 1, 0, 0, 0, 421, 384, 1, 0, 0, 0, 421, 385, 1, 0, 0, 0, 421, 386, 1, 0, 0, 0, 421, 387, 1, 0, 0, 0, 421, 388, 1, 0, 0, 0, 421, 399, 1, 0, 0, 0, 421, 410, 1, 0, 0, 0, 422, 59, 1, 0, 0, 0, 423, 426, 5, 52, 0, 0, 424, 426, 5, 68, 0, 0, 425, 423, 1, 0, 0, 0, 425, 424, 1, 0, 0, 0, 426, 61, 1, 0, 0, 0, 427, 428, 5, 10, 0, 0, 428, 429, 5, 31, 0, 0, 429, 63, 1, 0, 0, 0, 430, 431, 5, 18, 0, 0, 431, 436, 3, 66, 33, 0, 432, 433, 5, 38, 0, 0, 433, 435, 3, 66, 33, 0, 434, 432, 1, 0, 0, 0, 435, 438, 1, 0, 0, 0, 436, 434, 1, 0, 0, 0, 436, 437, 1, 0, 0, 0, 437, 65, 1, 0, 0, 0, 438, 436, 1, 0, 0, 0, 439, 441, 3, 10, 5, 0, 440, 442, 7, 3, 0, 0, 441, 440, 1, 0, 0, 0, 441, 442, 1, 0, 0, 0, 442, 445, 1, 0, 0, 0, 443, 444, 5, 50, 0, 0, 444, 446, 7, 4, 0, 0, 445, 443, 1, 0, 0, 0, 445, 446, 1, 0, 0, 0, 446, 67, 1, 0, 0, 0, 447, 448, 5, 9, 0, 0, 448, 449, 3, 52, 26, 0, 449, 69, 1, 0, 0, 0, 450, 451, 5, 2, 0, 0, 451, 452, 3, 52, 26, 0, 452, 71, 1, 0, 0, 0, 453, 454, 5, 15, 0, 0, 454, 459, 3, 74, 37, 0, 455, 456, 5, 38, 0, 0, 456, 458, 3, 74, 37, 0, 457, 455, 1, 0, 0, 0, 458, 461, 1, 0, 0, 0, 459, 457, 1, 0, 0, 0, 459, 460, 1, 0, 0, 0, 460, 73, 1, 0, 0, 0, 461, 459, 1, 0, 0, 0, 462, 463, 3, 50, 25, 0, 463, 464, 5, 84, 0, 0, 464, 465, 3, 50, 25, 0, 465, 75, 1, 0, 0, 0, 466, 467, 5, 1, 0, 0, 467, 468, 3, 18, 9, 0, 468, 470, 3, 94, 47, 0, 469, 471, 3, 82, 41, 0, 470, 469, 1, 0, 0, 0, 470, 471, 1, 0, 0, 0, 471, 77, 1, 0, 0, 0, 472, 473, 5, 7, 0, 0, 473, 474, 3, 18, 9, 0, 474, 475, 3, 94, 47, 0, 475, 79, 1, 0, 0, 0, 476, 477, 5, 14, 0, 0, 477, 478, 3, 48, 24, 0, 478, 81, 1, 0, 0, 0, 479, 484, 3, 84, 42, 0, 480, 481, 5, 38, 0, 
0, 481, 483, 3, 84, 42, 0, 482, 480, 1, 0, 0, 0, 483, 486, 1, 0, 0, 0, 484, 482, 1, 0, 0, 0, 484, 485, 1, 0, 0, 0, 485, 83, 1, 0, 0, 0, 486, 484, 1, 0, 0, 0, 487, 488, 3, 54, 27, 0, 488, 489, 5, 36, 0, 0, 489, 490, 3, 58, 29, 0, 490, 85, 1, 0, 0, 0, 491, 492, 7, 5, 0, 0, 492, 87, 1, 0, 0, 0, 493, 496, 3, 90, 45, 0, 494, 496, 3, 92, 46, 0, 495, 493, 1, 0, 0, 0, 495, 494, 1, 0, 0, 0, 496, 89, 1, 0, 0, 0, 497, 499, 7, 0, 0, 0, 498, 497, 1, 0, 0, 0, 498, 499, 1, 0, 0, 0, 499, 500, 1, 0, 0, 0, 500, 501, 5, 32, 0, 0, 501, 91, 1, 0, 0, 0, 502, 504, 7, 0, 0, 0, 503, 502, 1, 0, 0, 0, 503, 504, 1, 0, 0, 0, 504, 505, 1, 0, 0, 0, 505, 506, 5, 31, 0, 0, 506, 93, 1, 0, 0, 0, 507, 508, 5, 30, 0, 0, 508, 95, 1, 0, 0, 0, 509, 510, 7, 6, 0, 0, 510, 97, 1, 0, 0, 0, 511, 512, 5, 5, 0, 0, 512, 513, 3, 100, 50, 0, 513, 99, 1, 0, 0, 0, 514, 515, 5, 69, 0, 0, 515, 516, 3, 2, 1, 0, 516, 517, 5, 70, 0, 0, 517, 101, 1, 0, 0, 0, 518, 519, 5, 17, 0, 0, 519, 520, 5, 106, 0, 0, 520, 103, 1, 0, 0, 0, 521, 522, 5, 12, 0, 0, 522, 523, 5, 110, 0, 0, 523, 105, 1, 0, 0, 0, 524, 525, 5, 3, 0, 0, 525, 528, 5, 90, 0, 0, 526, 527, 5, 88, 0, 0, 527, 529, 3, 50, 25, 0, 528, 526, 1, 0, 0, 0, 528, 529, 1, 0, 0, 0, 529, 539, 1, 0, 0, 0, 530, 531, 5, 89, 0, 0, 531, 536, 3, 108, 54, 0, 532, 533, 5, 38, 0, 0, 533, 535, 3, 108, 54, 0, 534, 532, 1, 0, 0, 0, 535, 538, 1, 0, 0, 0, 536, 534, 1, 0, 0, 0, 536, 537, 1, 0, 0, 0, 537, 540, 1, 0, 0, 0, 538, 536, 1, 0, 0, 0, 539, 530, 1, 0, 0, 0, 539, 540, 1, 0, 0, 0, 540, 107, 1, 0, 0, 0, 541, 542, 3, 50, 25, 0, 542, 543, 5, 36, 0, 0, 543, 545, 1, 0, 0, 0, 544, 541, 1, 0, 0, 0, 544, 545, 1, 0, 0, 0, 545, 546, 1, 0, 0, 0, 546, 547, 3, 50, 25, 0, 547, 109, 1, 0, 0, 0, 548, 549, 5, 11, 0, 0, 549, 550, 5, 25, 0, 0, 550, 551, 5, 88, 0, 0, 551, 552, 3, 52, 26, 0, 552, 111, 1, 0, 0, 0, 53, 123, 132, 148, 160, 169, 177, 181, 189, 191, 196, 203, 208, 215, 221, 229, 231, 242, 249, 260, 263, 277, 285, 293, 297, 303, 311, 324, 328, 332, 339, 343, 349, 356, 364, 372, 394, 405, 416, 421, 425, 436, 441, 445, 459, 470, 484, 495, 498, 503, 528, 536, 539, 544] \ No newline at end of file +[4, 1, 124, 567, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, 47, 7, 47, 2, 48, 7, 48, 2, 49, 7, 49, 2, 50, 7, 50, 2, 51, 7, 51, 2, 52, 7, 52, 2, 53, 7, 53, 2, 54, 7, 54, 2, 55, 7, 55, 2, 56, 7, 56, 2, 57, 7, 57, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 126, 8, 1, 10, 1, 12, 1, 129, 9, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 3, 2, 137, 8, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 3, 3, 153, 8, 3, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 3, 5, 165, 8, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 172, 8, 5, 10, 5, 12, 5, 175, 9, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 3, 5, 182, 8, 5, 1, 5, 1, 5, 3, 5, 186, 8, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 194, 8, 5, 10, 5, 12, 5, 197, 9, 5, 1, 6, 1, 6, 3, 6, 201, 8, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 3, 6, 208, 8, 6, 1, 6, 1, 6, 1, 6, 
3, 6, 213, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 220, 8, 7, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, 226, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 5, 8, 234, 8, 8, 10, 8, 12, 8, 237, 9, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 3, 9, 247, 8, 9, 1, 9, 1, 9, 1, 9, 5, 9, 252, 8, 9, 10, 9, 12, 9, 255, 9, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 5, 10, 263, 8, 10, 10, 10, 12, 10, 266, 9, 10, 3, 10, 268, 8, 10, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 5, 13, 280, 8, 13, 10, 13, 12, 13, 283, 9, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 3, 14, 290, 8, 14, 1, 15, 1, 15, 1, 15, 1, 15, 5, 15, 296, 8, 15, 10, 15, 12, 15, 299, 9, 15, 1, 15, 3, 15, 302, 8, 15, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 3, 16, 309, 8, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1, 19, 1, 19, 3, 19, 317, 8, 19, 1, 20, 1, 20, 1, 20, 1, 20, 5, 20, 323, 8, 20, 10, 20, 12, 20, 326, 9, 20, 1, 21, 1, 21, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22, 1, 22, 5, 22, 336, 8, 22, 10, 22, 12, 22, 339, 9, 22, 1, 22, 3, 22, 342, 8, 22, 1, 22, 1, 22, 3, 22, 346, 8, 22, 1, 23, 1, 23, 1, 23, 1, 24, 1, 24, 3, 24, 353, 8, 24, 1, 24, 1, 24, 3, 24, 357, 8, 24, 1, 25, 1, 25, 1, 25, 1, 25, 3, 25, 363, 8, 25, 1, 26, 1, 26, 1, 26, 5, 26, 368, 8, 26, 10, 26, 12, 26, 371, 9, 26, 1, 27, 1, 27, 1, 27, 5, 27, 376, 8, 27, 10, 27, 12, 27, 379, 9, 27, 1, 28, 1, 28, 1, 28, 5, 28, 384, 8, 28, 10, 28, 12, 28, 387, 9, 28, 1, 29, 1, 29, 1, 30, 1, 30, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 5, 31, 406, 8, 31, 10, 31, 12, 31, 409, 9, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 5, 31, 417, 8, 31, 10, 31, 12, 31, 420, 9, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 5, 31, 428, 8, 31, 10, 31, 12, 31, 431, 9, 31, 1, 31, 1, 31, 3, 31, 435, 8, 31, 1, 32, 1, 32, 3, 32, 439, 8, 32, 1, 33, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, 34, 5, 34, 448, 8, 34, 10, 34, 12, 34, 451, 9, 34, 1, 35, 1, 35, 3, 35, 455, 8, 35, 1, 35, 1, 35, 3, 35, 459, 8, 35, 1, 36, 1, 36, 1, 36, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 5, 38, 471, 8, 38, 10, 38, 12, 38, 474, 9, 38, 1, 39, 1, 39, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 3, 40, 484, 8, 40, 1, 41, 1, 41, 1, 41, 1, 41, 1, 42, 1, 42, 1, 42, 1, 43, 1, 43, 1, 43, 5, 43, 496, 8, 43, 10, 43, 12, 43, 499, 9, 43, 1, 44, 1, 44, 1, 44, 1, 44, 1, 45, 1, 45, 1, 46, 1, 46, 3, 46, 509, 8, 46, 1, 47, 3, 47, 512, 8, 47, 1, 47, 1, 47, 1, 48, 3, 48, 517, 8, 48, 1, 48, 1, 48, 1, 49, 1, 49, 1, 50, 1, 50, 1, 51, 1, 51, 1, 51, 1, 52, 1, 52, 1, 52, 1, 52, 1, 53, 1, 53, 1, 53, 1, 54, 1, 54, 1, 54, 1, 55, 1, 55, 1, 55, 1, 55, 3, 55, 542, 8, 55, 1, 55, 1, 55, 1, 55, 1, 55, 5, 55, 548, 8, 55, 10, 55, 12, 55, 551, 9, 55, 3, 55, 553, 8, 55, 1, 56, 1, 56, 1, 56, 3, 56, 558, 8, 56, 1, 56, 1, 56, 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 0, 4, 2, 10, 16, 18, 58, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 0, 8, 1, 0, 63, 64, 1, 0, 65, 67, 2, 0, 25, 25, 30, 30, 1, 0, 71, 72, 2, 0, 35, 35, 39, 39, 1, 0, 42, 43, 2, 0, 41, 41, 55, 55, 2, 0, 56, 56, 58, 62, 592, 0, 116, 1, 0, 0, 0, 2, 119, 1, 0, 0, 0, 4, 136, 1, 0, 0, 0, 6, 152, 1, 0, 0, 0, 8, 154, 1, 0, 0, 0, 10, 185, 1, 0, 0, 0, 12, 212, 1, 0, 0, 0, 14, 219, 1, 0, 0, 0, 16, 225, 1, 0, 0, 0, 18, 246, 1, 0, 0, 0, 20, 256, 1, 0, 0, 0, 22, 271, 1, 0, 0, 0, 24, 273, 1, 0, 0, 0, 26, 276, 1, 0, 0, 0, 28, 289, 1, 0, 0, 0, 30, 291, 1, 0, 0, 0, 32, 308, 1, 0, 0, 0, 34, 310, 1, 0, 0, 
0, 36, 312, 1, 0, 0, 0, 38, 316, 1, 0, 0, 0, 40, 318, 1, 0, 0, 0, 42, 327, 1, 0, 0, 0, 44, 331, 1, 0, 0, 0, 46, 347, 1, 0, 0, 0, 48, 350, 1, 0, 0, 0, 50, 358, 1, 0, 0, 0, 52, 364, 1, 0, 0, 0, 54, 372, 1, 0, 0, 0, 56, 380, 1, 0, 0, 0, 58, 388, 1, 0, 0, 0, 60, 390, 1, 0, 0, 0, 62, 434, 1, 0, 0, 0, 64, 438, 1, 0, 0, 0, 66, 440, 1, 0, 0, 0, 68, 443, 1, 0, 0, 0, 70, 452, 1, 0, 0, 0, 72, 460, 1, 0, 0, 0, 74, 463, 1, 0, 0, 0, 76, 466, 1, 0, 0, 0, 78, 475, 1, 0, 0, 0, 80, 479, 1, 0, 0, 0, 82, 485, 1, 0, 0, 0, 84, 489, 1, 0, 0, 0, 86, 492, 1, 0, 0, 0, 88, 500, 1, 0, 0, 0, 90, 504, 1, 0, 0, 0, 92, 508, 1, 0, 0, 0, 94, 511, 1, 0, 0, 0, 96, 516, 1, 0, 0, 0, 98, 520, 1, 0, 0, 0, 100, 522, 1, 0, 0, 0, 102, 524, 1, 0, 0, 0, 104, 527, 1, 0, 0, 0, 106, 531, 1, 0, 0, 0, 108, 534, 1, 0, 0, 0, 110, 537, 1, 0, 0, 0, 112, 557, 1, 0, 0, 0, 114, 561, 1, 0, 0, 0, 116, 117, 3, 2, 1, 0, 117, 118, 5, 0, 0, 1, 118, 1, 1, 0, 0, 0, 119, 120, 6, 1, -1, 0, 120, 121, 3, 4, 2, 0, 121, 127, 1, 0, 0, 0, 122, 123, 10, 1, 0, 0, 123, 124, 5, 29, 0, 0, 124, 126, 3, 6, 3, 0, 125, 122, 1, 0, 0, 0, 126, 129, 1, 0, 0, 0, 127, 125, 1, 0, 0, 0, 127, 128, 1, 0, 0, 0, 128, 3, 1, 0, 0, 0, 129, 127, 1, 0, 0, 0, 130, 137, 3, 102, 51, 0, 131, 137, 3, 30, 15, 0, 132, 137, 3, 24, 12, 0, 133, 137, 3, 44, 22, 0, 134, 137, 3, 106, 53, 0, 135, 137, 3, 108, 54, 0, 136, 130, 1, 0, 0, 0, 136, 131, 1, 0, 0, 0, 136, 132, 1, 0, 0, 0, 136, 133, 1, 0, 0, 0, 136, 134, 1, 0, 0, 0, 136, 135, 1, 0, 0, 0, 137, 5, 1, 0, 0, 0, 138, 153, 3, 46, 23, 0, 139, 153, 3, 50, 25, 0, 140, 153, 3, 66, 33, 0, 141, 153, 3, 114, 57, 0, 142, 153, 3, 72, 36, 0, 143, 153, 3, 68, 34, 0, 144, 153, 3, 48, 24, 0, 145, 153, 3, 8, 4, 0, 146, 153, 3, 74, 37, 0, 147, 153, 3, 76, 38, 0, 148, 153, 3, 80, 40, 0, 149, 153, 3, 82, 41, 0, 150, 153, 3, 110, 55, 0, 151, 153, 3, 84, 42, 0, 152, 138, 1, 0, 0, 0, 152, 139, 1, 0, 0, 0, 152, 140, 1, 0, 0, 0, 152, 141, 1, 0, 0, 0, 152, 142, 1, 0, 0, 0, 152, 143, 1, 0, 0, 0, 152, 144, 1, 0, 0, 0, 152, 145, 1, 0, 0, 0, 152, 146, 1, 0, 0, 0, 152, 147, 1, 0, 0, 0, 152, 148, 1, 0, 0, 0, 152, 149, 1, 0, 0, 0, 152, 150, 1, 0, 0, 0, 152, 151, 1, 0, 0, 0, 153, 7, 1, 0, 0, 0, 154, 155, 5, 20, 0, 0, 155, 156, 3, 10, 5, 0, 156, 9, 1, 0, 0, 0, 157, 158, 6, 5, -1, 0, 158, 159, 5, 48, 0, 0, 159, 186, 3, 10, 5, 7, 160, 186, 3, 14, 7, 0, 161, 186, 3, 12, 6, 0, 162, 164, 3, 14, 7, 0, 163, 165, 5, 48, 0, 0, 164, 163, 1, 0, 0, 0, 164, 165, 1, 0, 0, 0, 165, 166, 1, 0, 0, 0, 166, 167, 5, 45, 0, 0, 167, 168, 5, 44, 0, 0, 168, 173, 3, 14, 7, 0, 169, 170, 5, 38, 0, 0, 170, 172, 3, 14, 7, 0, 171, 169, 1, 0, 0, 0, 172, 175, 1, 0, 0, 0, 173, 171, 1, 0, 0, 0, 173, 174, 1, 0, 0, 0, 174, 176, 1, 0, 0, 0, 175, 173, 1, 0, 0, 0, 176, 177, 5, 54, 0, 0, 177, 186, 1, 0, 0, 0, 178, 179, 3, 14, 7, 0, 179, 181, 5, 46, 0, 0, 180, 182, 5, 48, 0, 0, 181, 180, 1, 0, 0, 0, 181, 182, 1, 0, 0, 0, 182, 183, 1, 0, 0, 0, 183, 184, 5, 49, 0, 0, 184, 186, 1, 0, 0, 0, 185, 157, 1, 0, 0, 0, 185, 160, 1, 0, 0, 0, 185, 161, 1, 0, 0, 0, 185, 162, 1, 0, 0, 0, 185, 178, 1, 0, 0, 0, 186, 195, 1, 0, 0, 0, 187, 188, 10, 4, 0, 0, 188, 189, 5, 34, 0, 0, 189, 194, 3, 10, 5, 5, 190, 191, 10, 3, 0, 0, 191, 192, 5, 51, 0, 0, 192, 194, 3, 10, 5, 4, 193, 187, 1, 0, 0, 0, 193, 190, 1, 0, 0, 0, 194, 197, 1, 0, 0, 0, 195, 193, 1, 0, 0, 0, 195, 196, 1, 0, 0, 0, 196, 11, 1, 0, 0, 0, 197, 195, 1, 0, 0, 0, 198, 200, 3, 14, 7, 0, 199, 201, 5, 48, 0, 0, 200, 199, 1, 0, 0, 0, 200, 201, 1, 0, 0, 0, 201, 202, 1, 0, 0, 0, 202, 203, 5, 47, 0, 0, 203, 204, 3, 98, 49, 0, 204, 213, 1, 0, 0, 0, 205, 207, 3, 14, 7, 0, 206, 208, 5, 48, 0, 
0, 207, 206, 1, 0, 0, 0, 207, 208, 1, 0, 0, 0, 208, 209, 1, 0, 0, 0, 209, 210, 5, 53, 0, 0, 210, 211, 3, 98, 49, 0, 211, 213, 1, 0, 0, 0, 212, 198, 1, 0, 0, 0, 212, 205, 1, 0, 0, 0, 213, 13, 1, 0, 0, 0, 214, 220, 3, 16, 8, 0, 215, 216, 3, 16, 8, 0, 216, 217, 3, 100, 50, 0, 217, 218, 3, 16, 8, 0, 218, 220, 1, 0, 0, 0, 219, 214, 1, 0, 0, 0, 219, 215, 1, 0, 0, 0, 220, 15, 1, 0, 0, 0, 221, 222, 6, 8, -1, 0, 222, 226, 3, 18, 9, 0, 223, 224, 7, 0, 0, 0, 224, 226, 3, 16, 8, 3, 225, 221, 1, 0, 0, 0, 225, 223, 1, 0, 0, 0, 226, 235, 1, 0, 0, 0, 227, 228, 10, 2, 0, 0, 228, 229, 7, 1, 0, 0, 229, 234, 3, 16, 8, 3, 230, 231, 10, 1, 0, 0, 231, 232, 7, 0, 0, 0, 232, 234, 3, 16, 8, 2, 233, 227, 1, 0, 0, 0, 233, 230, 1, 0, 0, 0, 234, 237, 1, 0, 0, 0, 235, 233, 1, 0, 0, 0, 235, 236, 1, 0, 0, 0, 236, 17, 1, 0, 0, 0, 237, 235, 1, 0, 0, 0, 238, 239, 6, 9, -1, 0, 239, 247, 3, 62, 31, 0, 240, 247, 3, 52, 26, 0, 241, 247, 3, 20, 10, 0, 242, 243, 5, 44, 0, 0, 243, 244, 3, 10, 5, 0, 244, 245, 5, 54, 0, 0, 245, 247, 1, 0, 0, 0, 246, 238, 1, 0, 0, 0, 246, 240, 1, 0, 0, 0, 246, 241, 1, 0, 0, 0, 246, 242, 1, 0, 0, 0, 247, 253, 1, 0, 0, 0, 248, 249, 10, 1, 0, 0, 249, 250, 5, 37, 0, 0, 250, 252, 3, 22, 11, 0, 251, 248, 1, 0, 0, 0, 252, 255, 1, 0, 0, 0, 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254, 19, 1, 0, 0, 0, 255, 253, 1, 0, 0, 0, 256, 257, 3, 58, 29, 0, 257, 267, 5, 44, 0, 0, 258, 268, 5, 65, 0, 0, 259, 264, 3, 10, 5, 0, 260, 261, 5, 38, 0, 0, 261, 263, 3, 10, 5, 0, 262, 260, 1, 0, 0, 0, 263, 266, 1, 0, 0, 0, 264, 262, 1, 0, 0, 0, 264, 265, 1, 0, 0, 0, 265, 268, 1, 0, 0, 0, 266, 264, 1, 0, 0, 0, 267, 258, 1, 0, 0, 0, 267, 259, 1, 0, 0, 0, 267, 268, 1, 0, 0, 0, 268, 269, 1, 0, 0, 0, 269, 270, 5, 54, 0, 0, 270, 21, 1, 0, 0, 0, 271, 272, 3, 58, 29, 0, 272, 23, 1, 0, 0, 0, 273, 274, 5, 16, 0, 0, 274, 275, 3, 26, 13, 0, 275, 25, 1, 0, 0, 0, 276, 281, 3, 28, 14, 0, 277, 278, 5, 38, 0, 0, 278, 280, 3, 28, 14, 0, 279, 277, 1, 0, 0, 0, 280, 283, 1, 0, 0, 0, 281, 279, 1, 0, 0, 0, 281, 282, 1, 0, 0, 0, 282, 27, 1, 0, 0, 0, 283, 281, 1, 0, 0, 0, 284, 290, 3, 10, 5, 0, 285, 286, 3, 52, 26, 0, 286, 287, 5, 36, 0, 0, 287, 288, 3, 10, 5, 0, 288, 290, 1, 0, 0, 0, 289, 284, 1, 0, 0, 0, 289, 285, 1, 0, 0, 0, 290, 29, 1, 0, 0, 0, 291, 292, 5, 6, 0, 0, 292, 297, 3, 32, 16, 0, 293, 294, 5, 38, 0, 0, 294, 296, 3, 32, 16, 0, 295, 293, 1, 0, 0, 0, 296, 299, 1, 0, 0, 0, 297, 295, 1, 0, 0, 0, 297, 298, 1, 0, 0, 0, 298, 301, 1, 0, 0, 0, 299, 297, 1, 0, 0, 0, 300, 302, 3, 38, 19, 0, 301, 300, 1, 0, 0, 0, 301, 302, 1, 0, 0, 0, 302, 31, 1, 0, 0, 0, 303, 304, 3, 34, 17, 0, 304, 305, 5, 114, 0, 0, 305, 306, 3, 36, 18, 0, 306, 309, 1, 0, 0, 0, 307, 309, 3, 36, 18, 0, 308, 303, 1, 0, 0, 0, 308, 307, 1, 0, 0, 0, 309, 33, 1, 0, 0, 0, 310, 311, 5, 25, 0, 0, 311, 35, 1, 0, 0, 0, 312, 313, 7, 2, 0, 0, 313, 37, 1, 0, 0, 0, 314, 317, 3, 40, 20, 0, 315, 317, 3, 42, 21, 0, 316, 314, 1, 0, 0, 0, 316, 315, 1, 0, 0, 0, 317, 39, 1, 0, 0, 0, 318, 319, 5, 76, 0, 0, 319, 324, 5, 25, 0, 0, 320, 321, 5, 38, 0, 0, 321, 323, 5, 25, 0, 0, 322, 320, 1, 0, 0, 0, 323, 326, 1, 0, 0, 0, 324, 322, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 41, 1, 0, 0, 0, 326, 324, 1, 0, 0, 0, 327, 328, 5, 69, 0, 0, 328, 329, 3, 40, 20, 0, 329, 330, 5, 70, 0, 0, 330, 43, 1, 0, 0, 0, 331, 332, 5, 13, 0, 0, 332, 337, 3, 32, 16, 0, 333, 334, 5, 38, 0, 0, 334, 336, 3, 32, 16, 0, 335, 333, 1, 0, 0, 0, 336, 339, 1, 0, 0, 0, 337, 335, 1, 0, 0, 0, 337, 338, 1, 0, 0, 0, 338, 341, 1, 0, 0, 0, 339, 337, 1, 0, 0, 0, 340, 342, 3, 26, 13, 0, 341, 340, 1, 0, 0, 0, 341, 342, 1, 0, 0, 0, 342, 345, 1, 0, 0, 0, 
343, 344, 5, 33, 0, 0, 344, 346, 3, 26, 13, 0, 345, 343, 1, 0, 0, 0, 345, 346, 1, 0, 0, 0, 346, 45, 1, 0, 0, 0, 347, 348, 5, 4, 0, 0, 348, 349, 3, 26, 13, 0, 349, 47, 1, 0, 0, 0, 350, 352, 5, 19, 0, 0, 351, 353, 3, 26, 13, 0, 352, 351, 1, 0, 0, 0, 352, 353, 1, 0, 0, 0, 353, 356, 1, 0, 0, 0, 354, 355, 5, 33, 0, 0, 355, 357, 3, 26, 13, 0, 356, 354, 1, 0, 0, 0, 356, 357, 1, 0, 0, 0, 357, 49, 1, 0, 0, 0, 358, 359, 5, 8, 0, 0, 359, 362, 3, 26, 13, 0, 360, 361, 5, 33, 0, 0, 361, 363, 3, 26, 13, 0, 362, 360, 1, 0, 0, 0, 362, 363, 1, 0, 0, 0, 363, 51, 1, 0, 0, 0, 364, 369, 3, 58, 29, 0, 365, 366, 5, 40, 0, 0, 366, 368, 3, 58, 29, 0, 367, 365, 1, 0, 0, 0, 368, 371, 1, 0, 0, 0, 369, 367, 1, 0, 0, 0, 369, 370, 1, 0, 0, 0, 370, 53, 1, 0, 0, 0, 371, 369, 1, 0, 0, 0, 372, 377, 3, 60, 30, 0, 373, 374, 5, 40, 0, 0, 374, 376, 3, 60, 30, 0, 375, 373, 1, 0, 0, 0, 376, 379, 1, 0, 0, 0, 377, 375, 1, 0, 0, 0, 377, 378, 1, 0, 0, 0, 378, 55, 1, 0, 0, 0, 379, 377, 1, 0, 0, 0, 380, 385, 3, 54, 27, 0, 381, 382, 5, 38, 0, 0, 382, 384, 3, 54, 27, 0, 383, 381, 1, 0, 0, 0, 384, 387, 1, 0, 0, 0, 385, 383, 1, 0, 0, 0, 385, 386, 1, 0, 0, 0, 386, 57, 1, 0, 0, 0, 387, 385, 1, 0, 0, 0, 388, 389, 7, 3, 0, 0, 389, 59, 1, 0, 0, 0, 390, 391, 5, 80, 0, 0, 391, 61, 1, 0, 0, 0, 392, 435, 5, 49, 0, 0, 393, 394, 3, 96, 48, 0, 394, 395, 5, 71, 0, 0, 395, 435, 1, 0, 0, 0, 396, 435, 3, 94, 47, 0, 397, 435, 3, 96, 48, 0, 398, 435, 3, 90, 45, 0, 399, 435, 3, 64, 32, 0, 400, 435, 3, 98, 49, 0, 401, 402, 5, 69, 0, 0, 402, 407, 3, 92, 46, 0, 403, 404, 5, 38, 0, 0, 404, 406, 3, 92, 46, 0, 405, 403, 1, 0, 0, 0, 406, 409, 1, 0, 0, 0, 407, 405, 1, 0, 0, 0, 407, 408, 1, 0, 0, 0, 408, 410, 1, 0, 0, 0, 409, 407, 1, 0, 0, 0, 410, 411, 5, 70, 0, 0, 411, 435, 1, 0, 0, 0, 412, 413, 5, 69, 0, 0, 413, 418, 3, 90, 45, 0, 414, 415, 5, 38, 0, 0, 415, 417, 3, 90, 45, 0, 416, 414, 1, 0, 0, 0, 417, 420, 1, 0, 0, 0, 418, 416, 1, 0, 0, 0, 418, 419, 1, 0, 0, 0, 419, 421, 1, 0, 0, 0, 420, 418, 1, 0, 0, 0, 421, 422, 5, 70, 0, 0, 422, 435, 1, 0, 0, 0, 423, 424, 5, 69, 0, 0, 424, 429, 3, 98, 49, 0, 425, 426, 5, 38, 0, 0, 426, 428, 3, 98, 49, 0, 427, 425, 1, 0, 0, 0, 428, 431, 1, 0, 0, 0, 429, 427, 1, 0, 0, 0, 429, 430, 1, 0, 0, 0, 430, 432, 1, 0, 0, 0, 431, 429, 1, 0, 0, 0, 432, 433, 5, 70, 0, 0, 433, 435, 1, 0, 0, 0, 434, 392, 1, 0, 0, 0, 434, 393, 1, 0, 0, 0, 434, 396, 1, 0, 0, 0, 434, 397, 1, 0, 0, 0, 434, 398, 1, 0, 0, 0, 434, 399, 1, 0, 0, 0, 434, 400, 1, 0, 0, 0, 434, 401, 1, 0, 0, 0, 434, 412, 1, 0, 0, 0, 434, 423, 1, 0, 0, 0, 435, 63, 1, 0, 0, 0, 436, 439, 5, 52, 0, 0, 437, 439, 5, 68, 0, 0, 438, 436, 1, 0, 0, 0, 438, 437, 1, 0, 0, 0, 439, 65, 1, 0, 0, 0, 440, 441, 5, 10, 0, 0, 441, 442, 5, 31, 0, 0, 442, 67, 1, 0, 0, 0, 443, 444, 5, 18, 0, 0, 444, 449, 3, 70, 35, 0, 445, 446, 5, 38, 0, 0, 446, 448, 3, 70, 35, 0, 447, 445, 1, 0, 0, 0, 448, 451, 1, 0, 0, 0, 449, 447, 1, 0, 0, 0, 449, 450, 1, 0, 0, 0, 450, 69, 1, 0, 0, 0, 451, 449, 1, 0, 0, 0, 452, 454, 3, 10, 5, 0, 453, 455, 7, 4, 0, 0, 454, 453, 1, 0, 0, 0, 454, 455, 1, 0, 0, 0, 455, 458, 1, 0, 0, 0, 456, 457, 5, 50, 0, 0, 457, 459, 7, 5, 0, 0, 458, 456, 1, 0, 0, 0, 458, 459, 1, 0, 0, 0, 459, 71, 1, 0, 0, 0, 460, 461, 5, 9, 0, 0, 461, 462, 3, 56, 28, 0, 462, 73, 1, 0, 0, 0, 463, 464, 5, 2, 0, 0, 464, 465, 3, 56, 28, 0, 465, 75, 1, 0, 0, 0, 466, 467, 5, 15, 0, 0, 467, 472, 3, 78, 39, 0, 468, 469, 5, 38, 0, 0, 469, 471, 3, 78, 39, 0, 470, 468, 1, 0, 0, 0, 471, 474, 1, 0, 0, 0, 472, 470, 1, 0, 0, 0, 472, 473, 1, 0, 0, 0, 473, 77, 1, 0, 0, 0, 474, 472, 1, 0, 0, 0, 475, 476, 3, 54, 27, 0, 476, 477, 5, 84, 0, 0, 
477, 478, 3, 54, 27, 0, 478, 79, 1, 0, 0, 0, 479, 480, 5, 1, 0, 0, 480, 481, 3, 18, 9, 0, 481, 483, 3, 98, 49, 0, 482, 484, 3, 86, 43, 0, 483, 482, 1, 0, 0, 0, 483, 484, 1, 0, 0, 0, 484, 81, 1, 0, 0, 0, 485, 486, 5, 7, 0, 0, 486, 487, 3, 18, 9, 0, 487, 488, 3, 98, 49, 0, 488, 83, 1, 0, 0, 0, 489, 490, 5, 14, 0, 0, 490, 491, 3, 52, 26, 0, 491, 85, 1, 0, 0, 0, 492, 497, 3, 88, 44, 0, 493, 494, 5, 38, 0, 0, 494, 496, 3, 88, 44, 0, 495, 493, 1, 0, 0, 0, 496, 499, 1, 0, 0, 0, 497, 495, 1, 0, 0, 0, 497, 498, 1, 0, 0, 0, 498, 87, 1, 0, 0, 0, 499, 497, 1, 0, 0, 0, 500, 501, 3, 58, 29, 0, 501, 502, 5, 36, 0, 0, 502, 503, 3, 62, 31, 0, 503, 89, 1, 0, 0, 0, 504, 505, 7, 6, 0, 0, 505, 91, 1, 0, 0, 0, 506, 509, 3, 94, 47, 0, 507, 509, 3, 96, 48, 0, 508, 506, 1, 0, 0, 0, 508, 507, 1, 0, 0, 0, 509, 93, 1, 0, 0, 0, 510, 512, 7, 0, 0, 0, 511, 510, 1, 0, 0, 0, 511, 512, 1, 0, 0, 0, 512, 513, 1, 0, 0, 0, 513, 514, 5, 32, 0, 0, 514, 95, 1, 0, 0, 0, 515, 517, 7, 0, 0, 0, 516, 515, 1, 0, 0, 0, 516, 517, 1, 0, 0, 0, 517, 518, 1, 0, 0, 0, 518, 519, 5, 31, 0, 0, 519, 97, 1, 0, 0, 0, 520, 521, 5, 30, 0, 0, 521, 99, 1, 0, 0, 0, 522, 523, 7, 7, 0, 0, 523, 101, 1, 0, 0, 0, 524, 525, 5, 5, 0, 0, 525, 526, 3, 104, 52, 0, 526, 103, 1, 0, 0, 0, 527, 528, 5, 69, 0, 0, 528, 529, 3, 2, 1, 0, 529, 530, 5, 70, 0, 0, 530, 105, 1, 0, 0, 0, 531, 532, 5, 17, 0, 0, 532, 533, 5, 106, 0, 0, 533, 107, 1, 0, 0, 0, 534, 535, 5, 12, 0, 0, 535, 536, 5, 110, 0, 0, 536, 109, 1, 0, 0, 0, 537, 538, 5, 3, 0, 0, 538, 541, 5, 90, 0, 0, 539, 540, 5, 88, 0, 0, 540, 542, 3, 54, 27, 0, 541, 539, 1, 0, 0, 0, 541, 542, 1, 0, 0, 0, 542, 552, 1, 0, 0, 0, 543, 544, 5, 89, 0, 0, 544, 549, 3, 112, 56, 0, 545, 546, 5, 38, 0, 0, 546, 548, 3, 112, 56, 0, 547, 545, 1, 0, 0, 0, 548, 551, 1, 0, 0, 0, 549, 547, 1, 0, 0, 0, 549, 550, 1, 0, 0, 0, 550, 553, 1, 0, 0, 0, 551, 549, 1, 0, 0, 0, 552, 543, 1, 0, 0, 0, 552, 553, 1, 0, 0, 0, 553, 111, 1, 0, 0, 0, 554, 555, 3, 54, 27, 0, 555, 556, 5, 36, 0, 0, 556, 558, 1, 0, 0, 0, 557, 554, 1, 0, 0, 0, 557, 558, 1, 0, 0, 0, 558, 559, 1, 0, 0, 0, 559, 560, 3, 54, 27, 0, 560, 113, 1, 0, 0, 0, 561, 562, 5, 11, 0, 0, 562, 563, 3, 32, 16, 0, 563, 564, 5, 88, 0, 0, 564, 565, 3, 56, 28, 0, 565, 115, 1, 0, 0, 0, 54, 127, 136, 152, 164, 173, 181, 185, 193, 195, 200, 207, 212, 219, 225, 233, 235, 246, 253, 264, 267, 281, 289, 297, 301, 308, 316, 324, 337, 341, 345, 352, 356, 362, 369, 377, 385, 407, 418, 429, 434, 438, 449, 454, 458, 472, 483, 497, 508, 511, 516, 541, 549, 552, 557] \ No newline at end of file diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.java index 6c21529d6a648..afaf57ba1d218 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.java @@ -20,62 +20,62 @@ public class EsqlBaseParser extends Parser { DISSECT=1, DROP=2, ENRICH=3, EVAL=4, EXPLAIN=5, FROM=6, GROK=7, INLINESTATS=8, KEEP=9, LIMIT=10, LOOKUP=11, META=12, METRICS=13, MV_EXPAND=14, RENAME=15, ROW=16, SHOW=17, SORT=18, STATS=19, WHERE=20, UNKNOWN_CMD=21, LINE_COMMENT=22, - MULTILINE_COMMENT=23, WS=24, INDEX_UNQUOTED_IDENTIFIER=25, EXPLAIN_WS=26, - EXPLAIN_LINE_COMMENT=27, EXPLAIN_MULTILINE_COMMENT=28, PIPE=29, QUOTED_STRING=30, - INTEGER_LITERAL=31, DECIMAL_LITERAL=32, BY=33, AND=34, ASC=35, ASSIGN=36, - CAST_OP=37, COMMA=38, DESC=39, DOT=40, FALSE=41, FIRST=42, LAST=43, LP=44, - 
IN=45, IS=46, LIKE=47, NOT=48, NULL=49, NULLS=50, OR=51, PARAM=52, RLIKE=53, - RP=54, TRUE=55, EQ=56, CIEQ=57, NEQ=58, LT=59, LTE=60, GT=61, GTE=62, - PLUS=63, MINUS=64, ASTERISK=65, SLASH=66, PERCENT=67, NAMED_OR_POSITIONAL_PARAM=68, - OPENING_BRACKET=69, CLOSING_BRACKET=70, UNQUOTED_IDENTIFIER=71, QUOTED_IDENTIFIER=72, - EXPR_LINE_COMMENT=73, EXPR_MULTILINE_COMMENT=74, EXPR_WS=75, METADATA=76, - FROM_LINE_COMMENT=77, FROM_MULTILINE_COMMENT=78, FROM_WS=79, ID_PATTERN=80, - PROJECT_LINE_COMMENT=81, PROJECT_MULTILINE_COMMENT=82, PROJECT_WS=83, - AS=84, RENAME_LINE_COMMENT=85, RENAME_MULTILINE_COMMENT=86, RENAME_WS=87, - ON=88, WITH=89, ENRICH_POLICY_NAME=90, ENRICH_LINE_COMMENT=91, ENRICH_MULTILINE_COMMENT=92, - ENRICH_WS=93, ENRICH_FIELD_LINE_COMMENT=94, ENRICH_FIELD_MULTILINE_COMMENT=95, - ENRICH_FIELD_WS=96, LOOKUP_LINE_COMMENT=97, LOOKUP_MULTILINE_COMMENT=98, - LOOKUP_WS=99, LOOKUP_FIELD_LINE_COMMENT=100, LOOKUP_FIELD_MULTILINE_COMMENT=101, - LOOKUP_FIELD_WS=102, MVEXPAND_LINE_COMMENT=103, MVEXPAND_MULTILINE_COMMENT=104, - MVEXPAND_WS=105, INFO=106, SHOW_LINE_COMMENT=107, SHOW_MULTILINE_COMMENT=108, - SHOW_WS=109, FUNCTIONS=110, META_LINE_COMMENT=111, META_MULTILINE_COMMENT=112, - META_WS=113, COLON=114, SETTING=115, SETTING_LINE_COMMENT=116, SETTTING_MULTILINE_COMMENT=117, - SETTING_WS=118, METRICS_LINE_COMMENT=119, METRICS_MULTILINE_COMMENT=120, - METRICS_WS=121, CLOSING_METRICS_LINE_COMMENT=122, CLOSING_METRICS_MULTILINE_COMMENT=123, - CLOSING_METRICS_WS=124; + MULTILINE_COMMENT=23, WS=24, UNQUOTED_SOURCE=25, EXPLAIN_WS=26, EXPLAIN_LINE_COMMENT=27, + EXPLAIN_MULTILINE_COMMENT=28, PIPE=29, QUOTED_STRING=30, INTEGER_LITERAL=31, + DECIMAL_LITERAL=32, BY=33, AND=34, ASC=35, ASSIGN=36, CAST_OP=37, COMMA=38, + DESC=39, DOT=40, FALSE=41, FIRST=42, LAST=43, LP=44, IN=45, IS=46, LIKE=47, + NOT=48, NULL=49, NULLS=50, OR=51, PARAM=52, RLIKE=53, RP=54, TRUE=55, + EQ=56, CIEQ=57, NEQ=58, LT=59, LTE=60, GT=61, GTE=62, PLUS=63, MINUS=64, + ASTERISK=65, SLASH=66, PERCENT=67, NAMED_OR_POSITIONAL_PARAM=68, OPENING_BRACKET=69, + CLOSING_BRACKET=70, UNQUOTED_IDENTIFIER=71, QUOTED_IDENTIFIER=72, EXPR_LINE_COMMENT=73, + EXPR_MULTILINE_COMMENT=74, EXPR_WS=75, METADATA=76, FROM_LINE_COMMENT=77, + FROM_MULTILINE_COMMENT=78, FROM_WS=79, ID_PATTERN=80, PROJECT_LINE_COMMENT=81, + PROJECT_MULTILINE_COMMENT=82, PROJECT_WS=83, AS=84, RENAME_LINE_COMMENT=85, + RENAME_MULTILINE_COMMENT=86, RENAME_WS=87, ON=88, WITH=89, ENRICH_POLICY_NAME=90, + ENRICH_LINE_COMMENT=91, ENRICH_MULTILINE_COMMENT=92, ENRICH_WS=93, ENRICH_FIELD_LINE_COMMENT=94, + ENRICH_FIELD_MULTILINE_COMMENT=95, ENRICH_FIELD_WS=96, LOOKUP_LINE_COMMENT=97, + LOOKUP_MULTILINE_COMMENT=98, LOOKUP_WS=99, LOOKUP_FIELD_LINE_COMMENT=100, + LOOKUP_FIELD_MULTILINE_COMMENT=101, LOOKUP_FIELD_WS=102, MVEXPAND_LINE_COMMENT=103, + MVEXPAND_MULTILINE_COMMENT=104, MVEXPAND_WS=105, INFO=106, SHOW_LINE_COMMENT=107, + SHOW_MULTILINE_COMMENT=108, SHOW_WS=109, FUNCTIONS=110, META_LINE_COMMENT=111, + META_MULTILINE_COMMENT=112, META_WS=113, COLON=114, SETTING=115, SETTING_LINE_COMMENT=116, + SETTTING_MULTILINE_COMMENT=117, SETTING_WS=118, METRICS_LINE_COMMENT=119, + METRICS_MULTILINE_COMMENT=120, METRICS_WS=121, CLOSING_METRICS_LINE_COMMENT=122, + CLOSING_METRICS_MULTILINE_COMMENT=123, CLOSING_METRICS_WS=124; public static final int RULE_singleStatement = 0, RULE_query = 1, RULE_sourceCommand = 2, RULE_processingCommand = 3, RULE_whereCommand = 4, RULE_booleanExpression = 5, RULE_regexBooleanExpression = 6, RULE_valueExpression = 7, RULE_operatorExpression = 8, 
RULE_primaryExpression = 9, RULE_functionExpression = 10, RULE_dataType = 11, RULE_rowCommand = 12, - RULE_fields = 13, RULE_field = 14, RULE_fromCommand = 15, RULE_indexIdentifier = 16, - RULE_metadata = 17, RULE_metadataOption = 18, RULE_deprecated_metadata = 19, - RULE_metricsCommand = 20, RULE_evalCommand = 21, RULE_statsCommand = 22, - RULE_inlinestatsCommand = 23, RULE_qualifiedName = 24, RULE_qualifiedNamePattern = 25, - RULE_qualifiedNamePatterns = 26, RULE_identifier = 27, RULE_identifierPattern = 28, - RULE_constant = 29, RULE_params = 30, RULE_limitCommand = 31, RULE_sortCommand = 32, - RULE_orderExpression = 33, RULE_keepCommand = 34, RULE_dropCommand = 35, - RULE_renameCommand = 36, RULE_renameClause = 37, RULE_dissectCommand = 38, - RULE_grokCommand = 39, RULE_mvExpandCommand = 40, RULE_commandOptions = 41, - RULE_commandOption = 42, RULE_booleanValue = 43, RULE_numericValue = 44, - RULE_decimalValue = 45, RULE_integerValue = 46, RULE_string = 47, RULE_comparisonOperator = 48, - RULE_explainCommand = 49, RULE_subqueryExpression = 50, RULE_showCommand = 51, - RULE_metaCommand = 52, RULE_enrichCommand = 53, RULE_enrichWithClause = 54, - RULE_lookupCommand = 55; + RULE_fields = 13, RULE_field = 14, RULE_fromCommand = 15, RULE_indexPattern = 16, + RULE_clusterString = 17, RULE_indexString = 18, RULE_metadata = 19, RULE_metadataOption = 20, + RULE_deprecated_metadata = 21, RULE_metricsCommand = 22, RULE_evalCommand = 23, + RULE_statsCommand = 24, RULE_inlinestatsCommand = 25, RULE_qualifiedName = 26, + RULE_qualifiedNamePattern = 27, RULE_qualifiedNamePatterns = 28, RULE_identifier = 29, + RULE_identifierPattern = 30, RULE_constant = 31, RULE_params = 32, RULE_limitCommand = 33, + RULE_sortCommand = 34, RULE_orderExpression = 35, RULE_keepCommand = 36, + RULE_dropCommand = 37, RULE_renameCommand = 38, RULE_renameClause = 39, + RULE_dissectCommand = 40, RULE_grokCommand = 41, RULE_mvExpandCommand = 42, + RULE_commandOptions = 43, RULE_commandOption = 44, RULE_booleanValue = 45, + RULE_numericValue = 46, RULE_decimalValue = 47, RULE_integerValue = 48, + RULE_string = 49, RULE_comparisonOperator = 50, RULE_explainCommand = 51, + RULE_subqueryExpression = 52, RULE_showCommand = 53, RULE_metaCommand = 54, + RULE_enrichCommand = 55, RULE_enrichWithClause = 56, RULE_lookupCommand = 57; private static String[] makeRuleNames() { return new String[] { "singleStatement", "query", "sourceCommand", "processingCommand", "whereCommand", "booleanExpression", "regexBooleanExpression", "valueExpression", "operatorExpression", "primaryExpression", "functionExpression", "dataType", "rowCommand", - "fields", "field", "fromCommand", "indexIdentifier", "metadata", "metadataOption", - "deprecated_metadata", "metricsCommand", "evalCommand", "statsCommand", - "inlinestatsCommand", "qualifiedName", "qualifiedNamePattern", "qualifiedNamePatterns", - "identifier", "identifierPattern", "constant", "params", "limitCommand", - "sortCommand", "orderExpression", "keepCommand", "dropCommand", "renameCommand", - "renameClause", "dissectCommand", "grokCommand", "mvExpandCommand", "commandOptions", - "commandOption", "booleanValue", "numericValue", "decimalValue", "integerValue", - "string", "comparisonOperator", "explainCommand", "subqueryExpression", - "showCommand", "metaCommand", "enrichCommand", "enrichWithClause", "lookupCommand" + "fields", "field", "fromCommand", "indexPattern", "clusterString", "indexString", + "metadata", "metadataOption", "deprecated_metadata", "metricsCommand", + "evalCommand", 
"statsCommand", "inlinestatsCommand", "qualifiedName", + "qualifiedNamePattern", "qualifiedNamePatterns", "identifier", "identifierPattern", + "constant", "params", "limitCommand", "sortCommand", "orderExpression", + "keepCommand", "dropCommand", "renameCommand", "renameClause", "dissectCommand", + "grokCommand", "mvExpandCommand", "commandOptions", "commandOption", + "booleanValue", "numericValue", "decimalValue", "integerValue", "string", + "comparisonOperator", "explainCommand", "subqueryExpression", "showCommand", + "metaCommand", "enrichCommand", "enrichWithClause", "lookupCommand" }; } public static final String[] ruleNames = makeRuleNames(); @@ -103,11 +103,11 @@ private static String[] makeSymbolicNames() { null, "DISSECT", "DROP", "ENRICH", "EVAL", "EXPLAIN", "FROM", "GROK", "INLINESTATS", "KEEP", "LIMIT", "LOOKUP", "META", "METRICS", "MV_EXPAND", "RENAME", "ROW", "SHOW", "SORT", "STATS", "WHERE", "UNKNOWN_CMD", "LINE_COMMENT", - "MULTILINE_COMMENT", "WS", "INDEX_UNQUOTED_IDENTIFIER", "EXPLAIN_WS", - "EXPLAIN_LINE_COMMENT", "EXPLAIN_MULTILINE_COMMENT", "PIPE", "QUOTED_STRING", - "INTEGER_LITERAL", "DECIMAL_LITERAL", "BY", "AND", "ASC", "ASSIGN", "CAST_OP", - "COMMA", "DESC", "DOT", "FALSE", "FIRST", "LAST", "LP", "IN", "IS", "LIKE", - "NOT", "NULL", "NULLS", "OR", "PARAM", "RLIKE", "RP", "TRUE", "EQ", "CIEQ", + "MULTILINE_COMMENT", "WS", "UNQUOTED_SOURCE", "EXPLAIN_WS", "EXPLAIN_LINE_COMMENT", + "EXPLAIN_MULTILINE_COMMENT", "PIPE", "QUOTED_STRING", "INTEGER_LITERAL", + "DECIMAL_LITERAL", "BY", "AND", "ASC", "ASSIGN", "CAST_OP", "COMMA", + "DESC", "DOT", "FALSE", "FIRST", "LAST", "LP", "IN", "IS", "LIKE", "NOT", + "NULL", "NULLS", "OR", "PARAM", "RLIKE", "RP", "TRUE", "EQ", "CIEQ", "NEQ", "LT", "LTE", "GT", "GTE", "PLUS", "MINUS", "ASTERISK", "SLASH", "PERCENT", "NAMED_OR_POSITIONAL_PARAM", "OPENING_BRACKET", "CLOSING_BRACKET", "UNQUOTED_IDENTIFIER", "QUOTED_IDENTIFIER", "EXPR_LINE_COMMENT", "EXPR_MULTILINE_COMMENT", @@ -211,9 +211,9 @@ public final SingleStatementContext singleStatement() throws RecognitionExceptio try { enterOuterAlt(_localctx, 1); { - setState(112); + setState(116); query(0); - setState(113); + setState(117); match(EOF); } } @@ -309,11 +309,11 @@ private QueryContext query(int _p) throws RecognitionException { _ctx = _localctx; _prevctx = _localctx; - setState(116); + setState(120); sourceCommand(); } _ctx.stop = _input.LT(-1); - setState(123); + setState(127); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,0,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { @@ -324,16 +324,16 @@ private QueryContext query(int _p) throws RecognitionException { { _localctx = new CompositeQueryContext(new QueryContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_query); - setState(118); + setState(122); if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)"); - setState(119); + setState(123); match(PIPE); - setState(120); + setState(124); processingCommand(); } } } - setState(125); + setState(129); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,0,_ctx); } @@ -394,48 +394,48 @@ public final SourceCommandContext sourceCommand() throws RecognitionException { SourceCommandContext _localctx = new SourceCommandContext(_ctx, getState()); enterRule(_localctx, 4, RULE_sourceCommand); try { - setState(132); + setState(136); _errHandler.sync(this); switch (_input.LA(1)) { case EXPLAIN: enterOuterAlt(_localctx, 1); { - setState(126); + 
setState(130); explainCommand(); } break; case FROM: enterOuterAlt(_localctx, 2); { - setState(127); + setState(131); fromCommand(); } break; case ROW: enterOuterAlt(_localctx, 3); { - setState(128); + setState(132); rowCommand(); } break; case METRICS: enterOuterAlt(_localctx, 4); { - setState(129); + setState(133); metricsCommand(); } break; case SHOW: enterOuterAlt(_localctx, 5); { - setState(130); + setState(134); showCommand(); } break; case META: enterOuterAlt(_localctx, 6); { - setState(131); + setState(135); metaCommand(); } break; @@ -522,104 +522,104 @@ public final ProcessingCommandContext processingCommand() throws RecognitionExce ProcessingCommandContext _localctx = new ProcessingCommandContext(_ctx, getState()); enterRule(_localctx, 6, RULE_processingCommand); try { - setState(148); + setState(152); _errHandler.sync(this); switch (_input.LA(1)) { case EVAL: enterOuterAlt(_localctx, 1); { - setState(134); + setState(138); evalCommand(); } break; case INLINESTATS: enterOuterAlt(_localctx, 2); { - setState(135); + setState(139); inlinestatsCommand(); } break; case LIMIT: enterOuterAlt(_localctx, 3); { - setState(136); + setState(140); limitCommand(); } break; case LOOKUP: enterOuterAlt(_localctx, 4); { - setState(137); + setState(141); lookupCommand(); } break; case KEEP: enterOuterAlt(_localctx, 5); { - setState(138); + setState(142); keepCommand(); } break; case SORT: enterOuterAlt(_localctx, 6); { - setState(139); + setState(143); sortCommand(); } break; case STATS: enterOuterAlt(_localctx, 7); { - setState(140); + setState(144); statsCommand(); } break; case WHERE: enterOuterAlt(_localctx, 8); { - setState(141); + setState(145); whereCommand(); } break; case DROP: enterOuterAlt(_localctx, 9); { - setState(142); + setState(146); dropCommand(); } break; case RENAME: enterOuterAlt(_localctx, 10); { - setState(143); + setState(147); renameCommand(); } break; case DISSECT: enterOuterAlt(_localctx, 11); { - setState(144); + setState(148); dissectCommand(); } break; case GROK: enterOuterAlt(_localctx, 12); { - setState(145); + setState(149); grokCommand(); } break; case ENRICH: enterOuterAlt(_localctx, 13); { - setState(146); + setState(150); enrichCommand(); } break; case MV_EXPAND: enterOuterAlt(_localctx, 14); { - setState(147); + setState(151); mvExpandCommand(); } break; @@ -670,9 +670,9 @@ public final WhereCommandContext whereCommand() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(150); + setState(154); match(WHERE); - setState(151); + setState(155); booleanExpression(0); } } @@ -867,7 +867,7 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc int _alt; enterOuterAlt(_localctx, 1); { - setState(181); + setState(185); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,6,_ctx) ) { case 1: @@ -876,9 +876,9 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc _ctx = _localctx; _prevctx = _localctx; - setState(154); + setState(158); match(NOT); - setState(155); + setState(159); booleanExpression(7); } break; @@ -887,7 +887,7 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc _localctx = new BooleanDefaultContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(156); + setState(160); valueExpression(); } break; @@ -896,7 +896,7 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc _localctx = new RegexExpressionContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(157); + 
setState(161); regexBooleanExpression(); } break; @@ -905,41 +905,41 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc _localctx = new LogicalInContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(158); + setState(162); valueExpression(); - setState(160); + setState(164); _errHandler.sync(this); _la = _input.LA(1); if (_la==NOT) { { - setState(159); + setState(163); match(NOT); } } - setState(162); + setState(166); match(IN); - setState(163); + setState(167); match(LP); - setState(164); + setState(168); valueExpression(); - setState(169); + setState(173); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(165); + setState(169); match(COMMA); - setState(166); + setState(170); valueExpression(); } } - setState(171); + setState(175); _errHandler.sync(this); _la = _input.LA(1); } - setState(172); + setState(176); match(RP); } break; @@ -948,27 +948,27 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc _localctx = new IsNullContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(174); + setState(178); valueExpression(); - setState(175); + setState(179); match(IS); - setState(177); + setState(181); _errHandler.sync(this); _la = _input.LA(1); if (_la==NOT) { { - setState(176); + setState(180); match(NOT); } } - setState(179); + setState(183); match(NULL); } break; } _ctx.stop = _input.LT(-1); - setState(191); + setState(195); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,8,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { @@ -976,7 +976,7 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc if ( _parseListeners!=null ) triggerExitRuleEvent(); _prevctx = _localctx; { - setState(189); + setState(193); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,7,_ctx) ) { case 1: @@ -984,11 +984,11 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc _localctx = new LogicalBinaryContext(new BooleanExpressionContext(_parentctx, _parentState)); ((LogicalBinaryContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, RULE_booleanExpression); - setState(183); + setState(187); if (!(precpred(_ctx, 4))) throw new FailedPredicateException(this, "precpred(_ctx, 4)"); - setState(184); + setState(188); ((LogicalBinaryContext)_localctx).operator = match(AND); - setState(185); + setState(189); ((LogicalBinaryContext)_localctx).right = booleanExpression(5); } break; @@ -997,18 +997,18 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc _localctx = new LogicalBinaryContext(new BooleanExpressionContext(_parentctx, _parentState)); ((LogicalBinaryContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, RULE_booleanExpression); - setState(186); + setState(190); if (!(precpred(_ctx, 3))) throw new FailedPredicateException(this, "precpred(_ctx, 3)"); - setState(187); + setState(191); ((LogicalBinaryContext)_localctx).operator = match(OR); - setState(188); + setState(192); ((LogicalBinaryContext)_localctx).right = booleanExpression(4); } break; } } } - setState(193); + setState(197); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,8,_ctx); } @@ -1063,48 +1063,48 @@ public final RegexBooleanExpressionContext regexBooleanExpression() throws Recog enterRule(_localctx, 12, RULE_regexBooleanExpression); int _la; try { - setState(208); + setState(212); 
_errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,11,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(194); + setState(198); valueExpression(); - setState(196); + setState(200); _errHandler.sync(this); _la = _input.LA(1); if (_la==NOT) { { - setState(195); + setState(199); match(NOT); } } - setState(198); + setState(202); ((RegexBooleanExpressionContext)_localctx).kind = match(LIKE); - setState(199); + setState(203); ((RegexBooleanExpressionContext)_localctx).pattern = string(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(201); + setState(205); valueExpression(); - setState(203); + setState(207); _errHandler.sync(this); _la = _input.LA(1); if (_la==NOT) { { - setState(202); + setState(206); match(NOT); } } - setState(205); + setState(209); ((RegexBooleanExpressionContext)_localctx).kind = match(RLIKE); - setState(206); + setState(210); ((RegexBooleanExpressionContext)_localctx).pattern = string(); } break; @@ -1190,14 +1190,14 @@ public final ValueExpressionContext valueExpression() throws RecognitionExceptio ValueExpressionContext _localctx = new ValueExpressionContext(_ctx, getState()); enterRule(_localctx, 14, RULE_valueExpression); try { - setState(215); + setState(219); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,12,_ctx) ) { case 1: _localctx = new ValueExpressionDefaultContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(210); + setState(214); operatorExpression(0); } break; @@ -1205,11 +1205,11 @@ public final ValueExpressionContext valueExpression() throws RecognitionExceptio _localctx = new ComparisonContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(211); + setState(215); ((ComparisonContext)_localctx).left = operatorExpression(0); - setState(212); + setState(216); comparisonOperator(); - setState(213); + setState(217); ((ComparisonContext)_localctx).right = operatorExpression(0); } break; @@ -1334,7 +1334,7 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE int _alt; enterOuterAlt(_localctx, 1); { - setState(221); + setState(225); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,13,_ctx) ) { case 1: @@ -1343,7 +1343,7 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE _ctx = _localctx; _prevctx = _localctx; - setState(218); + setState(222); primaryExpression(0); } break; @@ -1352,7 +1352,7 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE _localctx = new ArithmeticUnaryContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(219); + setState(223); ((ArithmeticUnaryContext)_localctx).operator = _input.LT(1); _la = _input.LA(1); if ( !(_la==PLUS || _la==MINUS) ) { @@ -1363,13 +1363,13 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE _errHandler.reportMatch(this); consume(); } - setState(220); + setState(224); operatorExpression(3); } break; } _ctx.stop = _input.LT(-1); - setState(231); + setState(235); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,15,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { @@ -1377,7 +1377,7 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE if ( _parseListeners!=null ) triggerExitRuleEvent(); _prevctx = _localctx; { - setState(229); + setState(233); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,14,_ctx) ) { case 1: @@ -1385,9 +1385,9 @@ private OperatorExpressionContext 
operatorExpression(int _p) throws RecognitionE _localctx = new ArithmeticBinaryContext(new OperatorExpressionContext(_parentctx, _parentState)); ((ArithmeticBinaryContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, RULE_operatorExpression); - setState(223); + setState(227); if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)"); - setState(224); + setState(228); ((ArithmeticBinaryContext)_localctx).operator = _input.LT(1); _la = _input.LA(1); if ( !(((((_la - 65)) & ~0x3f) == 0 && ((1L << (_la - 65)) & 7L) != 0)) ) { @@ -1398,7 +1398,7 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE _errHandler.reportMatch(this); consume(); } - setState(225); + setState(229); ((ArithmeticBinaryContext)_localctx).right = operatorExpression(3); } break; @@ -1407,9 +1407,9 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE _localctx = new ArithmeticBinaryContext(new OperatorExpressionContext(_parentctx, _parentState)); ((ArithmeticBinaryContext)_localctx).left = _prevctx; pushNewRecursionContext(_localctx, _startState, RULE_operatorExpression); - setState(226); + setState(230); if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)"); - setState(227); + setState(231); ((ArithmeticBinaryContext)_localctx).operator = _input.LT(1); _la = _input.LA(1); if ( !(_la==PLUS || _la==MINUS) ) { @@ -1420,14 +1420,14 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE _errHandler.reportMatch(this); consume(); } - setState(228); + setState(232); ((ArithmeticBinaryContext)_localctx).right = operatorExpression(2); } break; } } } - setState(233); + setState(237); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,15,_ctx); } @@ -1585,7 +1585,7 @@ private PrimaryExpressionContext primaryExpression(int _p) throws RecognitionExc int _alt; enterOuterAlt(_localctx, 1); { - setState(242); + setState(246); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,16,_ctx) ) { case 1: @@ -1594,7 +1594,7 @@ private PrimaryExpressionContext primaryExpression(int _p) throws RecognitionExc _ctx = _localctx; _prevctx = _localctx; - setState(235); + setState(239); constant(); } break; @@ -1603,7 +1603,7 @@ private PrimaryExpressionContext primaryExpression(int _p) throws RecognitionExc _localctx = new DereferenceContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(236); + setState(240); qualifiedName(); } break; @@ -1612,7 +1612,7 @@ private PrimaryExpressionContext primaryExpression(int _p) throws RecognitionExc _localctx = new FunctionContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(237); + setState(241); functionExpression(); } break; @@ -1621,17 +1621,17 @@ private PrimaryExpressionContext primaryExpression(int _p) throws RecognitionExc _localctx = new ParenthesizedExpressionContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(238); + setState(242); match(LP); - setState(239); + setState(243); booleanExpression(0); - setState(240); + setState(244); match(RP); } break; } _ctx.stop = _input.LT(-1); - setState(249); + setState(253); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,17,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { @@ -1642,16 +1642,16 @@ private PrimaryExpressionContext primaryExpression(int _p) throws RecognitionExc { _localctx = new InlineCastContext(new 
PrimaryExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_primaryExpression); - setState(244); + setState(248); if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)"); - setState(245); + setState(249); match(CAST_OP); - setState(246); + setState(250); dataType(); } } } - setState(251); + setState(255); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,17,_ctx); } @@ -1713,16 +1713,16 @@ public final FunctionExpressionContext functionExpression() throws RecognitionEx try { enterOuterAlt(_localctx, 1); { - setState(252); + setState(256); identifier(); - setState(253); + setState(257); match(LP); - setState(263); + setState(267); _errHandler.sync(this); switch (_input.LA(1)) { case ASTERISK: { - setState(254); + setState(258); match(ASTERISK); } break; @@ -1743,21 +1743,21 @@ public final FunctionExpressionContext functionExpression() throws RecognitionEx case QUOTED_IDENTIFIER: { { - setState(255); + setState(259); booleanExpression(0); - setState(260); + setState(264); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(256); + setState(260); match(COMMA); - setState(257); + setState(261); booleanExpression(0); } } - setState(262); + setState(266); _errHandler.sync(this); _la = _input.LA(1); } @@ -1769,7 +1769,7 @@ public final FunctionExpressionContext functionExpression() throws RecognitionEx default: break; } - setState(265); + setState(269); match(RP); } } @@ -1827,7 +1827,7 @@ public final DataTypeContext dataType() throws RecognitionException { _localctx = new ToDataTypeContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(267); + setState(271); identifier(); } } @@ -1874,9 +1874,9 @@ public final RowCommandContext rowCommand() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(269); + setState(273); match(ROW); - setState(270); + setState(274); fields(); } } @@ -1930,23 +1930,23 @@ public final FieldsContext fields() throws RecognitionException { int _alt; enterOuterAlt(_localctx, 1); { - setState(272); + setState(276); field(); - setState(277); + setState(281); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,20,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(273); + setState(277); match(COMMA); - setState(274); + setState(278); field(); } } } - setState(279); + setState(283); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,20,_ctx); } @@ -1996,24 +1996,24 @@ public final FieldContext field() throws RecognitionException { FieldContext _localctx = new FieldContext(_ctx, getState()); enterRule(_localctx, 28, RULE_field); try { - setState(285); + setState(289); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,21,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(280); + setState(284); booleanExpression(0); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(281); + setState(285); qualifiedName(); - setState(282); + setState(286); match(ASSIGN); - setState(283); + setState(287); booleanExpression(0); } break; @@ -2033,11 +2033,11 @@ public final FieldContext field() throws RecognitionException { @SuppressWarnings("CheckReturnValue") public static class FromCommandContext extends ParserRuleContext { public TerminalNode FROM() { return getToken(EsqlBaseParser.FROM, 0); } - public List indexIdentifier() { - return getRuleContexts(IndexIdentifierContext.class); + public List 
indexPattern() { + return getRuleContexts(IndexPatternContext.class); } - public IndexIdentifierContext indexIdentifier(int i) { - return getRuleContext(IndexIdentifierContext.class,i); + public IndexPatternContext indexPattern(int i) { + return getRuleContext(IndexPatternContext.class,i); } public List COMMA() { return getTokens(EsqlBaseParser.COMMA); } public TerminalNode COMMA(int i) { @@ -2073,34 +2073,34 @@ public final FromCommandContext fromCommand() throws RecognitionException { int _alt; enterOuterAlt(_localctx, 1); { - setState(287); + setState(291); match(FROM); - setState(288); - indexIdentifier(); - setState(293); + setState(292); + indexPattern(); + setState(297); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,22,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(289); + setState(293); match(COMMA); - setState(290); - indexIdentifier(); + setState(294); + indexPattern(); } } } - setState(295); + setState(299); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,22,_ctx); } - setState(297); + setState(301); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,23,_ctx) ) { case 1: { - setState(296); + setState(300); metadata(); } break; @@ -2119,36 +2119,157 @@ public final FromCommandContext fromCommand() throws RecognitionException { } @SuppressWarnings("CheckReturnValue") - public static class IndexIdentifierContext extends ParserRuleContext { - public TerminalNode INDEX_UNQUOTED_IDENTIFIER() { return getToken(EsqlBaseParser.INDEX_UNQUOTED_IDENTIFIER, 0); } + public static class IndexPatternContext extends ParserRuleContext { + public ClusterStringContext clusterString() { + return getRuleContext(ClusterStringContext.class,0); + } + public TerminalNode COLON() { return getToken(EsqlBaseParser.COLON, 0); } + public IndexStringContext indexString() { + return getRuleContext(IndexStringContext.class,0); + } @SuppressWarnings("this-escape") - public IndexIdentifierContext(ParserRuleContext parent, int invokingState) { + public IndexPatternContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_indexIdentifier; } + @Override public int getRuleIndex() { return RULE_indexPattern; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).enterIndexIdentifier(this); + if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).enterIndexPattern(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).exitIndexIdentifier(this); + if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).exitIndexPattern(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof EsqlBaseParserVisitor ) return ((EsqlBaseParserVisitor)visitor).visitIndexIdentifier(this); + if ( visitor instanceof EsqlBaseParserVisitor ) return ((EsqlBaseParserVisitor)visitor).visitIndexPattern(this); else return visitor.visitChildren(this); } } - public final IndexIdentifierContext indexIdentifier() throws RecognitionException { - IndexIdentifierContext _localctx = new IndexIdentifierContext(_ctx, getState()); - enterRule(_localctx, 32, RULE_indexIdentifier); + public final IndexPatternContext indexPattern() throws RecognitionException { + 
IndexPatternContext _localctx = new IndexPatternContext(_ctx, getState()); + enterRule(_localctx, 32, RULE_indexPattern); + try { + setState(308); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,24,_ctx) ) { + case 1: + enterOuterAlt(_localctx, 1); + { + setState(303); + clusterString(); + setState(304); + match(COLON); + setState(305); + indexString(); + } + break; + case 2: + enterOuterAlt(_localctx, 2); + { + setState(307); + indexString(); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + @SuppressWarnings("CheckReturnValue") + public static class ClusterStringContext extends ParserRuleContext { + public TerminalNode UNQUOTED_SOURCE() { return getToken(EsqlBaseParser.UNQUOTED_SOURCE, 0); } + @SuppressWarnings("this-escape") + public ClusterStringContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_clusterString; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).enterClusterString(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).exitClusterString(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof EsqlBaseParserVisitor ) return ((EsqlBaseParserVisitor)visitor).visitClusterString(this); + else return visitor.visitChildren(this); + } + } + + public final ClusterStringContext clusterString() throws RecognitionException { + ClusterStringContext _localctx = new ClusterStringContext(_ctx, getState()); + enterRule(_localctx, 34, RULE_clusterString); try { enterOuterAlt(_localctx, 1); { - setState(299); - match(INDEX_UNQUOTED_IDENTIFIER); + setState(310); + match(UNQUOTED_SOURCE); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + @SuppressWarnings("CheckReturnValue") + public static class IndexStringContext extends ParserRuleContext { + public TerminalNode UNQUOTED_SOURCE() { return getToken(EsqlBaseParser.UNQUOTED_SOURCE, 0); } + public TerminalNode QUOTED_STRING() { return getToken(EsqlBaseParser.QUOTED_STRING, 0); } + @SuppressWarnings("this-escape") + public IndexStringContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_indexString; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).enterIndexString(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).exitIndexString(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof EsqlBaseParserVisitor ) return ((EsqlBaseParserVisitor)visitor).visitIndexString(this); + else return visitor.visitChildren(this); + } + } + + public final IndexStringContext indexString() throws RecognitionException { + IndexStringContext _localctx = new IndexStringContext(_ctx, getState()); + enterRule(_localctx, 36, RULE_indexString); + int 
_la; + try { + enterOuterAlt(_localctx, 1); + { + setState(312); + _la = _input.LA(1); + if ( !(_la==UNQUOTED_SOURCE || _la==QUOTED_STRING) ) { + _errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); + } } } catch (RecognitionException re) { @@ -2192,22 +2313,22 @@ public T accept(ParseTreeVisitor visitor) { public final MetadataContext metadata() throws RecognitionException { MetadataContext _localctx = new MetadataContext(_ctx, getState()); - enterRule(_localctx, 34, RULE_metadata); + enterRule(_localctx, 38, RULE_metadata); try { - setState(303); + setState(316); _errHandler.sync(this); switch (_input.LA(1)) { case METADATA: enterOuterAlt(_localctx, 1); { - setState(301); + setState(314); metadataOption(); } break; case OPENING_BRACKET: enterOuterAlt(_localctx, 2); { - setState(302); + setState(315); deprecated_metadata(); } break; @@ -2229,11 +2350,9 @@ public final MetadataContext metadata() throws RecognitionException { @SuppressWarnings("CheckReturnValue") public static class MetadataOptionContext extends ParserRuleContext { public TerminalNode METADATA() { return getToken(EsqlBaseParser.METADATA, 0); } - public List indexIdentifier() { - return getRuleContexts(IndexIdentifierContext.class); - } - public IndexIdentifierContext indexIdentifier(int i) { - return getRuleContext(IndexIdentifierContext.class,i); + public List UNQUOTED_SOURCE() { return getTokens(EsqlBaseParser.UNQUOTED_SOURCE); } + public TerminalNode UNQUOTED_SOURCE(int i) { + return getToken(EsqlBaseParser.UNQUOTED_SOURCE, i); } public List COMMA() { return getTokens(EsqlBaseParser.COMMA); } public TerminalNode COMMA(int i) { @@ -2261,32 +2380,32 @@ public T accept(ParseTreeVisitor visitor) { public final MetadataOptionContext metadataOption() throws RecognitionException { MetadataOptionContext _localctx = new MetadataOptionContext(_ctx, getState()); - enterRule(_localctx, 36, RULE_metadataOption); + enterRule(_localctx, 40, RULE_metadataOption); try { int _alt; enterOuterAlt(_localctx, 1); { - setState(305); + setState(318); match(METADATA); - setState(306); - indexIdentifier(); - setState(311); + setState(319); + match(UNQUOTED_SOURCE); + setState(324); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,25,_ctx); + _alt = getInterpreter().adaptivePredict(_input,26,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(307); + setState(320); match(COMMA); - setState(308); - indexIdentifier(); + setState(321); + match(UNQUOTED_SOURCE); } } } - setState(313); + setState(326); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,25,_ctx); + _alt = getInterpreter().adaptivePredict(_input,26,_ctx); } } } @@ -2329,15 +2448,15 @@ public T accept(ParseTreeVisitor visitor) { public final Deprecated_metadataContext deprecated_metadata() throws RecognitionException { Deprecated_metadataContext _localctx = new Deprecated_metadataContext(_ctx, getState()); - enterRule(_localctx, 38, RULE_deprecated_metadata); + enterRule(_localctx, 42, RULE_deprecated_metadata); try { enterOuterAlt(_localctx, 1); { - setState(314); + setState(327); match(OPENING_BRACKET); - setState(315); + setState(328); metadataOption(); - setState(316); + setState(329); match(CLOSING_BRACKET); } } @@ -2357,11 +2476,11 @@ public static class MetricsCommandContext extends ParserRuleContext { public FieldsContext aggregates; public FieldsContext grouping; public TerminalNode 
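(Illustrative sketch, not part of the diff: the generated contexts above imply that an index pattern is either `clusterString ':' indexString` or a bare `indexString`, where `clusterString` is an UNQUOTED_SOURCE token and `indexString` is UNQUOTED_SOURCE or QUOTED_STRING. The snippet below shows how a caller might read those contexts; it assumes the generated EsqlBaseParser is on the classpath, and the surrounding method is hypothetical — only `IndexPatternContext`, `clusterString()`, `indexString()`, and ANTLR's `getText()` come from the code in this diff.)

    // Sketch only: interpreting the new source-pattern contexts generated here.
    static String describe(EsqlBaseParser.IndexPatternContext ctx) {
        // Alternative 1: clusterString COLON indexString, e.g. "remote:logs-*"
        if (ctx.clusterString() != null) {
            return ctx.clusterString().getText() + ":" + ctx.indexString().getText();
        }
        // Alternative 2: a bare indexString (UNQUOTED_SOURCE or QUOTED_STRING)
        return ctx.indexString().getText();
    }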
METRICS() { return getToken(EsqlBaseParser.METRICS, 0); } - public List indexIdentifier() { - return getRuleContexts(IndexIdentifierContext.class); + public List indexPattern() { + return getRuleContexts(IndexPatternContext.class); } - public IndexIdentifierContext indexIdentifier(int i) { - return getRuleContext(IndexIdentifierContext.class,i); + public IndexPatternContext indexPattern(int i) { + return getRuleContext(IndexPatternContext.class,i); } public List COMMA() { return getTokens(EsqlBaseParser.COMMA); } public TerminalNode COMMA(int i) { @@ -2396,51 +2515,51 @@ public T accept(ParseTreeVisitor visitor) { public final MetricsCommandContext metricsCommand() throws RecognitionException { MetricsCommandContext _localctx = new MetricsCommandContext(_ctx, getState()); - enterRule(_localctx, 40, RULE_metricsCommand); + enterRule(_localctx, 44, RULE_metricsCommand); try { int _alt; enterOuterAlt(_localctx, 1); { - setState(318); + setState(331); match(METRICS); - setState(319); - indexIdentifier(); - setState(324); + setState(332); + indexPattern(); + setState(337); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,26,_ctx); + _alt = getInterpreter().adaptivePredict(_input,27,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(320); + setState(333); match(COMMA); - setState(321); - indexIdentifier(); + setState(334); + indexPattern(); } } } - setState(326); + setState(339); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,26,_ctx); + _alt = getInterpreter().adaptivePredict(_input,27,_ctx); } - setState(328); + setState(341); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,27,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,28,_ctx) ) { case 1: { - setState(327); + setState(340); ((MetricsCommandContext)_localctx).aggregates = fields(); } break; } - setState(332); + setState(345); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,28,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,29,_ctx) ) { case 1: { - setState(330); + setState(343); match(BY); - setState(331); + setState(344); ((MetricsCommandContext)_localctx).grouping = fields(); } break; @@ -2486,13 +2605,13 @@ public T accept(ParseTreeVisitor visitor) { public final EvalCommandContext evalCommand() throws RecognitionException { EvalCommandContext _localctx = new EvalCommandContext(_ctx, getState()); - enterRule(_localctx, 42, RULE_evalCommand); + enterRule(_localctx, 46, RULE_evalCommand); try { enterOuterAlt(_localctx, 1); { - setState(334); + setState(347); match(EVAL); - setState(335); + setState(348); fields(); } } @@ -2541,30 +2660,30 @@ public T accept(ParseTreeVisitor visitor) { public final StatsCommandContext statsCommand() throws RecognitionException { StatsCommandContext _localctx = new StatsCommandContext(_ctx, getState()); - enterRule(_localctx, 44, RULE_statsCommand); + enterRule(_localctx, 48, RULE_statsCommand); try { enterOuterAlt(_localctx, 1); { - setState(337); + setState(350); match(STATS); - setState(339); + setState(352); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,29,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,30,_ctx) ) { case 1: { - setState(338); + setState(351); ((StatsCommandContext)_localctx).stats = fields(); } break; } - setState(343); + setState(356); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,30,_ctx) ) { + switch ( 
getInterpreter().adaptivePredict(_input,31,_ctx) ) { case 1: { - setState(341); + setState(354); match(BY); - setState(342); + setState(355); ((StatsCommandContext)_localctx).grouping = fields(); } break; @@ -2616,22 +2735,22 @@ public T accept(ParseTreeVisitor visitor) { public final InlinestatsCommandContext inlinestatsCommand() throws RecognitionException { InlinestatsCommandContext _localctx = new InlinestatsCommandContext(_ctx, getState()); - enterRule(_localctx, 46, RULE_inlinestatsCommand); + enterRule(_localctx, 50, RULE_inlinestatsCommand); try { enterOuterAlt(_localctx, 1); { - setState(345); + setState(358); match(INLINESTATS); - setState(346); + setState(359); ((InlinestatsCommandContext)_localctx).stats = fields(); - setState(349); + setState(362); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,31,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,32,_ctx) ) { case 1: { - setState(347); + setState(360); match(BY); - setState(348); + setState(361); ((InlinestatsCommandContext)_localctx).grouping = fields(); } break; @@ -2683,30 +2802,30 @@ public T accept(ParseTreeVisitor visitor) { public final QualifiedNameContext qualifiedName() throws RecognitionException { QualifiedNameContext _localctx = new QualifiedNameContext(_ctx, getState()); - enterRule(_localctx, 48, RULE_qualifiedName); + enterRule(_localctx, 52, RULE_qualifiedName); try { int _alt; enterOuterAlt(_localctx, 1); { - setState(351); + setState(364); identifier(); - setState(356); + setState(369); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,32,_ctx); + _alt = getInterpreter().adaptivePredict(_input,33,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(352); + setState(365); match(DOT); - setState(353); + setState(366); identifier(); } } } - setState(358); + setState(371); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,32,_ctx); + _alt = getInterpreter().adaptivePredict(_input,33,_ctx); } } } @@ -2755,30 +2874,30 @@ public T accept(ParseTreeVisitor visitor) { public final QualifiedNamePatternContext qualifiedNamePattern() throws RecognitionException { QualifiedNamePatternContext _localctx = new QualifiedNamePatternContext(_ctx, getState()); - enterRule(_localctx, 50, RULE_qualifiedNamePattern); + enterRule(_localctx, 54, RULE_qualifiedNamePattern); try { int _alt; enterOuterAlt(_localctx, 1); { - setState(359); + setState(372); identifierPattern(); - setState(364); + setState(377); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,33,_ctx); + _alt = getInterpreter().adaptivePredict(_input,34,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(360); + setState(373); match(DOT); - setState(361); + setState(374); identifierPattern(); } } } - setState(366); + setState(379); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,33,_ctx); + _alt = getInterpreter().adaptivePredict(_input,34,_ctx); } } } @@ -2827,30 +2946,30 @@ public T accept(ParseTreeVisitor visitor) { public final QualifiedNamePatternsContext qualifiedNamePatterns() throws RecognitionException { QualifiedNamePatternsContext _localctx = new QualifiedNamePatternsContext(_ctx, getState()); - enterRule(_localctx, 52, RULE_qualifiedNamePatterns); + enterRule(_localctx, 56, RULE_qualifiedNamePatterns); try { int _alt; enterOuterAlt(_localctx, 1); { - setState(367); + setState(380); 
qualifiedNamePattern(); - setState(372); + setState(385); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,34,_ctx); + _alt = getInterpreter().adaptivePredict(_input,35,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(368); + setState(381); match(COMMA); - setState(369); + setState(382); qualifiedNamePattern(); } } } - setState(374); + setState(387); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,34,_ctx); + _alt = getInterpreter().adaptivePredict(_input,35,_ctx); } } } @@ -2891,12 +3010,12 @@ public T accept(ParseTreeVisitor visitor) { public final IdentifierContext identifier() throws RecognitionException { IdentifierContext _localctx = new IdentifierContext(_ctx, getState()); - enterRule(_localctx, 54, RULE_identifier); + enterRule(_localctx, 58, RULE_identifier); int _la; try { enterOuterAlt(_localctx, 1); { - setState(375); + setState(388); _la = _input.LA(1); if ( !(_la==UNQUOTED_IDENTIFIER || _la==QUOTED_IDENTIFIER) ) { _errHandler.recoverInline(this); @@ -2944,11 +3063,11 @@ public T accept(ParseTreeVisitor visitor) { public final IdentifierPatternContext identifierPattern() throws RecognitionException { IdentifierPatternContext _localctx = new IdentifierPatternContext(_ctx, getState()); - enterRule(_localctx, 56, RULE_identifierPattern); + enterRule(_localctx, 60, RULE_identifierPattern); try { enterOuterAlt(_localctx, 1); { - setState(377); + setState(390); match(ID_PATTERN); } } @@ -3216,17 +3335,17 @@ public T accept(ParseTreeVisitor visitor) { public final ConstantContext constant() throws RecognitionException { ConstantContext _localctx = new ConstantContext(_ctx, getState()); - enterRule(_localctx, 58, RULE_constant); + enterRule(_localctx, 62, RULE_constant); int _la; try { - setState(421); + setState(434); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,38,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,39,_ctx) ) { case 1: _localctx = new NullLiteralContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(379); + setState(392); match(NULL); } break; @@ -3234,9 +3353,9 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new QualifiedIntegerLiteralContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(380); + setState(393); integerValue(); - setState(381); + setState(394); match(UNQUOTED_IDENTIFIER); } break; @@ -3244,7 +3363,7 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new DecimalLiteralContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(383); + setState(396); decimalValue(); } break; @@ -3252,7 +3371,7 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new IntegerLiteralContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(384); + setState(397); integerValue(); } break; @@ -3260,7 +3379,7 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new BooleanLiteralContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(385); + setState(398); booleanValue(); } break; @@ -3268,7 +3387,7 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new InputParamsContext(_localctx); enterOuterAlt(_localctx, 6); { - setState(386); + setState(399); params(); } break; @@ -3276,7 +3395,7 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new StringLiteralContext(_localctx); enterOuterAlt(_localctx, 
7); { - setState(387); + setState(400); string(); } break; @@ -3284,27 +3403,27 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new NumericArrayLiteralContext(_localctx); enterOuterAlt(_localctx, 8); { - setState(388); + setState(401); match(OPENING_BRACKET); - setState(389); + setState(402); numericValue(); - setState(394); + setState(407); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(390); + setState(403); match(COMMA); - setState(391); + setState(404); numericValue(); } } - setState(396); + setState(409); _errHandler.sync(this); _la = _input.LA(1); } - setState(397); + setState(410); match(CLOSING_BRACKET); } break; @@ -3312,27 +3431,27 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new BooleanArrayLiteralContext(_localctx); enterOuterAlt(_localctx, 9); { - setState(399); + setState(412); match(OPENING_BRACKET); - setState(400); + setState(413); booleanValue(); - setState(405); + setState(418); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(401); + setState(414); match(COMMA); - setState(402); + setState(415); booleanValue(); } } - setState(407); + setState(420); _errHandler.sync(this); _la = _input.LA(1); } - setState(408); + setState(421); match(CLOSING_BRACKET); } break; @@ -3340,27 +3459,27 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new StringArrayLiteralContext(_localctx); enterOuterAlt(_localctx, 10); { - setState(410); + setState(423); match(OPENING_BRACKET); - setState(411); + setState(424); string(); - setState(416); + setState(429); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(412); + setState(425); match(COMMA); - setState(413); + setState(426); string(); } } - setState(418); + setState(431); _errHandler.sync(this); _la = _input.LA(1); } - setState(419); + setState(432); match(CLOSING_BRACKET); } break; @@ -3432,16 +3551,16 @@ public T accept(ParseTreeVisitor visitor) { public final ParamsContext params() throws RecognitionException { ParamsContext _localctx = new ParamsContext(_ctx, getState()); - enterRule(_localctx, 60, RULE_params); + enterRule(_localctx, 64, RULE_params); try { - setState(425); + setState(438); _errHandler.sync(this); switch (_input.LA(1)) { case PARAM: _localctx = new InputParamContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(423); + setState(436); match(PARAM); } break; @@ -3449,7 +3568,7 @@ public final ParamsContext params() throws RecognitionException { _localctx = new InputNamedOrPositionalParamContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(424); + setState(437); match(NAMED_OR_POSITIONAL_PARAM); } break; @@ -3494,13 +3613,13 @@ public T accept(ParseTreeVisitor visitor) { public final LimitCommandContext limitCommand() throws RecognitionException { LimitCommandContext _localctx = new LimitCommandContext(_ctx, getState()); - enterRule(_localctx, 62, RULE_limitCommand); + enterRule(_localctx, 66, RULE_limitCommand); try { enterOuterAlt(_localctx, 1); { - setState(427); + setState(440); match(LIMIT); - setState(428); + setState(441); match(INTEGER_LITERAL); } } @@ -3550,32 +3669,32 @@ public T accept(ParseTreeVisitor visitor) { public final SortCommandContext sortCommand() throws RecognitionException { SortCommandContext _localctx = new SortCommandContext(_ctx, getState()); - enterRule(_localctx, 64, RULE_sortCommand); + enterRule(_localctx, 68, RULE_sortCommand); try { int _alt; enterOuterAlt(_localctx, 1); 
{ - setState(430); + setState(443); match(SORT); - setState(431); + setState(444); orderExpression(); - setState(436); + setState(449); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,40,_ctx); + _alt = getInterpreter().adaptivePredict(_input,41,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(432); + setState(445); match(COMMA); - setState(433); + setState(446); orderExpression(); } } } - setState(438); + setState(451); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,40,_ctx); + _alt = getInterpreter().adaptivePredict(_input,41,_ctx); } } } @@ -3624,19 +3743,19 @@ public T accept(ParseTreeVisitor visitor) { public final OrderExpressionContext orderExpression() throws RecognitionException { OrderExpressionContext _localctx = new OrderExpressionContext(_ctx, getState()); - enterRule(_localctx, 66, RULE_orderExpression); + enterRule(_localctx, 70, RULE_orderExpression); int _la; try { enterOuterAlt(_localctx, 1); { - setState(439); + setState(452); booleanExpression(0); - setState(441); + setState(454); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,41,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,42,_ctx) ) { case 1: { - setState(440); + setState(453); ((OrderExpressionContext)_localctx).ordering = _input.LT(1); _la = _input.LA(1); if ( !(_la==ASC || _la==DESC) ) { @@ -3650,14 +3769,14 @@ public final OrderExpressionContext orderExpression() throws RecognitionExceptio } break; } - setState(445); + setState(458); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,42,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,43,_ctx) ) { case 1: { - setState(443); + setState(456); match(NULLS); - setState(444); + setState(457); ((OrderExpressionContext)_localctx).nullOrdering = _input.LT(1); _la = _input.LA(1); if ( !(_la==FIRST || _la==LAST) ) { @@ -3712,13 +3831,13 @@ public T accept(ParseTreeVisitor visitor) { public final KeepCommandContext keepCommand() throws RecognitionException { KeepCommandContext _localctx = new KeepCommandContext(_ctx, getState()); - enterRule(_localctx, 68, RULE_keepCommand); + enterRule(_localctx, 72, RULE_keepCommand); try { enterOuterAlt(_localctx, 1); { - setState(447); + setState(460); match(KEEP); - setState(448); + setState(461); qualifiedNamePatterns(); } } @@ -3761,13 +3880,13 @@ public T accept(ParseTreeVisitor visitor) { public final DropCommandContext dropCommand() throws RecognitionException { DropCommandContext _localctx = new DropCommandContext(_ctx, getState()); - enterRule(_localctx, 70, RULE_dropCommand); + enterRule(_localctx, 74, RULE_dropCommand); try { enterOuterAlt(_localctx, 1); { - setState(450); + setState(463); match(DROP); - setState(451); + setState(464); qualifiedNamePatterns(); } } @@ -3817,32 +3936,32 @@ public T accept(ParseTreeVisitor visitor) { public final RenameCommandContext renameCommand() throws RecognitionException { RenameCommandContext _localctx = new RenameCommandContext(_ctx, getState()); - enterRule(_localctx, 72, RULE_renameCommand); + enterRule(_localctx, 76, RULE_renameCommand); try { int _alt; enterOuterAlt(_localctx, 1); { - setState(453); + setState(466); match(RENAME); - setState(454); + setState(467); renameClause(); - setState(459); + setState(472); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,43,_ctx); + _alt = getInterpreter().adaptivePredict(_input,44,_ctx); while ( _alt!=2 && 
_alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(455); + setState(468); match(COMMA); - setState(456); + setState(469); renameClause(); } } } - setState(461); + setState(474); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,43,_ctx); + _alt = getInterpreter().adaptivePredict(_input,44,_ctx); } } } @@ -3890,15 +4009,15 @@ public T accept(ParseTreeVisitor visitor) { public final RenameClauseContext renameClause() throws RecognitionException { RenameClauseContext _localctx = new RenameClauseContext(_ctx, getState()); - enterRule(_localctx, 74, RULE_renameClause); + enterRule(_localctx, 78, RULE_renameClause); try { enterOuterAlt(_localctx, 1); { - setState(462); + setState(475); ((RenameClauseContext)_localctx).oldName = qualifiedNamePattern(); - setState(463); + setState(476); match(AS); - setState(464); + setState(477); ((RenameClauseContext)_localctx).newName = qualifiedNamePattern(); } } @@ -3947,22 +4066,22 @@ public T accept(ParseTreeVisitor visitor) { public final DissectCommandContext dissectCommand() throws RecognitionException { DissectCommandContext _localctx = new DissectCommandContext(_ctx, getState()); - enterRule(_localctx, 76, RULE_dissectCommand); + enterRule(_localctx, 80, RULE_dissectCommand); try { enterOuterAlt(_localctx, 1); { - setState(466); + setState(479); match(DISSECT); - setState(467); + setState(480); primaryExpression(0); - setState(468); + setState(481); string(); - setState(470); + setState(483); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,44,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,45,_ctx) ) { case 1: { - setState(469); + setState(482); commandOptions(); } break; @@ -4011,15 +4130,15 @@ public T accept(ParseTreeVisitor visitor) { public final GrokCommandContext grokCommand() throws RecognitionException { GrokCommandContext _localctx = new GrokCommandContext(_ctx, getState()); - enterRule(_localctx, 78, RULE_grokCommand); + enterRule(_localctx, 82, RULE_grokCommand); try { enterOuterAlt(_localctx, 1); { - setState(472); + setState(485); match(GROK); - setState(473); + setState(486); primaryExpression(0); - setState(474); + setState(487); string(); } } @@ -4062,13 +4181,13 @@ public T accept(ParseTreeVisitor visitor) { public final MvExpandCommandContext mvExpandCommand() throws RecognitionException { MvExpandCommandContext _localctx = new MvExpandCommandContext(_ctx, getState()); - enterRule(_localctx, 80, RULE_mvExpandCommand); + enterRule(_localctx, 84, RULE_mvExpandCommand); try { enterOuterAlt(_localctx, 1); { - setState(476); + setState(489); match(MV_EXPAND); - setState(477); + setState(490); qualifiedName(); } } @@ -4117,30 +4236,30 @@ public T accept(ParseTreeVisitor visitor) { public final CommandOptionsContext commandOptions() throws RecognitionException { CommandOptionsContext _localctx = new CommandOptionsContext(_ctx, getState()); - enterRule(_localctx, 82, RULE_commandOptions); + enterRule(_localctx, 86, RULE_commandOptions); try { int _alt; enterOuterAlt(_localctx, 1); { - setState(479); + setState(492); commandOption(); - setState(484); + setState(497); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,45,_ctx); + _alt = getInterpreter().adaptivePredict(_input,46,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(480); + setState(493); match(COMMA); - setState(481); + setState(494); commandOption(); } } } - setState(486); + setState(499); 
_errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,45,_ctx); + _alt = getInterpreter().adaptivePredict(_input,46,_ctx); } } } @@ -4186,15 +4305,15 @@ public T accept(ParseTreeVisitor visitor) { public final CommandOptionContext commandOption() throws RecognitionException { CommandOptionContext _localctx = new CommandOptionContext(_ctx, getState()); - enterRule(_localctx, 84, RULE_commandOption); + enterRule(_localctx, 88, RULE_commandOption); try { enterOuterAlt(_localctx, 1); { - setState(487); + setState(500); identifier(); - setState(488); + setState(501); match(ASSIGN); - setState(489); + setState(502); constant(); } } @@ -4235,12 +4354,12 @@ public T accept(ParseTreeVisitor visitor) { public final BooleanValueContext booleanValue() throws RecognitionException { BooleanValueContext _localctx = new BooleanValueContext(_ctx, getState()); - enterRule(_localctx, 86, RULE_booleanValue); + enterRule(_localctx, 90, RULE_booleanValue); int _la; try { enterOuterAlt(_localctx, 1); { - setState(491); + setState(504); _la = _input.LA(1); if ( !(_la==FALSE || _la==TRUE) ) { _errHandler.recoverInline(this); @@ -4293,22 +4412,22 @@ public T accept(ParseTreeVisitor visitor) { public final NumericValueContext numericValue() throws RecognitionException { NumericValueContext _localctx = new NumericValueContext(_ctx, getState()); - enterRule(_localctx, 88, RULE_numericValue); + enterRule(_localctx, 92, RULE_numericValue); try { - setState(495); + setState(508); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,46,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,47,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(493); + setState(506); decimalValue(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(494); + setState(507); integerValue(); } break; @@ -4352,17 +4471,17 @@ public T accept(ParseTreeVisitor visitor) { public final DecimalValueContext decimalValue() throws RecognitionException { DecimalValueContext _localctx = new DecimalValueContext(_ctx, getState()); - enterRule(_localctx, 90, RULE_decimalValue); + enterRule(_localctx, 94, RULE_decimalValue); int _la; try { enterOuterAlt(_localctx, 1); { - setState(498); + setState(511); _errHandler.sync(this); _la = _input.LA(1); if (_la==PLUS || _la==MINUS) { { - setState(497); + setState(510); _la = _input.LA(1); if ( !(_la==PLUS || _la==MINUS) ) { _errHandler.recoverInline(this); @@ -4375,7 +4494,7 @@ public final DecimalValueContext decimalValue() throws RecognitionException { } } - setState(500); + setState(513); match(DECIMAL_LITERAL); } } @@ -4417,17 +4536,17 @@ public T accept(ParseTreeVisitor visitor) { public final IntegerValueContext integerValue() throws RecognitionException { IntegerValueContext _localctx = new IntegerValueContext(_ctx, getState()); - enterRule(_localctx, 92, RULE_integerValue); + enterRule(_localctx, 96, RULE_integerValue); int _la; try { enterOuterAlt(_localctx, 1); { - setState(503); + setState(516); _errHandler.sync(this); _la = _input.LA(1); if (_la==PLUS || _la==MINUS) { { - setState(502); + setState(515); _la = _input.LA(1); if ( !(_la==PLUS || _la==MINUS) ) { _errHandler.recoverInline(this); @@ -4440,7 +4559,7 @@ public final IntegerValueContext integerValue() throws RecognitionException { } } - setState(505); + setState(518); match(INTEGER_LITERAL); } } @@ -4480,11 +4599,11 @@ public T accept(ParseTreeVisitor visitor) { public final StringContext string() throws RecognitionException { StringContext _localctx = new StringContext(_ctx, 
getState()); - enterRule(_localctx, 94, RULE_string); + enterRule(_localctx, 98, RULE_string); try { enterOuterAlt(_localctx, 1); { - setState(507); + setState(520); match(QUOTED_STRING); } } @@ -4529,12 +4648,12 @@ public T accept(ParseTreeVisitor visitor) { public final ComparisonOperatorContext comparisonOperator() throws RecognitionException { ComparisonOperatorContext _localctx = new ComparisonOperatorContext(_ctx, getState()); - enterRule(_localctx, 96, RULE_comparisonOperator); + enterRule(_localctx, 100, RULE_comparisonOperator); int _la; try { enterOuterAlt(_localctx, 1); { - setState(509); + setState(522); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 9007199254740992000L) != 0)) ) { _errHandler.recoverInline(this); @@ -4585,13 +4704,13 @@ public T accept(ParseTreeVisitor visitor) { public final ExplainCommandContext explainCommand() throws RecognitionException { ExplainCommandContext _localctx = new ExplainCommandContext(_ctx, getState()); - enterRule(_localctx, 98, RULE_explainCommand); + enterRule(_localctx, 102, RULE_explainCommand); try { enterOuterAlt(_localctx, 1); { - setState(511); + setState(524); match(EXPLAIN); - setState(512); + setState(525); subqueryExpression(); } } @@ -4635,15 +4754,15 @@ public T accept(ParseTreeVisitor visitor) { public final SubqueryExpressionContext subqueryExpression() throws RecognitionException { SubqueryExpressionContext _localctx = new SubqueryExpressionContext(_ctx, getState()); - enterRule(_localctx, 100, RULE_subqueryExpression); + enterRule(_localctx, 104, RULE_subqueryExpression); try { enterOuterAlt(_localctx, 1); { - setState(514); + setState(527); match(OPENING_BRACKET); - setState(515); + setState(528); query(0); - setState(516); + setState(529); match(CLOSING_BRACKET); } } @@ -4695,14 +4814,14 @@ public T accept(ParseTreeVisitor visitor) { public final ShowCommandContext showCommand() throws RecognitionException { ShowCommandContext _localctx = new ShowCommandContext(_ctx, getState()); - enterRule(_localctx, 102, RULE_showCommand); + enterRule(_localctx, 106, RULE_showCommand); try { _localctx = new ShowInfoContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(518); + setState(531); match(SHOW); - setState(519); + setState(532); match(INFO); } } @@ -4754,14 +4873,14 @@ public T accept(ParseTreeVisitor visitor) { public final MetaCommandContext metaCommand() throws RecognitionException { MetaCommandContext _localctx = new MetaCommandContext(_ctx, getState()); - enterRule(_localctx, 104, RULE_metaCommand); + enterRule(_localctx, 108, RULE_metaCommand); try { _localctx = new MetaFunctionsContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(521); + setState(534); match(META); - setState(522); + setState(535); match(FUNCTIONS); } } @@ -4819,53 +4938,53 @@ public T accept(ParseTreeVisitor visitor) { public final EnrichCommandContext enrichCommand() throws RecognitionException { EnrichCommandContext _localctx = new EnrichCommandContext(_ctx, getState()); - enterRule(_localctx, 106, RULE_enrichCommand); + enterRule(_localctx, 110, RULE_enrichCommand); try { int _alt; enterOuterAlt(_localctx, 1); { - setState(524); + setState(537); match(ENRICH); - setState(525); + setState(538); ((EnrichCommandContext)_localctx).policyName = match(ENRICH_POLICY_NAME); - setState(528); + setState(541); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,49,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,50,_ctx) ) { case 1: { - setState(526); + setState(539); match(ON); - 
setState(527); + setState(540); ((EnrichCommandContext)_localctx).matchField = qualifiedNamePattern(); } break; } - setState(539); + setState(552); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,51,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,52,_ctx) ) { case 1: { - setState(530); + setState(543); match(WITH); - setState(531); + setState(544); enrichWithClause(); - setState(536); + setState(549); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,50,_ctx); + _alt = getInterpreter().adaptivePredict(_input,51,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(532); + setState(545); match(COMMA); - setState(533); + setState(546); enrichWithClause(); } } } - setState(538); + setState(551); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,50,_ctx); + _alt = getInterpreter().adaptivePredict(_input,51,_ctx); } } break; @@ -4916,23 +5035,23 @@ public T accept(ParseTreeVisitor visitor) { public final EnrichWithClauseContext enrichWithClause() throws RecognitionException { EnrichWithClauseContext _localctx = new EnrichWithClauseContext(_ctx, getState()); - enterRule(_localctx, 108, RULE_enrichWithClause); + enterRule(_localctx, 112, RULE_enrichWithClause); try { enterOuterAlt(_localctx, 1); { - setState(544); + setState(557); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,52,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,53,_ctx) ) { case 1: { - setState(541); + setState(554); ((EnrichWithClauseContext)_localctx).newName = qualifiedNamePattern(); - setState(542); + setState(555); match(ASSIGN); } break; } - setState(546); + setState(559); ((EnrichWithClauseContext)_localctx).enrichField = qualifiedNamePattern(); } } @@ -4949,11 +5068,13 @@ public final EnrichWithClauseContext enrichWithClause() throws RecognitionExcept @SuppressWarnings("CheckReturnValue") public static class LookupCommandContext extends ParserRuleContext { - public Token tableName; + public IndexPatternContext tableName; public QualifiedNamePatternsContext matchFields; public TerminalNode LOOKUP() { return getToken(EsqlBaseParser.LOOKUP, 0); } public TerminalNode ON() { return getToken(EsqlBaseParser.ON, 0); } - public TerminalNode INDEX_UNQUOTED_IDENTIFIER() { return getToken(EsqlBaseParser.INDEX_UNQUOTED_IDENTIFIER, 0); } + public IndexPatternContext indexPattern() { + return getRuleContext(IndexPatternContext.class,0); + } public QualifiedNamePatternsContext qualifiedNamePatterns() { return getRuleContext(QualifiedNamePatternsContext.class,0); } @@ -4979,17 +5100,17 @@ public T accept(ParseTreeVisitor visitor) { public final LookupCommandContext lookupCommand() throws RecognitionException { LookupCommandContext _localctx = new LookupCommandContext(_ctx, getState()); - enterRule(_localctx, 110, RULE_lookupCommand); + enterRule(_localctx, 114, RULE_lookupCommand); try { enterOuterAlt(_localctx, 1); { - setState(548); + setState(561); match(LOOKUP); - setState(549); - ((LookupCommandContext)_localctx).tableName = match(INDEX_UNQUOTED_IDENTIFIER); - setState(550); + setState(562); + ((LookupCommandContext)_localctx).tableName = indexPattern(); + setState(563); match(ON); - setState(551); + setState(564); ((LookupCommandContext)_localctx).matchFields = qualifiedNamePatterns(); } } @@ -5051,7 +5172,7 @@ private boolean primaryExpression_sempred(PrimaryExpressionContext _localctx, in } public static final String _serializedATN = - 
"\u0004\u0001|\u022a\u0002\u0000\u0007\u0000\u0002\u0001\u0007\u0001\u0002"+ + "\u0004\u0001|\u0237\u0002\u0000\u0007\u0000\u0002\u0001\u0007\u0001\u0002"+ "\u0002\u0007\u0002\u0002\u0003\u0007\u0003\u0002\u0004\u0007\u0004\u0002"+ "\u0005\u0007\u0005\u0002\u0006\u0007\u0006\u0002\u0007\u0007\u0007\u0002"+ "\b\u0007\b\u0002\t\u0007\t\u0002\n\u0007\n\u0002\u000b\u0007\u000b\u0002"+ @@ -5066,343 +5187,351 @@ private boolean primaryExpression_sempred(PrimaryExpressionContext _localctx, in "(\u0007(\u0002)\u0007)\u0002*\u0007*\u0002+\u0007+\u0002,\u0007,\u0002"+ "-\u0007-\u0002.\u0007.\u0002/\u0007/\u00020\u00070\u00021\u00071\u0002"+ "2\u00072\u00023\u00073\u00024\u00074\u00025\u00075\u00026\u00076\u0002"+ - "7\u00077\u0001\u0000\u0001\u0000\u0001\u0000\u0001\u0001\u0001\u0001\u0001"+ - "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0005\u0001z\b\u0001\n\u0001"+ - "\f\u0001}\t\u0001\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001"+ - "\u0002\u0001\u0002\u0003\u0002\u0085\b\u0002\u0001\u0003\u0001\u0003\u0001"+ + "7\u00077\u00028\u00078\u00029\u00079\u0001\u0000\u0001\u0000\u0001\u0000"+ + "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001"+ + "\u0005\u0001~\b\u0001\n\u0001\f\u0001\u0081\t\u0001\u0001\u0002\u0001"+ + "\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0003\u0002\u0089"+ + "\b\u0002\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001"+ "\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001"+ - "\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0003"+ - "\u0003\u0095\b\u0003\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0005\u0001"+ - "\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0003"+ - "\u0005\u00a1\b\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001"+ - "\u0005\u0005\u0005\u00a8\b\u0005\n\u0005\f\u0005\u00ab\t\u0005\u0001\u0005"+ - "\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0003\u0005\u00b2\b\u0005"+ - "\u0001\u0005\u0001\u0005\u0003\u0005\u00b6\b\u0005\u0001\u0005\u0001\u0005"+ - "\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0005\u0005\u00be\b\u0005"+ - "\n\u0005\f\u0005\u00c1\t\u0005\u0001\u0006\u0001\u0006\u0003\u0006\u00c5"+ - "\b\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0003"+ - "\u0006\u00cc\b\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0003\u0006\u00d1"+ - "\b\u0006\u0001\u0007\u0001\u0007\u0001\u0007\u0001\u0007\u0001\u0007\u0003"+ - "\u0007\u00d8\b\u0007\u0001\b\u0001\b\u0001\b\u0001\b\u0003\b\u00de\b\b"+ - "\u0001\b\u0001\b\u0001\b\u0001\b\u0001\b\u0001\b\u0005\b\u00e6\b\b\n\b"+ - "\f\b\u00e9\t\b\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t"+ - "\u0001\t\u0003\t\u00f3\b\t\u0001\t\u0001\t\u0001\t\u0005\t\u00f8\b\t\n"+ - "\t\f\t\u00fb\t\t\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0005"+ - "\n\u0103\b\n\n\n\f\n\u0106\t\n\u0003\n\u0108\b\n\u0001\n\u0001\n\u0001"+ - "\u000b\u0001\u000b\u0001\f\u0001\f\u0001\f\u0001\r\u0001\r\u0001\r\u0005"+ - "\r\u0114\b\r\n\r\f\r\u0117\t\r\u0001\u000e\u0001\u000e\u0001\u000e\u0001"+ - "\u000e\u0001\u000e\u0003\u000e\u011e\b\u000e\u0001\u000f\u0001\u000f\u0001"+ - "\u000f\u0001\u000f\u0005\u000f\u0124\b\u000f\n\u000f\f\u000f\u0127\t\u000f"+ - "\u0001\u000f\u0003\u000f\u012a\b\u000f\u0001\u0010\u0001\u0010\u0001\u0011"+ - "\u0001\u0011\u0003\u0011\u0130\b\u0011\u0001\u0012\u0001\u0012\u0001\u0012"+ - "\u0001\u0012\u0005\u0012\u0136\b\u0012\n\u0012\f\u0012\u0139\t\u0012\u0001"+ - "\u0013\u0001\u0013\u0001\u0013\u0001\u0013\u0001\u0014\u0001\u0014\u0001"+ - 
"\u0014\u0001\u0014\u0005\u0014\u0143\b\u0014\n\u0014\f\u0014\u0146\t\u0014"+ - "\u0001\u0014\u0003\u0014\u0149\b\u0014\u0001\u0014\u0001\u0014\u0003\u0014"+ - "\u014d\b\u0014\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0016\u0001\u0016"+ - "\u0003\u0016\u0154\b\u0016\u0001\u0016\u0001\u0016\u0003\u0016\u0158\b"+ - "\u0016\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0003\u0017\u015e"+ - "\b\u0017\u0001\u0018\u0001\u0018\u0001\u0018\u0005\u0018\u0163\b\u0018"+ - "\n\u0018\f\u0018\u0166\t\u0018\u0001\u0019\u0001\u0019\u0001\u0019\u0005"+ - "\u0019\u016b\b\u0019\n\u0019\f\u0019\u016e\t\u0019\u0001\u001a\u0001\u001a"+ - "\u0001\u001a\u0005\u001a\u0173\b\u001a\n\u001a\f\u001a\u0176\t\u001a\u0001"+ - "\u001b\u0001\u001b\u0001\u001c\u0001\u001c\u0001\u001d\u0001\u001d\u0001"+ - "\u001d\u0001\u001d\u0001\u001d\u0001\u001d\u0001\u001d\u0001\u001d\u0001"+ - "\u001d\u0001\u001d\u0001\u001d\u0001\u001d\u0001\u001d\u0005\u001d\u0189"+ - "\b\u001d\n\u001d\f\u001d\u018c\t\u001d\u0001\u001d\u0001\u001d\u0001\u001d"+ - "\u0001\u001d\u0001\u001d\u0001\u001d\u0005\u001d\u0194\b\u001d\n\u001d"+ - "\f\u001d\u0197\t\u001d\u0001\u001d\u0001\u001d\u0001\u001d\u0001\u001d"+ - "\u0001\u001d\u0001\u001d\u0005\u001d\u019f\b\u001d\n\u001d\f\u001d\u01a2"+ - "\t\u001d\u0001\u001d\u0001\u001d\u0003\u001d\u01a6\b\u001d\u0001\u001e"+ - "\u0001\u001e\u0003\u001e\u01aa\b\u001e\u0001\u001f\u0001\u001f\u0001\u001f"+ - "\u0001 \u0001 \u0001 \u0001 \u0005 \u01b3\b \n \f \u01b6\t \u0001!\u0001"+ - "!\u0003!\u01ba\b!\u0001!\u0001!\u0003!\u01be\b!\u0001\"\u0001\"\u0001"+ - "\"\u0001#\u0001#\u0001#\u0001$\u0001$\u0001$\u0001$\u0005$\u01ca\b$\n"+ - "$\f$\u01cd\t$\u0001%\u0001%\u0001%\u0001%\u0001&\u0001&\u0001&\u0001&"+ - "\u0003&\u01d7\b&\u0001\'\u0001\'\u0001\'\u0001\'\u0001(\u0001(\u0001("+ - "\u0001)\u0001)\u0001)\u0005)\u01e3\b)\n)\f)\u01e6\t)\u0001*\u0001*\u0001"+ - "*\u0001*\u0001+\u0001+\u0001,\u0001,\u0003,\u01f0\b,\u0001-\u0003-\u01f3"+ - "\b-\u0001-\u0001-\u0001.\u0003.\u01f8\b.\u0001.\u0001.\u0001/\u0001/\u0001"+ - "0\u00010\u00011\u00011\u00011\u00012\u00012\u00012\u00012\u00013\u0001"+ - "3\u00013\u00014\u00014\u00014\u00015\u00015\u00015\u00015\u00035\u0211"+ - "\b5\u00015\u00015\u00015\u00015\u00055\u0217\b5\n5\f5\u021a\t5\u00035"+ - "\u021c\b5\u00016\u00016\u00016\u00036\u0221\b6\u00016\u00016\u00017\u0001"+ - "7\u00017\u00017\u00017\u00017\u0000\u0004\u0002\n\u0010\u00128\u0000\u0002"+ - "\u0004\u0006\b\n\f\u000e\u0010\u0012\u0014\u0016\u0018\u001a\u001c\u001e"+ - " \"$&(*,.02468:<>@BDFHJLNPRTVXZ\\^`bdfhjln\u0000\u0007\u0001\u0000?@\u0001"+ - "\u0000AC\u0001\u0000GH\u0002\u0000##\'\'\u0001\u0000*+\u0002\u0000))7"+ - "7\u0002\u000088:>\u0244\u0000p\u0001\u0000\u0000\u0000\u0002s\u0001\u0000"+ - "\u0000\u0000\u0004\u0084\u0001\u0000\u0000\u0000\u0006\u0094\u0001\u0000"+ - "\u0000\u0000\b\u0096\u0001\u0000\u0000\u0000\n\u00b5\u0001\u0000\u0000"+ - "\u0000\f\u00d0\u0001\u0000\u0000\u0000\u000e\u00d7\u0001\u0000\u0000\u0000"+ - "\u0010\u00dd\u0001\u0000\u0000\u0000\u0012\u00f2\u0001\u0000\u0000\u0000"+ - "\u0014\u00fc\u0001\u0000\u0000\u0000\u0016\u010b\u0001\u0000\u0000\u0000"+ - "\u0018\u010d\u0001\u0000\u0000\u0000\u001a\u0110\u0001\u0000\u0000\u0000"+ - "\u001c\u011d\u0001\u0000\u0000\u0000\u001e\u011f\u0001\u0000\u0000\u0000"+ - " \u012b\u0001\u0000\u0000\u0000\"\u012f\u0001\u0000\u0000\u0000$\u0131"+ - "\u0001\u0000\u0000\u0000&\u013a\u0001\u0000\u0000\u0000(\u013e\u0001\u0000"+ - "\u0000\u0000*\u014e\u0001\u0000\u0000\u0000,\u0151\u0001\u0000\u0000\u0000"+ - 
".\u0159\u0001\u0000\u0000\u00000\u015f\u0001\u0000\u0000\u00002\u0167"+ - "\u0001\u0000\u0000\u00004\u016f\u0001\u0000\u0000\u00006\u0177\u0001\u0000"+ - "\u0000\u00008\u0179\u0001\u0000\u0000\u0000:\u01a5\u0001\u0000\u0000\u0000"+ - "<\u01a9\u0001\u0000\u0000\u0000>\u01ab\u0001\u0000\u0000\u0000@\u01ae"+ - "\u0001\u0000\u0000\u0000B\u01b7\u0001\u0000\u0000\u0000D\u01bf\u0001\u0000"+ - "\u0000\u0000F\u01c2\u0001\u0000\u0000\u0000H\u01c5\u0001\u0000\u0000\u0000"+ - "J\u01ce\u0001\u0000\u0000\u0000L\u01d2\u0001\u0000\u0000\u0000N\u01d8"+ - "\u0001\u0000\u0000\u0000P\u01dc\u0001\u0000\u0000\u0000R\u01df\u0001\u0000"+ - "\u0000\u0000T\u01e7\u0001\u0000\u0000\u0000V\u01eb\u0001\u0000\u0000\u0000"+ - "X\u01ef\u0001\u0000\u0000\u0000Z\u01f2\u0001\u0000\u0000\u0000\\\u01f7"+ - "\u0001\u0000\u0000\u0000^\u01fb\u0001\u0000\u0000\u0000`\u01fd\u0001\u0000"+ - "\u0000\u0000b\u01ff\u0001\u0000\u0000\u0000d\u0202\u0001\u0000\u0000\u0000"+ - "f\u0206\u0001\u0000\u0000\u0000h\u0209\u0001\u0000\u0000\u0000j\u020c"+ - "\u0001\u0000\u0000\u0000l\u0220\u0001\u0000\u0000\u0000n\u0224\u0001\u0000"+ - "\u0000\u0000pq\u0003\u0002\u0001\u0000qr\u0005\u0000\u0000\u0001r\u0001"+ - "\u0001\u0000\u0000\u0000st\u0006\u0001\uffff\uffff\u0000tu\u0003\u0004"+ - "\u0002\u0000u{\u0001\u0000\u0000\u0000vw\n\u0001\u0000\u0000wx\u0005\u001d"+ - "\u0000\u0000xz\u0003\u0006\u0003\u0000yv\u0001\u0000\u0000\u0000z}\u0001"+ - "\u0000\u0000\u0000{y\u0001\u0000\u0000\u0000{|\u0001\u0000\u0000\u0000"+ - "|\u0003\u0001\u0000\u0000\u0000}{\u0001\u0000\u0000\u0000~\u0085\u0003"+ - "b1\u0000\u007f\u0085\u0003\u001e\u000f\u0000\u0080\u0085\u0003\u0018\f"+ - "\u0000\u0081\u0085\u0003(\u0014\u0000\u0082\u0085\u0003f3\u0000\u0083"+ - "\u0085\u0003h4\u0000\u0084~\u0001\u0000\u0000\u0000\u0084\u007f\u0001"+ - "\u0000\u0000\u0000\u0084\u0080\u0001\u0000\u0000\u0000\u0084\u0081\u0001"+ - "\u0000\u0000\u0000\u0084\u0082\u0001\u0000\u0000\u0000\u0084\u0083\u0001"+ - "\u0000\u0000\u0000\u0085\u0005\u0001\u0000\u0000\u0000\u0086\u0095\u0003"+ - "*\u0015\u0000\u0087\u0095\u0003.\u0017\u0000\u0088\u0095\u0003>\u001f"+ - "\u0000\u0089\u0095\u0003n7\u0000\u008a\u0095\u0003D\"\u0000\u008b\u0095"+ - "\u0003@ \u0000\u008c\u0095\u0003,\u0016\u0000\u008d\u0095\u0003\b\u0004"+ - "\u0000\u008e\u0095\u0003F#\u0000\u008f\u0095\u0003H$\u0000\u0090\u0095"+ - "\u0003L&\u0000\u0091\u0095\u0003N\'\u0000\u0092\u0095\u0003j5\u0000\u0093"+ - "\u0095\u0003P(\u0000\u0094\u0086\u0001\u0000\u0000\u0000\u0094\u0087\u0001"+ - "\u0000\u0000\u0000\u0094\u0088\u0001\u0000\u0000\u0000\u0094\u0089\u0001"+ - "\u0000\u0000\u0000\u0094\u008a\u0001\u0000\u0000\u0000\u0094\u008b\u0001"+ - "\u0000\u0000\u0000\u0094\u008c\u0001\u0000\u0000\u0000\u0094\u008d\u0001"+ - "\u0000\u0000\u0000\u0094\u008e\u0001\u0000\u0000\u0000\u0094\u008f\u0001"+ - "\u0000\u0000\u0000\u0094\u0090\u0001\u0000\u0000\u0000\u0094\u0091\u0001"+ - "\u0000\u0000\u0000\u0094\u0092\u0001\u0000\u0000\u0000\u0094\u0093\u0001"+ - "\u0000\u0000\u0000\u0095\u0007\u0001\u0000\u0000\u0000\u0096\u0097\u0005"+ - "\u0014\u0000\u0000\u0097\u0098\u0003\n\u0005\u0000\u0098\t\u0001\u0000"+ - "\u0000\u0000\u0099\u009a\u0006\u0005\uffff\uffff\u0000\u009a\u009b\u0005"+ - "0\u0000\u0000\u009b\u00b6\u0003\n\u0005\u0007\u009c\u00b6\u0003\u000e"+ - "\u0007\u0000\u009d\u00b6\u0003\f\u0006\u0000\u009e\u00a0\u0003\u000e\u0007"+ - "\u0000\u009f\u00a1\u00050\u0000\u0000\u00a0\u009f\u0001\u0000\u0000\u0000"+ - "\u00a0\u00a1\u0001\u0000\u0000\u0000\u00a1\u00a2\u0001\u0000\u0000\u0000"+ - 
"\u00a2\u00a3\u0005-\u0000\u0000\u00a3\u00a4\u0005,\u0000\u0000\u00a4\u00a9"+ - "\u0003\u000e\u0007\u0000\u00a5\u00a6\u0005&\u0000\u0000\u00a6\u00a8\u0003"+ - "\u000e\u0007\u0000\u00a7\u00a5\u0001\u0000\u0000\u0000\u00a8\u00ab\u0001"+ - "\u0000\u0000\u0000\u00a9\u00a7\u0001\u0000\u0000\u0000\u00a9\u00aa\u0001"+ - "\u0000\u0000\u0000\u00aa\u00ac\u0001\u0000\u0000\u0000\u00ab\u00a9\u0001"+ - "\u0000\u0000\u0000\u00ac\u00ad\u00056\u0000\u0000\u00ad\u00b6\u0001\u0000"+ - "\u0000\u0000\u00ae\u00af\u0003\u000e\u0007\u0000\u00af\u00b1\u0005.\u0000"+ - "\u0000\u00b0\u00b2\u00050\u0000\u0000\u00b1\u00b0\u0001\u0000\u0000\u0000"+ - "\u00b1\u00b2\u0001\u0000\u0000\u0000\u00b2\u00b3\u0001\u0000\u0000\u0000"+ - "\u00b3\u00b4\u00051\u0000\u0000\u00b4\u00b6\u0001\u0000\u0000\u0000\u00b5"+ - "\u0099\u0001\u0000\u0000\u0000\u00b5\u009c\u0001\u0000\u0000\u0000\u00b5"+ - "\u009d\u0001\u0000\u0000\u0000\u00b5\u009e\u0001\u0000\u0000\u0000\u00b5"+ - "\u00ae\u0001\u0000\u0000\u0000\u00b6\u00bf\u0001\u0000\u0000\u0000\u00b7"+ - "\u00b8\n\u0004\u0000\u0000\u00b8\u00b9\u0005\"\u0000\u0000\u00b9\u00be"+ - "\u0003\n\u0005\u0005\u00ba\u00bb\n\u0003\u0000\u0000\u00bb\u00bc\u0005"+ - "3\u0000\u0000\u00bc\u00be\u0003\n\u0005\u0004\u00bd\u00b7\u0001\u0000"+ - "\u0000\u0000\u00bd\u00ba\u0001\u0000\u0000\u0000\u00be\u00c1\u0001\u0000"+ - "\u0000\u0000\u00bf\u00bd\u0001\u0000\u0000\u0000\u00bf\u00c0\u0001\u0000"+ - "\u0000\u0000\u00c0\u000b\u0001\u0000\u0000\u0000\u00c1\u00bf\u0001\u0000"+ - "\u0000\u0000\u00c2\u00c4\u0003\u000e\u0007\u0000\u00c3\u00c5\u00050\u0000"+ - "\u0000\u00c4\u00c3\u0001\u0000\u0000\u0000\u00c4\u00c5\u0001\u0000\u0000"+ - "\u0000\u00c5\u00c6\u0001\u0000\u0000\u0000\u00c6\u00c7\u0005/\u0000\u0000"+ - "\u00c7\u00c8\u0003^/\u0000\u00c8\u00d1\u0001\u0000\u0000\u0000\u00c9\u00cb"+ - "\u0003\u000e\u0007\u0000\u00ca\u00cc\u00050\u0000\u0000\u00cb\u00ca\u0001"+ - "\u0000\u0000\u0000\u00cb\u00cc\u0001\u0000\u0000\u0000\u00cc\u00cd\u0001"+ - "\u0000\u0000\u0000\u00cd\u00ce\u00055\u0000\u0000\u00ce\u00cf\u0003^/"+ - "\u0000\u00cf\u00d1\u0001\u0000\u0000\u0000\u00d0\u00c2\u0001\u0000\u0000"+ - "\u0000\u00d0\u00c9\u0001\u0000\u0000\u0000\u00d1\r\u0001\u0000\u0000\u0000"+ - "\u00d2\u00d8\u0003\u0010\b\u0000\u00d3\u00d4\u0003\u0010\b\u0000\u00d4"+ - "\u00d5\u0003`0\u0000\u00d5\u00d6\u0003\u0010\b\u0000\u00d6\u00d8\u0001"+ - "\u0000\u0000\u0000\u00d7\u00d2\u0001\u0000\u0000\u0000\u00d7\u00d3\u0001"+ - "\u0000\u0000\u0000\u00d8\u000f\u0001\u0000\u0000\u0000\u00d9\u00da\u0006"+ - "\b\uffff\uffff\u0000\u00da\u00de\u0003\u0012\t\u0000\u00db\u00dc\u0007"+ - "\u0000\u0000\u0000\u00dc\u00de\u0003\u0010\b\u0003\u00dd\u00d9\u0001\u0000"+ - "\u0000\u0000\u00dd\u00db\u0001\u0000\u0000\u0000\u00de\u00e7\u0001\u0000"+ - "\u0000\u0000\u00df\u00e0\n\u0002\u0000\u0000\u00e0\u00e1\u0007\u0001\u0000"+ - "\u0000\u00e1\u00e6\u0003\u0010\b\u0003\u00e2\u00e3\n\u0001\u0000\u0000"+ - "\u00e3\u00e4\u0007\u0000\u0000\u0000\u00e4\u00e6\u0003\u0010\b\u0002\u00e5"+ - "\u00df\u0001\u0000\u0000\u0000\u00e5\u00e2\u0001\u0000\u0000\u0000\u00e6"+ - "\u00e9\u0001\u0000\u0000\u0000\u00e7\u00e5\u0001\u0000\u0000\u0000\u00e7"+ - "\u00e8\u0001\u0000\u0000\u0000\u00e8\u0011\u0001\u0000\u0000\u0000\u00e9"+ - "\u00e7\u0001\u0000\u0000\u0000\u00ea\u00eb\u0006\t\uffff\uffff\u0000\u00eb"+ - "\u00f3\u0003:\u001d\u0000\u00ec\u00f3\u00030\u0018\u0000\u00ed\u00f3\u0003"+ - "\u0014\n\u0000\u00ee\u00ef\u0005,\u0000\u0000\u00ef\u00f0\u0003\n\u0005"+ - "\u0000\u00f0\u00f1\u00056\u0000\u0000\u00f1\u00f3\u0001\u0000\u0000\u0000"+ - 
"\u00f2\u00ea\u0001\u0000\u0000\u0000\u00f2\u00ec\u0001\u0000\u0000\u0000"+ - "\u00f2\u00ed\u0001\u0000\u0000\u0000\u00f2\u00ee\u0001\u0000\u0000\u0000"+ - "\u00f3\u00f9\u0001\u0000\u0000\u0000\u00f4\u00f5\n\u0001\u0000\u0000\u00f5"+ - "\u00f6\u0005%\u0000\u0000\u00f6\u00f8\u0003\u0016\u000b\u0000\u00f7\u00f4"+ - "\u0001\u0000\u0000\u0000\u00f8\u00fb\u0001\u0000\u0000\u0000\u00f9\u00f7"+ - "\u0001\u0000\u0000\u0000\u00f9\u00fa\u0001\u0000\u0000\u0000\u00fa\u0013"+ - "\u0001\u0000\u0000\u0000\u00fb\u00f9\u0001\u0000\u0000\u0000\u00fc\u00fd"+ - "\u00036\u001b\u0000\u00fd\u0107\u0005,\u0000\u0000\u00fe\u0108\u0005A"+ - "\u0000\u0000\u00ff\u0104\u0003\n\u0005\u0000\u0100\u0101\u0005&\u0000"+ - "\u0000\u0101\u0103\u0003\n\u0005\u0000\u0102\u0100\u0001\u0000\u0000\u0000"+ - "\u0103\u0106\u0001\u0000\u0000\u0000\u0104\u0102\u0001\u0000\u0000\u0000"+ - "\u0104\u0105\u0001\u0000\u0000\u0000\u0105\u0108\u0001\u0000\u0000\u0000"+ - "\u0106\u0104\u0001\u0000\u0000\u0000\u0107\u00fe\u0001\u0000\u0000\u0000"+ - "\u0107\u00ff\u0001\u0000\u0000\u0000\u0107\u0108\u0001\u0000\u0000\u0000"+ - "\u0108\u0109\u0001\u0000\u0000\u0000\u0109\u010a\u00056\u0000\u0000\u010a"+ - "\u0015\u0001\u0000\u0000\u0000\u010b\u010c\u00036\u001b\u0000\u010c\u0017"+ - "\u0001\u0000\u0000\u0000\u010d\u010e\u0005\u0010\u0000\u0000\u010e\u010f"+ - "\u0003\u001a\r\u0000\u010f\u0019\u0001\u0000\u0000\u0000\u0110\u0115\u0003"+ - "\u001c\u000e\u0000\u0111\u0112\u0005&\u0000\u0000\u0112\u0114\u0003\u001c"+ - "\u000e\u0000\u0113\u0111\u0001\u0000\u0000\u0000\u0114\u0117\u0001\u0000"+ - "\u0000\u0000\u0115\u0113\u0001\u0000\u0000\u0000\u0115\u0116\u0001\u0000"+ - "\u0000\u0000\u0116\u001b\u0001\u0000\u0000\u0000\u0117\u0115\u0001\u0000"+ - "\u0000\u0000\u0118\u011e\u0003\n\u0005\u0000\u0119\u011a\u00030\u0018"+ - "\u0000\u011a\u011b\u0005$\u0000\u0000\u011b\u011c\u0003\n\u0005\u0000"+ - "\u011c\u011e\u0001\u0000\u0000\u0000\u011d\u0118\u0001\u0000\u0000\u0000"+ - "\u011d\u0119\u0001\u0000\u0000\u0000\u011e\u001d\u0001\u0000\u0000\u0000"+ - "\u011f\u0120\u0005\u0006\u0000\u0000\u0120\u0125\u0003 \u0010\u0000\u0121"+ - "\u0122\u0005&\u0000\u0000\u0122\u0124\u0003 \u0010\u0000\u0123\u0121\u0001"+ - "\u0000\u0000\u0000\u0124\u0127\u0001\u0000\u0000\u0000\u0125\u0123\u0001"+ - "\u0000\u0000\u0000\u0125\u0126\u0001\u0000\u0000\u0000\u0126\u0129\u0001"+ - "\u0000\u0000\u0000\u0127\u0125\u0001\u0000\u0000\u0000\u0128\u012a\u0003"+ - "\"\u0011\u0000\u0129\u0128\u0001\u0000\u0000\u0000\u0129\u012a\u0001\u0000"+ - "\u0000\u0000\u012a\u001f\u0001\u0000\u0000\u0000\u012b\u012c\u0005\u0019"+ - "\u0000\u0000\u012c!\u0001\u0000\u0000\u0000\u012d\u0130\u0003$\u0012\u0000"+ - "\u012e\u0130\u0003&\u0013\u0000\u012f\u012d\u0001\u0000\u0000\u0000\u012f"+ - "\u012e\u0001\u0000\u0000\u0000\u0130#\u0001\u0000\u0000\u0000\u0131\u0132"+ - "\u0005L\u0000\u0000\u0132\u0137\u0003 \u0010\u0000\u0133\u0134\u0005&"+ - "\u0000\u0000\u0134\u0136\u0003 \u0010\u0000\u0135\u0133\u0001\u0000\u0000"+ - "\u0000\u0136\u0139\u0001\u0000\u0000\u0000\u0137\u0135\u0001\u0000\u0000"+ - "\u0000\u0137\u0138\u0001\u0000\u0000\u0000\u0138%\u0001\u0000\u0000\u0000"+ - "\u0139\u0137\u0001\u0000\u0000\u0000\u013a\u013b\u0005E\u0000\u0000\u013b"+ - "\u013c\u0003$\u0012\u0000\u013c\u013d\u0005F\u0000\u0000\u013d\'\u0001"+ - "\u0000\u0000\u0000\u013e\u013f\u0005\r\u0000\u0000\u013f\u0144\u0003 "+ - "\u0010\u0000\u0140\u0141\u0005&\u0000\u0000\u0141\u0143\u0003 \u0010\u0000"+ - "\u0142\u0140\u0001\u0000\u0000\u0000\u0143\u0146\u0001\u0000\u0000\u0000"+ - 
"\u0144\u0142\u0001\u0000\u0000\u0000\u0144\u0145\u0001\u0000\u0000\u0000"+ - "\u0145\u0148\u0001\u0000\u0000\u0000\u0146\u0144\u0001\u0000\u0000\u0000"+ - "\u0147\u0149\u0003\u001a\r\u0000\u0148\u0147\u0001\u0000\u0000\u0000\u0148"+ - "\u0149\u0001\u0000\u0000\u0000\u0149\u014c\u0001\u0000\u0000\u0000\u014a"+ - "\u014b\u0005!\u0000\u0000\u014b\u014d\u0003\u001a\r\u0000\u014c\u014a"+ - "\u0001\u0000\u0000\u0000\u014c\u014d\u0001\u0000\u0000\u0000\u014d)\u0001"+ - "\u0000\u0000\u0000\u014e\u014f\u0005\u0004\u0000\u0000\u014f\u0150\u0003"+ - "\u001a\r\u0000\u0150+\u0001\u0000\u0000\u0000\u0151\u0153\u0005\u0013"+ - "\u0000\u0000\u0152\u0154\u0003\u001a\r\u0000\u0153\u0152\u0001\u0000\u0000"+ - "\u0000\u0153\u0154\u0001\u0000\u0000\u0000\u0154\u0157\u0001\u0000\u0000"+ - "\u0000\u0155\u0156\u0005!\u0000\u0000\u0156\u0158\u0003\u001a\r\u0000"+ - "\u0157\u0155\u0001\u0000\u0000\u0000\u0157\u0158\u0001\u0000\u0000\u0000"+ - "\u0158-\u0001\u0000\u0000\u0000\u0159\u015a\u0005\b\u0000\u0000\u015a"+ - "\u015d\u0003\u001a\r\u0000\u015b\u015c\u0005!\u0000\u0000\u015c\u015e"+ - "\u0003\u001a\r\u0000\u015d\u015b\u0001\u0000\u0000\u0000\u015d\u015e\u0001"+ - "\u0000\u0000\u0000\u015e/\u0001\u0000\u0000\u0000\u015f\u0164\u00036\u001b"+ - "\u0000\u0160\u0161\u0005(\u0000\u0000\u0161\u0163\u00036\u001b\u0000\u0162"+ - "\u0160\u0001\u0000\u0000\u0000\u0163\u0166\u0001\u0000\u0000\u0000\u0164"+ - "\u0162\u0001\u0000\u0000\u0000\u0164\u0165\u0001\u0000\u0000\u0000\u0165"+ - "1\u0001\u0000\u0000\u0000\u0166\u0164\u0001\u0000\u0000\u0000\u0167\u016c"+ - "\u00038\u001c\u0000\u0168\u0169\u0005(\u0000\u0000\u0169\u016b\u00038"+ - "\u001c\u0000\u016a\u0168\u0001\u0000\u0000\u0000\u016b\u016e\u0001\u0000"+ - "\u0000\u0000\u016c\u016a\u0001\u0000\u0000\u0000\u016c\u016d\u0001\u0000"+ - "\u0000\u0000\u016d3\u0001\u0000\u0000\u0000\u016e\u016c\u0001\u0000\u0000"+ - "\u0000\u016f\u0174\u00032\u0019\u0000\u0170\u0171\u0005&\u0000\u0000\u0171"+ - "\u0173\u00032\u0019\u0000\u0172\u0170\u0001\u0000\u0000\u0000\u0173\u0176"+ - "\u0001\u0000\u0000\u0000\u0174\u0172\u0001\u0000\u0000\u0000\u0174\u0175"+ - "\u0001\u0000\u0000\u0000\u01755\u0001\u0000\u0000\u0000\u0176\u0174\u0001"+ - "\u0000\u0000\u0000\u0177\u0178\u0007\u0002\u0000\u0000\u01787\u0001\u0000"+ - "\u0000\u0000\u0179\u017a\u0005P\u0000\u0000\u017a9\u0001\u0000\u0000\u0000"+ - "\u017b\u01a6\u00051\u0000\u0000\u017c\u017d\u0003\\.\u0000\u017d\u017e"+ - "\u0005G\u0000\u0000\u017e\u01a6\u0001\u0000\u0000\u0000\u017f\u01a6\u0003"+ - "Z-\u0000\u0180\u01a6\u0003\\.\u0000\u0181\u01a6\u0003V+\u0000\u0182\u01a6"+ - "\u0003<\u001e\u0000\u0183\u01a6\u0003^/\u0000\u0184\u0185\u0005E\u0000"+ - "\u0000\u0185\u018a\u0003X,\u0000\u0186\u0187\u0005&\u0000\u0000\u0187"+ - "\u0189\u0003X,\u0000\u0188\u0186\u0001\u0000\u0000\u0000\u0189\u018c\u0001"+ - "\u0000\u0000\u0000\u018a\u0188\u0001\u0000\u0000\u0000\u018a\u018b\u0001"+ - "\u0000\u0000\u0000\u018b\u018d\u0001\u0000\u0000\u0000\u018c\u018a\u0001"+ - "\u0000\u0000\u0000\u018d\u018e\u0005F\u0000\u0000\u018e\u01a6\u0001\u0000"+ - "\u0000\u0000\u018f\u0190\u0005E\u0000\u0000\u0190\u0195\u0003V+\u0000"+ - "\u0191\u0192\u0005&\u0000\u0000\u0192\u0194\u0003V+\u0000\u0193\u0191"+ - "\u0001\u0000\u0000\u0000\u0194\u0197\u0001\u0000\u0000\u0000\u0195\u0193"+ - "\u0001\u0000\u0000\u0000\u0195\u0196\u0001\u0000\u0000\u0000\u0196\u0198"+ - "\u0001\u0000\u0000\u0000\u0197\u0195\u0001\u0000\u0000\u0000\u0198\u0199"+ - "\u0005F\u0000\u0000\u0199\u01a6\u0001\u0000\u0000\u0000\u019a\u019b\u0005"+ - 
"E\u0000\u0000\u019b\u01a0\u0003^/\u0000\u019c\u019d\u0005&\u0000\u0000"+ - "\u019d\u019f\u0003^/\u0000\u019e\u019c\u0001\u0000\u0000\u0000\u019f\u01a2"+ - "\u0001\u0000\u0000\u0000\u01a0\u019e\u0001\u0000\u0000\u0000\u01a0\u01a1"+ - "\u0001\u0000\u0000\u0000\u01a1\u01a3\u0001\u0000\u0000\u0000\u01a2\u01a0"+ - "\u0001\u0000\u0000\u0000\u01a3\u01a4\u0005F\u0000\u0000\u01a4\u01a6\u0001"+ - "\u0000\u0000\u0000\u01a5\u017b\u0001\u0000\u0000\u0000\u01a5\u017c\u0001"+ - "\u0000\u0000\u0000\u01a5\u017f\u0001\u0000\u0000\u0000\u01a5\u0180\u0001"+ - "\u0000\u0000\u0000\u01a5\u0181\u0001\u0000\u0000\u0000\u01a5\u0182\u0001"+ - "\u0000\u0000\u0000\u01a5\u0183\u0001\u0000\u0000\u0000\u01a5\u0184\u0001"+ - "\u0000\u0000\u0000\u01a5\u018f\u0001\u0000\u0000\u0000\u01a5\u019a\u0001"+ - "\u0000\u0000\u0000\u01a6;\u0001\u0000\u0000\u0000\u01a7\u01aa\u00054\u0000"+ - "\u0000\u01a8\u01aa\u0005D\u0000\u0000\u01a9\u01a7\u0001\u0000\u0000\u0000"+ - "\u01a9\u01a8\u0001\u0000\u0000\u0000\u01aa=\u0001\u0000\u0000\u0000\u01ab"+ - "\u01ac\u0005\n\u0000\u0000\u01ac\u01ad\u0005\u001f\u0000\u0000\u01ad?"+ - "\u0001\u0000\u0000\u0000\u01ae\u01af\u0005\u0012\u0000\u0000\u01af\u01b4"+ - "\u0003B!\u0000\u01b0\u01b1\u0005&\u0000\u0000\u01b1\u01b3\u0003B!\u0000"+ - "\u01b2\u01b0\u0001\u0000\u0000\u0000\u01b3\u01b6\u0001\u0000\u0000\u0000"+ - "\u01b4\u01b2\u0001\u0000\u0000\u0000\u01b4\u01b5\u0001\u0000\u0000\u0000"+ - "\u01b5A\u0001\u0000\u0000\u0000\u01b6\u01b4\u0001\u0000\u0000\u0000\u01b7"+ - "\u01b9\u0003\n\u0005\u0000\u01b8\u01ba\u0007\u0003\u0000\u0000\u01b9\u01b8"+ - "\u0001\u0000\u0000\u0000\u01b9\u01ba\u0001\u0000\u0000\u0000\u01ba\u01bd"+ - "\u0001\u0000\u0000\u0000\u01bb\u01bc\u00052\u0000\u0000\u01bc\u01be\u0007"+ - "\u0004\u0000\u0000\u01bd\u01bb\u0001\u0000\u0000\u0000\u01bd\u01be\u0001"+ - "\u0000\u0000\u0000\u01beC\u0001\u0000\u0000\u0000\u01bf\u01c0\u0005\t"+ - "\u0000\u0000\u01c0\u01c1\u00034\u001a\u0000\u01c1E\u0001\u0000\u0000\u0000"+ - "\u01c2\u01c3\u0005\u0002\u0000\u0000\u01c3\u01c4\u00034\u001a\u0000\u01c4"+ - "G\u0001\u0000\u0000\u0000\u01c5\u01c6\u0005\u000f\u0000\u0000\u01c6\u01cb"+ - "\u0003J%\u0000\u01c7\u01c8\u0005&\u0000\u0000\u01c8\u01ca\u0003J%\u0000"+ - "\u01c9\u01c7\u0001\u0000\u0000\u0000\u01ca\u01cd\u0001\u0000\u0000\u0000"+ - "\u01cb\u01c9\u0001\u0000\u0000\u0000\u01cb\u01cc\u0001\u0000\u0000\u0000"+ - "\u01ccI\u0001\u0000\u0000\u0000\u01cd\u01cb\u0001\u0000\u0000\u0000\u01ce"+ - "\u01cf\u00032\u0019\u0000\u01cf\u01d0\u0005T\u0000\u0000\u01d0\u01d1\u0003"+ - "2\u0019\u0000\u01d1K\u0001\u0000\u0000\u0000\u01d2\u01d3\u0005\u0001\u0000"+ - "\u0000\u01d3\u01d4\u0003\u0012\t\u0000\u01d4\u01d6\u0003^/\u0000\u01d5"+ - "\u01d7\u0003R)\u0000\u01d6\u01d5\u0001\u0000\u0000\u0000\u01d6\u01d7\u0001"+ - "\u0000\u0000\u0000\u01d7M\u0001\u0000\u0000\u0000\u01d8\u01d9\u0005\u0007"+ - "\u0000\u0000\u01d9\u01da\u0003\u0012\t\u0000\u01da\u01db\u0003^/\u0000"+ - "\u01dbO\u0001\u0000\u0000\u0000\u01dc\u01dd\u0005\u000e\u0000\u0000\u01dd"+ - "\u01de\u00030\u0018\u0000\u01deQ\u0001\u0000\u0000\u0000\u01df\u01e4\u0003"+ - "T*\u0000\u01e0\u01e1\u0005&\u0000\u0000\u01e1\u01e3\u0003T*\u0000\u01e2"+ - "\u01e0\u0001\u0000\u0000\u0000\u01e3\u01e6\u0001\u0000\u0000\u0000\u01e4"+ - "\u01e2\u0001\u0000\u0000\u0000\u01e4\u01e5\u0001\u0000\u0000\u0000\u01e5"+ - "S\u0001\u0000\u0000\u0000\u01e6\u01e4\u0001\u0000\u0000\u0000\u01e7\u01e8"+ - "\u00036\u001b\u0000\u01e8\u01e9\u0005$\u0000\u0000\u01e9\u01ea\u0003:"+ - "\u001d\u0000\u01eaU\u0001\u0000\u0000\u0000\u01eb\u01ec\u0007\u0005\u0000"+ - 
"\u0000\u01ecW\u0001\u0000\u0000\u0000\u01ed\u01f0\u0003Z-\u0000\u01ee"+ - "\u01f0\u0003\\.\u0000\u01ef\u01ed\u0001\u0000\u0000\u0000\u01ef\u01ee"+ - "\u0001\u0000\u0000\u0000\u01f0Y\u0001\u0000\u0000\u0000\u01f1\u01f3\u0007"+ - "\u0000\u0000\u0000\u01f2\u01f1\u0001\u0000\u0000\u0000\u01f2\u01f3\u0001"+ - "\u0000\u0000\u0000\u01f3\u01f4\u0001\u0000\u0000\u0000\u01f4\u01f5\u0005"+ - " \u0000\u0000\u01f5[\u0001\u0000\u0000\u0000\u01f6\u01f8\u0007\u0000\u0000"+ - "\u0000\u01f7\u01f6\u0001\u0000\u0000\u0000\u01f7\u01f8\u0001\u0000\u0000"+ - "\u0000\u01f8\u01f9\u0001\u0000\u0000\u0000\u01f9\u01fa\u0005\u001f\u0000"+ - "\u0000\u01fa]\u0001\u0000\u0000\u0000\u01fb\u01fc\u0005\u001e\u0000\u0000"+ - "\u01fc_\u0001\u0000\u0000\u0000\u01fd\u01fe\u0007\u0006\u0000\u0000\u01fe"+ - "a\u0001\u0000\u0000\u0000\u01ff\u0200\u0005\u0005\u0000\u0000\u0200\u0201"+ - "\u0003d2\u0000\u0201c\u0001\u0000\u0000\u0000\u0202\u0203\u0005E\u0000"+ - "\u0000\u0203\u0204\u0003\u0002\u0001\u0000\u0204\u0205\u0005F\u0000\u0000"+ - "\u0205e\u0001\u0000\u0000\u0000\u0206\u0207\u0005\u0011\u0000\u0000\u0207"+ - "\u0208\u0005j\u0000\u0000\u0208g\u0001\u0000\u0000\u0000\u0209\u020a\u0005"+ - "\f\u0000\u0000\u020a\u020b\u0005n\u0000\u0000\u020bi\u0001\u0000\u0000"+ - "\u0000\u020c\u020d\u0005\u0003\u0000\u0000\u020d\u0210\u0005Z\u0000\u0000"+ - "\u020e\u020f\u0005X\u0000\u0000\u020f\u0211\u00032\u0019\u0000\u0210\u020e"+ - "\u0001\u0000\u0000\u0000\u0210\u0211\u0001\u0000\u0000\u0000\u0211\u021b"+ - "\u0001\u0000\u0000\u0000\u0212\u0213\u0005Y\u0000\u0000\u0213\u0218\u0003"+ - "l6\u0000\u0214\u0215\u0005&\u0000\u0000\u0215\u0217\u0003l6\u0000\u0216"+ - "\u0214\u0001\u0000\u0000\u0000\u0217\u021a\u0001\u0000\u0000\u0000\u0218"+ - "\u0216\u0001\u0000\u0000\u0000\u0218\u0219\u0001\u0000\u0000\u0000\u0219"+ - "\u021c\u0001\u0000\u0000\u0000\u021a\u0218\u0001\u0000\u0000\u0000\u021b"+ - "\u0212\u0001\u0000\u0000\u0000\u021b\u021c\u0001\u0000\u0000\u0000\u021c"+ - "k\u0001\u0000\u0000\u0000\u021d\u021e\u00032\u0019\u0000\u021e\u021f\u0005"+ - "$\u0000\u0000\u021f\u0221\u0001\u0000\u0000\u0000\u0220\u021d\u0001\u0000"+ - "\u0000\u0000\u0220\u0221\u0001\u0000\u0000\u0000\u0221\u0222\u0001\u0000"+ - "\u0000\u0000\u0222\u0223\u00032\u0019\u0000\u0223m\u0001\u0000\u0000\u0000"+ - "\u0224\u0225\u0005\u000b\u0000\u0000\u0225\u0226\u0005\u0019\u0000\u0000"+ - "\u0226\u0227\u0005X\u0000\u0000\u0227\u0228\u00034\u001a\u0000\u0228o"+ - "\u0001\u0000\u0000\u00005{\u0084\u0094\u00a0\u00a9\u00b1\u00b5\u00bd\u00bf"+ - "\u00c4\u00cb\u00d0\u00d7\u00dd\u00e5\u00e7\u00f2\u00f9\u0104\u0107\u0115"+ - "\u011d\u0125\u0129\u012f\u0137\u0144\u0148\u014c\u0153\u0157\u015d\u0164"+ - "\u016c\u0174\u018a\u0195\u01a0\u01a5\u01a9\u01b4\u01b9\u01bd\u01cb\u01d6"+ - "\u01e4\u01ef\u01f2\u01f7\u0210\u0218\u021b\u0220"; + "\u0003\u0001\u0003\u0001\u0003\u0003\u0003\u0099\b\u0003\u0001\u0004\u0001"+ + "\u0004\u0001\u0004\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001"+ + "\u0005\u0001\u0005\u0001\u0005\u0003\u0005\u00a5\b\u0005\u0001\u0005\u0001"+ + "\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0005\u0005\u00ac\b\u0005\n"+ + "\u0005\f\u0005\u00af\t\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001"+ + "\u0005\u0001\u0005\u0003\u0005\u00b6\b\u0005\u0001\u0005\u0001\u0005\u0003"+ + "\u0005\u00ba\b\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001"+ + "\u0005\u0001\u0005\u0005\u0005\u00c2\b\u0005\n\u0005\f\u0005\u00c5\t\u0005"+ + "\u0001\u0006\u0001\u0006\u0003\u0006\u00c9\b\u0006\u0001\u0006\u0001\u0006"+ + 
"\u0001\u0006\u0001\u0006\u0001\u0006\u0003\u0006\u00d0\b\u0006\u0001\u0006"+ + "\u0001\u0006\u0001\u0006\u0003\u0006\u00d5\b\u0006\u0001\u0007\u0001\u0007"+ + "\u0001\u0007\u0001\u0007\u0001\u0007\u0003\u0007\u00dc\b\u0007\u0001\b"+ + "\u0001\b\u0001\b\u0001\b\u0003\b\u00e2\b\b\u0001\b\u0001\b\u0001\b\u0001"+ + "\b\u0001\b\u0001\b\u0005\b\u00ea\b\b\n\b\f\b\u00ed\t\b\u0001\t\u0001\t"+ + "\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0003\t\u00f7\b\t\u0001"+ + "\t\u0001\t\u0001\t\u0005\t\u00fc\b\t\n\t\f\t\u00ff\t\t\u0001\n\u0001\n"+ + "\u0001\n\u0001\n\u0001\n\u0001\n\u0005\n\u0107\b\n\n\n\f\n\u010a\t\n\u0003"+ + "\n\u010c\b\n\u0001\n\u0001\n\u0001\u000b\u0001\u000b\u0001\f\u0001\f\u0001"+ + "\f\u0001\r\u0001\r\u0001\r\u0005\r\u0118\b\r\n\r\f\r\u011b\t\r\u0001\u000e"+ + "\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000e\u0003\u000e\u0122\b\u000e"+ + "\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0005\u000f\u0128\b\u000f"+ + "\n\u000f\f\u000f\u012b\t\u000f\u0001\u000f\u0003\u000f\u012e\b\u000f\u0001"+ + "\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0003\u0010\u0135"+ + "\b\u0010\u0001\u0011\u0001\u0011\u0001\u0012\u0001\u0012\u0001\u0013\u0001"+ + "\u0013\u0003\u0013\u013d\b\u0013\u0001\u0014\u0001\u0014\u0001\u0014\u0001"+ + "\u0014\u0005\u0014\u0143\b\u0014\n\u0014\f\u0014\u0146\t\u0014\u0001\u0015"+ + "\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0016\u0001\u0016\u0001\u0016"+ + "\u0001\u0016\u0005\u0016\u0150\b\u0016\n\u0016\f\u0016\u0153\t\u0016\u0001"+ + "\u0016\u0003\u0016\u0156\b\u0016\u0001\u0016\u0001\u0016\u0003\u0016\u015a"+ + "\b\u0016\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0018\u0001\u0018\u0003"+ + "\u0018\u0161\b\u0018\u0001\u0018\u0001\u0018\u0003\u0018\u0165\b\u0018"+ + "\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0003\u0019\u016b\b\u0019"+ + "\u0001\u001a\u0001\u001a\u0001\u001a\u0005\u001a\u0170\b\u001a\n\u001a"+ + "\f\u001a\u0173\t\u001a\u0001\u001b\u0001\u001b\u0001\u001b\u0005\u001b"+ + "\u0178\b\u001b\n\u001b\f\u001b\u017b\t\u001b\u0001\u001c\u0001\u001c\u0001"+ + "\u001c\u0005\u001c\u0180\b\u001c\n\u001c\f\u001c\u0183\t\u001c\u0001\u001d"+ + "\u0001\u001d\u0001\u001e\u0001\u001e\u0001\u001f\u0001\u001f\u0001\u001f"+ + "\u0001\u001f\u0001\u001f\u0001\u001f\u0001\u001f\u0001\u001f\u0001\u001f"+ + "\u0001\u001f\u0001\u001f\u0001\u001f\u0001\u001f\u0005\u001f\u0196\b\u001f"+ + "\n\u001f\f\u001f\u0199\t\u001f\u0001\u001f\u0001\u001f\u0001\u001f\u0001"+ + "\u001f\u0001\u001f\u0001\u001f\u0005\u001f\u01a1\b\u001f\n\u001f\f\u001f"+ + "\u01a4\t\u001f\u0001\u001f\u0001\u001f\u0001\u001f\u0001\u001f\u0001\u001f"+ + "\u0001\u001f\u0005\u001f\u01ac\b\u001f\n\u001f\f\u001f\u01af\t\u001f\u0001"+ + "\u001f\u0001\u001f\u0003\u001f\u01b3\b\u001f\u0001 \u0001 \u0003 \u01b7"+ + "\b \u0001!\u0001!\u0001!\u0001\"\u0001\"\u0001\"\u0001\"\u0005\"\u01c0"+ + "\b\"\n\"\f\"\u01c3\t\"\u0001#\u0001#\u0003#\u01c7\b#\u0001#\u0001#\u0003"+ + "#\u01cb\b#\u0001$\u0001$\u0001$\u0001%\u0001%\u0001%\u0001&\u0001&\u0001"+ + "&\u0001&\u0005&\u01d7\b&\n&\f&\u01da\t&\u0001\'\u0001\'\u0001\'\u0001"+ + "\'\u0001(\u0001(\u0001(\u0001(\u0003(\u01e4\b(\u0001)\u0001)\u0001)\u0001"+ + ")\u0001*\u0001*\u0001*\u0001+\u0001+\u0001+\u0005+\u01f0\b+\n+\f+\u01f3"+ + "\t+\u0001,\u0001,\u0001,\u0001,\u0001-\u0001-\u0001.\u0001.\u0003.\u01fd"+ + "\b.\u0001/\u0003/\u0200\b/\u0001/\u0001/\u00010\u00030\u0205\b0\u0001"+ + "0\u00010\u00011\u00011\u00012\u00012\u00013\u00013\u00013\u00014\u0001"+ + "4\u00014\u00014\u00015\u00015\u00015\u00016\u00016\u00016\u00017\u0001"+ + 
"7\u00017\u00017\u00037\u021e\b7\u00017\u00017\u00017\u00017\u00057\u0224"+ + "\b7\n7\f7\u0227\t7\u00037\u0229\b7\u00018\u00018\u00018\u00038\u022e\b"+ + "8\u00018\u00018\u00019\u00019\u00019\u00019\u00019\u00019\u0000\u0004"+ + "\u0002\n\u0010\u0012:\u0000\u0002\u0004\u0006\b\n\f\u000e\u0010\u0012"+ + "\u0014\u0016\u0018\u001a\u001c\u001e \"$&(*,.02468:<>@BDFHJLNPRTVXZ\\"+ + "^`bdfhjlnpr\u0000\b\u0001\u0000?@\u0001\u0000AC\u0002\u0000\u0019\u0019"+ + "\u001e\u001e\u0001\u0000GH\u0002\u0000##\'\'\u0001\u0000*+\u0002\u0000"+ + "))77\u0002\u000088:>\u0250\u0000t\u0001\u0000\u0000\u0000\u0002w\u0001"+ + "\u0000\u0000\u0000\u0004\u0088\u0001\u0000\u0000\u0000\u0006\u0098\u0001"+ + "\u0000\u0000\u0000\b\u009a\u0001\u0000\u0000\u0000\n\u00b9\u0001\u0000"+ + "\u0000\u0000\f\u00d4\u0001\u0000\u0000\u0000\u000e\u00db\u0001\u0000\u0000"+ + "\u0000\u0010\u00e1\u0001\u0000\u0000\u0000\u0012\u00f6\u0001\u0000\u0000"+ + "\u0000\u0014\u0100\u0001\u0000\u0000\u0000\u0016\u010f\u0001\u0000\u0000"+ + "\u0000\u0018\u0111\u0001\u0000\u0000\u0000\u001a\u0114\u0001\u0000\u0000"+ + "\u0000\u001c\u0121\u0001\u0000\u0000\u0000\u001e\u0123\u0001\u0000\u0000"+ + "\u0000 \u0134\u0001\u0000\u0000\u0000\"\u0136\u0001\u0000\u0000\u0000"+ + "$\u0138\u0001\u0000\u0000\u0000&\u013c\u0001\u0000\u0000\u0000(\u013e"+ + "\u0001\u0000\u0000\u0000*\u0147\u0001\u0000\u0000\u0000,\u014b\u0001\u0000"+ + "\u0000\u0000.\u015b\u0001\u0000\u0000\u00000\u015e\u0001\u0000\u0000\u0000"+ + "2\u0166\u0001\u0000\u0000\u00004\u016c\u0001\u0000\u0000\u00006\u0174"+ + "\u0001\u0000\u0000\u00008\u017c\u0001\u0000\u0000\u0000:\u0184\u0001\u0000"+ + "\u0000\u0000<\u0186\u0001\u0000\u0000\u0000>\u01b2\u0001\u0000\u0000\u0000"+ + "@\u01b6\u0001\u0000\u0000\u0000B\u01b8\u0001\u0000\u0000\u0000D\u01bb"+ + "\u0001\u0000\u0000\u0000F\u01c4\u0001\u0000\u0000\u0000H\u01cc\u0001\u0000"+ + "\u0000\u0000J\u01cf\u0001\u0000\u0000\u0000L\u01d2\u0001\u0000\u0000\u0000"+ + "N\u01db\u0001\u0000\u0000\u0000P\u01df\u0001\u0000\u0000\u0000R\u01e5"+ + "\u0001\u0000\u0000\u0000T\u01e9\u0001\u0000\u0000\u0000V\u01ec\u0001\u0000"+ + "\u0000\u0000X\u01f4\u0001\u0000\u0000\u0000Z\u01f8\u0001\u0000\u0000\u0000"+ + "\\\u01fc\u0001\u0000\u0000\u0000^\u01ff\u0001\u0000\u0000\u0000`\u0204"+ + "\u0001\u0000\u0000\u0000b\u0208\u0001\u0000\u0000\u0000d\u020a\u0001\u0000"+ + "\u0000\u0000f\u020c\u0001\u0000\u0000\u0000h\u020f\u0001\u0000\u0000\u0000"+ + "j\u0213\u0001\u0000\u0000\u0000l\u0216\u0001\u0000\u0000\u0000n\u0219"+ + "\u0001\u0000\u0000\u0000p\u022d\u0001\u0000\u0000\u0000r\u0231\u0001\u0000"+ + "\u0000\u0000tu\u0003\u0002\u0001\u0000uv\u0005\u0000\u0000\u0001v\u0001"+ + "\u0001\u0000\u0000\u0000wx\u0006\u0001\uffff\uffff\u0000xy\u0003\u0004"+ + "\u0002\u0000y\u007f\u0001\u0000\u0000\u0000z{\n\u0001\u0000\u0000{|\u0005"+ + "\u001d\u0000\u0000|~\u0003\u0006\u0003\u0000}z\u0001\u0000\u0000\u0000"+ + "~\u0081\u0001\u0000\u0000\u0000\u007f}\u0001\u0000\u0000\u0000\u007f\u0080"+ + "\u0001\u0000\u0000\u0000\u0080\u0003\u0001\u0000\u0000\u0000\u0081\u007f"+ + "\u0001\u0000\u0000\u0000\u0082\u0089\u0003f3\u0000\u0083\u0089\u0003\u001e"+ + "\u000f\u0000\u0084\u0089\u0003\u0018\f\u0000\u0085\u0089\u0003,\u0016"+ + "\u0000\u0086\u0089\u0003j5\u0000\u0087\u0089\u0003l6\u0000\u0088\u0082"+ + "\u0001\u0000\u0000\u0000\u0088\u0083\u0001\u0000\u0000\u0000\u0088\u0084"+ + "\u0001\u0000\u0000\u0000\u0088\u0085\u0001\u0000\u0000\u0000\u0088\u0086"+ + "\u0001\u0000\u0000\u0000\u0088\u0087\u0001\u0000\u0000\u0000\u0089\u0005"+ + 
"\u0001\u0000\u0000\u0000\u008a\u0099\u0003.\u0017\u0000\u008b\u0099\u0003"+ + "2\u0019\u0000\u008c\u0099\u0003B!\u0000\u008d\u0099\u0003r9\u0000\u008e"+ + "\u0099\u0003H$\u0000\u008f\u0099\u0003D\"\u0000\u0090\u0099\u00030\u0018"+ + "\u0000\u0091\u0099\u0003\b\u0004\u0000\u0092\u0099\u0003J%\u0000\u0093"+ + "\u0099\u0003L&\u0000\u0094\u0099\u0003P(\u0000\u0095\u0099\u0003R)\u0000"+ + "\u0096\u0099\u0003n7\u0000\u0097\u0099\u0003T*\u0000\u0098\u008a\u0001"+ + "\u0000\u0000\u0000\u0098\u008b\u0001\u0000\u0000\u0000\u0098\u008c\u0001"+ + "\u0000\u0000\u0000\u0098\u008d\u0001\u0000\u0000\u0000\u0098\u008e\u0001"+ + "\u0000\u0000\u0000\u0098\u008f\u0001\u0000\u0000\u0000\u0098\u0090\u0001"+ + "\u0000\u0000\u0000\u0098\u0091\u0001\u0000\u0000\u0000\u0098\u0092\u0001"+ + "\u0000\u0000\u0000\u0098\u0093\u0001\u0000\u0000\u0000\u0098\u0094\u0001"+ + "\u0000\u0000\u0000\u0098\u0095\u0001\u0000\u0000\u0000\u0098\u0096\u0001"+ + "\u0000\u0000\u0000\u0098\u0097\u0001\u0000\u0000\u0000\u0099\u0007\u0001"+ + "\u0000\u0000\u0000\u009a\u009b\u0005\u0014\u0000\u0000\u009b\u009c\u0003"+ + "\n\u0005\u0000\u009c\t\u0001\u0000\u0000\u0000\u009d\u009e\u0006\u0005"+ + "\uffff\uffff\u0000\u009e\u009f\u00050\u0000\u0000\u009f\u00ba\u0003\n"+ + "\u0005\u0007\u00a0\u00ba\u0003\u000e\u0007\u0000\u00a1\u00ba\u0003\f\u0006"+ + "\u0000\u00a2\u00a4\u0003\u000e\u0007\u0000\u00a3\u00a5\u00050\u0000\u0000"+ + "\u00a4\u00a3\u0001\u0000\u0000\u0000\u00a4\u00a5\u0001\u0000\u0000\u0000"+ + "\u00a5\u00a6\u0001\u0000\u0000\u0000\u00a6\u00a7\u0005-\u0000\u0000\u00a7"+ + "\u00a8\u0005,\u0000\u0000\u00a8\u00ad\u0003\u000e\u0007\u0000\u00a9\u00aa"+ + "\u0005&\u0000\u0000\u00aa\u00ac\u0003\u000e\u0007\u0000\u00ab\u00a9\u0001"+ + "\u0000\u0000\u0000\u00ac\u00af\u0001\u0000\u0000\u0000\u00ad\u00ab\u0001"+ + "\u0000\u0000\u0000\u00ad\u00ae\u0001\u0000\u0000\u0000\u00ae\u00b0\u0001"+ + "\u0000\u0000\u0000\u00af\u00ad\u0001\u0000\u0000\u0000\u00b0\u00b1\u0005"+ + "6\u0000\u0000\u00b1\u00ba\u0001\u0000\u0000\u0000\u00b2\u00b3\u0003\u000e"+ + "\u0007\u0000\u00b3\u00b5\u0005.\u0000\u0000\u00b4\u00b6\u00050\u0000\u0000"+ + "\u00b5\u00b4\u0001\u0000\u0000\u0000\u00b5\u00b6\u0001\u0000\u0000\u0000"+ + "\u00b6\u00b7\u0001\u0000\u0000\u0000\u00b7\u00b8\u00051\u0000\u0000\u00b8"+ + "\u00ba\u0001\u0000\u0000\u0000\u00b9\u009d\u0001\u0000\u0000\u0000\u00b9"+ + "\u00a0\u0001\u0000\u0000\u0000\u00b9\u00a1\u0001\u0000\u0000\u0000\u00b9"+ + "\u00a2\u0001\u0000\u0000\u0000\u00b9\u00b2\u0001\u0000\u0000\u0000\u00ba"+ + "\u00c3\u0001\u0000\u0000\u0000\u00bb\u00bc\n\u0004\u0000\u0000\u00bc\u00bd"+ + "\u0005\"\u0000\u0000\u00bd\u00c2\u0003\n\u0005\u0005\u00be\u00bf\n\u0003"+ + "\u0000\u0000\u00bf\u00c0\u00053\u0000\u0000\u00c0\u00c2\u0003\n\u0005"+ + "\u0004\u00c1\u00bb\u0001\u0000\u0000\u0000\u00c1\u00be\u0001\u0000\u0000"+ + "\u0000\u00c2\u00c5\u0001\u0000\u0000\u0000\u00c3\u00c1\u0001\u0000\u0000"+ + "\u0000\u00c3\u00c4\u0001\u0000\u0000\u0000\u00c4\u000b\u0001\u0000\u0000"+ + "\u0000\u00c5\u00c3\u0001\u0000\u0000\u0000\u00c6\u00c8\u0003\u000e\u0007"+ + "\u0000\u00c7\u00c9\u00050\u0000\u0000\u00c8\u00c7\u0001\u0000\u0000\u0000"+ + "\u00c8\u00c9\u0001\u0000\u0000\u0000\u00c9\u00ca\u0001\u0000\u0000\u0000"+ + "\u00ca\u00cb\u0005/\u0000\u0000\u00cb\u00cc\u0003b1\u0000\u00cc\u00d5"+ + "\u0001\u0000\u0000\u0000\u00cd\u00cf\u0003\u000e\u0007\u0000\u00ce\u00d0"+ + "\u00050\u0000\u0000\u00cf\u00ce\u0001\u0000\u0000\u0000\u00cf\u00d0\u0001"+ + "\u0000\u0000\u0000\u00d0\u00d1\u0001\u0000\u0000\u0000\u00d1\u00d2\u0005"+ + 
"5\u0000\u0000\u00d2\u00d3\u0003b1\u0000\u00d3\u00d5\u0001\u0000\u0000"+ + "\u0000\u00d4\u00c6\u0001\u0000\u0000\u0000\u00d4\u00cd\u0001\u0000\u0000"+ + "\u0000\u00d5\r\u0001\u0000\u0000\u0000\u00d6\u00dc\u0003\u0010\b\u0000"+ + "\u00d7\u00d8\u0003\u0010\b\u0000\u00d8\u00d9\u0003d2\u0000\u00d9\u00da"+ + "\u0003\u0010\b\u0000\u00da\u00dc\u0001\u0000\u0000\u0000\u00db\u00d6\u0001"+ + "\u0000\u0000\u0000\u00db\u00d7\u0001\u0000\u0000\u0000\u00dc\u000f\u0001"+ + "\u0000\u0000\u0000\u00dd\u00de\u0006\b\uffff\uffff\u0000\u00de\u00e2\u0003"+ + "\u0012\t\u0000\u00df\u00e0\u0007\u0000\u0000\u0000\u00e0\u00e2\u0003\u0010"+ + "\b\u0003\u00e1\u00dd\u0001\u0000\u0000\u0000\u00e1\u00df\u0001\u0000\u0000"+ + "\u0000\u00e2\u00eb\u0001\u0000\u0000\u0000\u00e3\u00e4\n\u0002\u0000\u0000"+ + "\u00e4\u00e5\u0007\u0001\u0000\u0000\u00e5\u00ea\u0003\u0010\b\u0003\u00e6"+ + "\u00e7\n\u0001\u0000\u0000\u00e7\u00e8\u0007\u0000\u0000\u0000\u00e8\u00ea"+ + "\u0003\u0010\b\u0002\u00e9\u00e3\u0001\u0000\u0000\u0000\u00e9\u00e6\u0001"+ + "\u0000\u0000\u0000\u00ea\u00ed\u0001\u0000\u0000\u0000\u00eb\u00e9\u0001"+ + "\u0000\u0000\u0000\u00eb\u00ec\u0001\u0000\u0000\u0000\u00ec\u0011\u0001"+ + "\u0000\u0000\u0000\u00ed\u00eb\u0001\u0000\u0000\u0000\u00ee\u00ef\u0006"+ + "\t\uffff\uffff\u0000\u00ef\u00f7\u0003>\u001f\u0000\u00f0\u00f7\u0003"+ + "4\u001a\u0000\u00f1\u00f7\u0003\u0014\n\u0000\u00f2\u00f3\u0005,\u0000"+ + "\u0000\u00f3\u00f4\u0003\n\u0005\u0000\u00f4\u00f5\u00056\u0000\u0000"+ + "\u00f5\u00f7\u0001\u0000\u0000\u0000\u00f6\u00ee\u0001\u0000\u0000\u0000"+ + "\u00f6\u00f0\u0001\u0000\u0000\u0000\u00f6\u00f1\u0001\u0000\u0000\u0000"+ + "\u00f6\u00f2\u0001\u0000\u0000\u0000\u00f7\u00fd\u0001\u0000\u0000\u0000"+ + "\u00f8\u00f9\n\u0001\u0000\u0000\u00f9\u00fa\u0005%\u0000\u0000\u00fa"+ + "\u00fc\u0003\u0016\u000b\u0000\u00fb\u00f8\u0001\u0000\u0000\u0000\u00fc"+ + "\u00ff\u0001\u0000\u0000\u0000\u00fd\u00fb\u0001\u0000\u0000\u0000\u00fd"+ + "\u00fe\u0001\u0000\u0000\u0000\u00fe\u0013\u0001\u0000\u0000\u0000\u00ff"+ + "\u00fd\u0001\u0000\u0000\u0000\u0100\u0101\u0003:\u001d\u0000\u0101\u010b"+ + "\u0005,\u0000\u0000\u0102\u010c\u0005A\u0000\u0000\u0103\u0108\u0003\n"+ + "\u0005\u0000\u0104\u0105\u0005&\u0000\u0000\u0105\u0107\u0003\n\u0005"+ + "\u0000\u0106\u0104\u0001\u0000\u0000\u0000\u0107\u010a\u0001\u0000\u0000"+ + "\u0000\u0108\u0106\u0001\u0000\u0000\u0000\u0108\u0109\u0001\u0000\u0000"+ + "\u0000\u0109\u010c\u0001\u0000\u0000\u0000\u010a\u0108\u0001\u0000\u0000"+ + "\u0000\u010b\u0102\u0001\u0000\u0000\u0000\u010b\u0103\u0001\u0000\u0000"+ + "\u0000\u010b\u010c\u0001\u0000\u0000\u0000\u010c\u010d\u0001\u0000\u0000"+ + "\u0000\u010d\u010e\u00056\u0000\u0000\u010e\u0015\u0001\u0000\u0000\u0000"+ + "\u010f\u0110\u0003:\u001d\u0000\u0110\u0017\u0001\u0000\u0000\u0000\u0111"+ + "\u0112\u0005\u0010\u0000\u0000\u0112\u0113\u0003\u001a\r\u0000\u0113\u0019"+ + "\u0001\u0000\u0000\u0000\u0114\u0119\u0003\u001c\u000e\u0000\u0115\u0116"+ + "\u0005&\u0000\u0000\u0116\u0118\u0003\u001c\u000e\u0000\u0117\u0115\u0001"+ + "\u0000\u0000\u0000\u0118\u011b\u0001\u0000\u0000\u0000\u0119\u0117\u0001"+ + "\u0000\u0000\u0000\u0119\u011a\u0001\u0000\u0000\u0000\u011a\u001b\u0001"+ + "\u0000\u0000\u0000\u011b\u0119\u0001\u0000\u0000\u0000\u011c\u0122\u0003"+ + "\n\u0005\u0000\u011d\u011e\u00034\u001a\u0000\u011e\u011f\u0005$\u0000"+ + "\u0000\u011f\u0120\u0003\n\u0005\u0000\u0120\u0122\u0001\u0000\u0000\u0000"+ + "\u0121\u011c\u0001\u0000\u0000\u0000\u0121\u011d\u0001\u0000\u0000\u0000"+ + 
"\u0122\u001d\u0001\u0000\u0000\u0000\u0123\u0124\u0005\u0006\u0000\u0000"+ + "\u0124\u0129\u0003 \u0010\u0000\u0125\u0126\u0005&\u0000\u0000\u0126\u0128"+ + "\u0003 \u0010\u0000\u0127\u0125\u0001\u0000\u0000\u0000\u0128\u012b\u0001"+ + "\u0000\u0000\u0000\u0129\u0127\u0001\u0000\u0000\u0000\u0129\u012a\u0001"+ + "\u0000\u0000\u0000\u012a\u012d\u0001\u0000\u0000\u0000\u012b\u0129\u0001"+ + "\u0000\u0000\u0000\u012c\u012e\u0003&\u0013\u0000\u012d\u012c\u0001\u0000"+ + "\u0000\u0000\u012d\u012e\u0001\u0000\u0000\u0000\u012e\u001f\u0001\u0000"+ + "\u0000\u0000\u012f\u0130\u0003\"\u0011\u0000\u0130\u0131\u0005r\u0000"+ + "\u0000\u0131\u0132\u0003$\u0012\u0000\u0132\u0135\u0001\u0000\u0000\u0000"+ + "\u0133\u0135\u0003$\u0012\u0000\u0134\u012f\u0001\u0000\u0000\u0000\u0134"+ + "\u0133\u0001\u0000\u0000\u0000\u0135!\u0001\u0000\u0000\u0000\u0136\u0137"+ + "\u0005\u0019\u0000\u0000\u0137#\u0001\u0000\u0000\u0000\u0138\u0139\u0007"+ + "\u0002\u0000\u0000\u0139%\u0001\u0000\u0000\u0000\u013a\u013d\u0003(\u0014"+ + "\u0000\u013b\u013d\u0003*\u0015\u0000\u013c\u013a\u0001\u0000\u0000\u0000"+ + "\u013c\u013b\u0001\u0000\u0000\u0000\u013d\'\u0001\u0000\u0000\u0000\u013e"+ + "\u013f\u0005L\u0000\u0000\u013f\u0144\u0005\u0019\u0000\u0000\u0140\u0141"+ + "\u0005&\u0000\u0000\u0141\u0143\u0005\u0019\u0000\u0000\u0142\u0140\u0001"+ + "\u0000\u0000\u0000\u0143\u0146\u0001\u0000\u0000\u0000\u0144\u0142\u0001"+ + "\u0000\u0000\u0000\u0144\u0145\u0001\u0000\u0000\u0000\u0145)\u0001\u0000"+ + "\u0000\u0000\u0146\u0144\u0001\u0000\u0000\u0000\u0147\u0148\u0005E\u0000"+ + "\u0000\u0148\u0149\u0003(\u0014\u0000\u0149\u014a\u0005F\u0000\u0000\u014a"+ + "+\u0001\u0000\u0000\u0000\u014b\u014c\u0005\r\u0000\u0000\u014c\u0151"+ + "\u0003 \u0010\u0000\u014d\u014e\u0005&\u0000\u0000\u014e\u0150\u0003 "+ + "\u0010\u0000\u014f\u014d\u0001\u0000\u0000\u0000\u0150\u0153\u0001\u0000"+ + "\u0000\u0000\u0151\u014f\u0001\u0000\u0000\u0000\u0151\u0152\u0001\u0000"+ + "\u0000\u0000\u0152\u0155\u0001\u0000\u0000\u0000\u0153\u0151\u0001\u0000"+ + "\u0000\u0000\u0154\u0156\u0003\u001a\r\u0000\u0155\u0154\u0001\u0000\u0000"+ + "\u0000\u0155\u0156\u0001\u0000\u0000\u0000\u0156\u0159\u0001\u0000\u0000"+ + "\u0000\u0157\u0158\u0005!\u0000\u0000\u0158\u015a\u0003\u001a\r\u0000"+ + "\u0159\u0157\u0001\u0000\u0000\u0000\u0159\u015a\u0001\u0000\u0000\u0000"+ + "\u015a-\u0001\u0000\u0000\u0000\u015b\u015c\u0005\u0004\u0000\u0000\u015c"+ + "\u015d\u0003\u001a\r\u0000\u015d/\u0001\u0000\u0000\u0000\u015e\u0160"+ + "\u0005\u0013\u0000\u0000\u015f\u0161\u0003\u001a\r\u0000\u0160\u015f\u0001"+ + "\u0000\u0000\u0000\u0160\u0161\u0001\u0000\u0000\u0000\u0161\u0164\u0001"+ + "\u0000\u0000\u0000\u0162\u0163\u0005!\u0000\u0000\u0163\u0165\u0003\u001a"+ + "\r\u0000\u0164\u0162\u0001\u0000\u0000\u0000\u0164\u0165\u0001\u0000\u0000"+ + "\u0000\u01651\u0001\u0000\u0000\u0000\u0166\u0167\u0005\b\u0000\u0000"+ + "\u0167\u016a\u0003\u001a\r\u0000\u0168\u0169\u0005!\u0000\u0000\u0169"+ + "\u016b\u0003\u001a\r\u0000\u016a\u0168\u0001\u0000\u0000\u0000\u016a\u016b"+ + "\u0001\u0000\u0000\u0000\u016b3\u0001\u0000\u0000\u0000\u016c\u0171\u0003"+ + ":\u001d\u0000\u016d\u016e\u0005(\u0000\u0000\u016e\u0170\u0003:\u001d"+ + "\u0000\u016f\u016d\u0001\u0000\u0000\u0000\u0170\u0173\u0001\u0000\u0000"+ + "\u0000\u0171\u016f\u0001\u0000\u0000\u0000\u0171\u0172\u0001\u0000\u0000"+ + "\u0000\u01725\u0001\u0000\u0000\u0000\u0173\u0171\u0001\u0000\u0000\u0000"+ + "\u0174\u0179\u0003<\u001e\u0000\u0175\u0176\u0005(\u0000\u0000\u0176\u0178"+ + 
"\u0003<\u001e\u0000\u0177\u0175\u0001\u0000\u0000\u0000\u0178\u017b\u0001"+ + "\u0000\u0000\u0000\u0179\u0177\u0001\u0000\u0000\u0000\u0179\u017a\u0001"+ + "\u0000\u0000\u0000\u017a7\u0001\u0000\u0000\u0000\u017b\u0179\u0001\u0000"+ + "\u0000\u0000\u017c\u0181\u00036\u001b\u0000\u017d\u017e\u0005&\u0000\u0000"+ + "\u017e\u0180\u00036\u001b\u0000\u017f\u017d\u0001\u0000\u0000\u0000\u0180"+ + "\u0183\u0001\u0000\u0000\u0000\u0181\u017f\u0001\u0000\u0000\u0000\u0181"+ + "\u0182\u0001\u0000\u0000\u0000\u01829\u0001\u0000\u0000\u0000\u0183\u0181"+ + "\u0001\u0000\u0000\u0000\u0184\u0185\u0007\u0003\u0000\u0000\u0185;\u0001"+ + "\u0000\u0000\u0000\u0186\u0187\u0005P\u0000\u0000\u0187=\u0001\u0000\u0000"+ + "\u0000\u0188\u01b3\u00051\u0000\u0000\u0189\u018a\u0003`0\u0000\u018a"+ + "\u018b\u0005G\u0000\u0000\u018b\u01b3\u0001\u0000\u0000\u0000\u018c\u01b3"+ + "\u0003^/\u0000\u018d\u01b3\u0003`0\u0000\u018e\u01b3\u0003Z-\u0000\u018f"+ + "\u01b3\u0003@ \u0000\u0190\u01b3\u0003b1\u0000\u0191\u0192\u0005E\u0000"+ + "\u0000\u0192\u0197\u0003\\.\u0000\u0193\u0194\u0005&\u0000\u0000\u0194"+ + "\u0196\u0003\\.\u0000\u0195\u0193\u0001\u0000\u0000\u0000\u0196\u0199"+ + "\u0001\u0000\u0000\u0000\u0197\u0195\u0001\u0000\u0000\u0000\u0197\u0198"+ + "\u0001\u0000\u0000\u0000\u0198\u019a\u0001\u0000\u0000\u0000\u0199\u0197"+ + "\u0001\u0000\u0000\u0000\u019a\u019b\u0005F\u0000\u0000\u019b\u01b3\u0001"+ + "\u0000\u0000\u0000\u019c\u019d\u0005E\u0000\u0000\u019d\u01a2\u0003Z-"+ + "\u0000\u019e\u019f\u0005&\u0000\u0000\u019f\u01a1\u0003Z-\u0000\u01a0"+ + "\u019e\u0001\u0000\u0000\u0000\u01a1\u01a4\u0001\u0000\u0000\u0000\u01a2"+ + "\u01a0\u0001\u0000\u0000\u0000\u01a2\u01a3\u0001\u0000\u0000\u0000\u01a3"+ + "\u01a5\u0001\u0000\u0000\u0000\u01a4\u01a2\u0001\u0000\u0000\u0000\u01a5"+ + "\u01a6\u0005F\u0000\u0000\u01a6\u01b3\u0001\u0000\u0000\u0000\u01a7\u01a8"+ + "\u0005E\u0000\u0000\u01a8\u01ad\u0003b1\u0000\u01a9\u01aa\u0005&\u0000"+ + "\u0000\u01aa\u01ac\u0003b1\u0000\u01ab\u01a9\u0001\u0000\u0000\u0000\u01ac"+ + "\u01af\u0001\u0000\u0000\u0000\u01ad\u01ab\u0001\u0000\u0000\u0000\u01ad"+ + "\u01ae\u0001\u0000\u0000\u0000\u01ae\u01b0\u0001\u0000\u0000\u0000\u01af"+ + "\u01ad\u0001\u0000\u0000\u0000\u01b0\u01b1\u0005F\u0000\u0000\u01b1\u01b3"+ + "\u0001\u0000\u0000\u0000\u01b2\u0188\u0001\u0000\u0000\u0000\u01b2\u0189"+ + "\u0001\u0000\u0000\u0000\u01b2\u018c\u0001\u0000\u0000\u0000\u01b2\u018d"+ + "\u0001\u0000\u0000\u0000\u01b2\u018e\u0001\u0000\u0000\u0000\u01b2\u018f"+ + "\u0001\u0000\u0000\u0000\u01b2\u0190\u0001\u0000\u0000\u0000\u01b2\u0191"+ + "\u0001\u0000\u0000\u0000\u01b2\u019c\u0001\u0000\u0000\u0000\u01b2\u01a7"+ + "\u0001\u0000\u0000\u0000\u01b3?\u0001\u0000\u0000\u0000\u01b4\u01b7\u0005"+ + "4\u0000\u0000\u01b5\u01b7\u0005D\u0000\u0000\u01b6\u01b4\u0001\u0000\u0000"+ + "\u0000\u01b6\u01b5\u0001\u0000\u0000\u0000\u01b7A\u0001\u0000\u0000\u0000"+ + "\u01b8\u01b9\u0005\n\u0000\u0000\u01b9\u01ba\u0005\u001f\u0000\u0000\u01ba"+ + "C\u0001\u0000\u0000\u0000\u01bb\u01bc\u0005\u0012\u0000\u0000\u01bc\u01c1"+ + "\u0003F#\u0000\u01bd\u01be\u0005&\u0000\u0000\u01be\u01c0\u0003F#\u0000"+ + "\u01bf\u01bd\u0001\u0000\u0000\u0000\u01c0\u01c3\u0001\u0000\u0000\u0000"+ + "\u01c1\u01bf\u0001\u0000\u0000\u0000\u01c1\u01c2\u0001\u0000\u0000\u0000"+ + "\u01c2E\u0001\u0000\u0000\u0000\u01c3\u01c1\u0001\u0000\u0000\u0000\u01c4"+ + "\u01c6\u0003\n\u0005\u0000\u01c5\u01c7\u0007\u0004\u0000\u0000\u01c6\u01c5"+ + "\u0001\u0000\u0000\u0000\u01c6\u01c7\u0001\u0000\u0000\u0000\u01c7\u01ca"+ + 
"\u0001\u0000\u0000\u0000\u01c8\u01c9\u00052\u0000\u0000\u01c9\u01cb\u0007"+ + "\u0005\u0000\u0000\u01ca\u01c8\u0001\u0000\u0000\u0000\u01ca\u01cb\u0001"+ + "\u0000\u0000\u0000\u01cbG\u0001\u0000\u0000\u0000\u01cc\u01cd\u0005\t"+ + "\u0000\u0000\u01cd\u01ce\u00038\u001c\u0000\u01ceI\u0001\u0000\u0000\u0000"+ + "\u01cf\u01d0\u0005\u0002\u0000\u0000\u01d0\u01d1\u00038\u001c\u0000\u01d1"+ + "K\u0001\u0000\u0000\u0000\u01d2\u01d3\u0005\u000f\u0000\u0000\u01d3\u01d8"+ + "\u0003N\'\u0000\u01d4\u01d5\u0005&\u0000\u0000\u01d5\u01d7\u0003N\'\u0000"+ + "\u01d6\u01d4\u0001\u0000\u0000\u0000\u01d7\u01da\u0001\u0000\u0000\u0000"+ + "\u01d8\u01d6\u0001\u0000\u0000\u0000\u01d8\u01d9\u0001\u0000\u0000\u0000"+ + "\u01d9M\u0001\u0000\u0000\u0000\u01da\u01d8\u0001\u0000\u0000\u0000\u01db"+ + "\u01dc\u00036\u001b\u0000\u01dc\u01dd\u0005T\u0000\u0000\u01dd\u01de\u0003"+ + "6\u001b\u0000\u01deO\u0001\u0000\u0000\u0000\u01df\u01e0\u0005\u0001\u0000"+ + "\u0000\u01e0\u01e1\u0003\u0012\t\u0000\u01e1\u01e3\u0003b1\u0000\u01e2"+ + "\u01e4\u0003V+\u0000\u01e3\u01e2\u0001\u0000\u0000\u0000\u01e3\u01e4\u0001"+ + "\u0000\u0000\u0000\u01e4Q\u0001\u0000\u0000\u0000\u01e5\u01e6\u0005\u0007"+ + "\u0000\u0000\u01e6\u01e7\u0003\u0012\t\u0000\u01e7\u01e8\u0003b1\u0000"+ + "\u01e8S\u0001\u0000\u0000\u0000\u01e9\u01ea\u0005\u000e\u0000\u0000\u01ea"+ + "\u01eb\u00034\u001a\u0000\u01ebU\u0001\u0000\u0000\u0000\u01ec\u01f1\u0003"+ + "X,\u0000\u01ed\u01ee\u0005&\u0000\u0000\u01ee\u01f0\u0003X,\u0000\u01ef"+ + "\u01ed\u0001\u0000\u0000\u0000\u01f0\u01f3\u0001\u0000\u0000\u0000\u01f1"+ + "\u01ef\u0001\u0000\u0000\u0000\u01f1\u01f2\u0001\u0000\u0000\u0000\u01f2"+ + "W\u0001\u0000\u0000\u0000\u01f3\u01f1\u0001\u0000\u0000\u0000\u01f4\u01f5"+ + "\u0003:\u001d\u0000\u01f5\u01f6\u0005$\u0000\u0000\u01f6\u01f7\u0003>"+ + "\u001f\u0000\u01f7Y\u0001\u0000\u0000\u0000\u01f8\u01f9\u0007\u0006\u0000"+ + "\u0000\u01f9[\u0001\u0000\u0000\u0000\u01fa\u01fd\u0003^/\u0000\u01fb"+ + "\u01fd\u0003`0\u0000\u01fc\u01fa\u0001\u0000\u0000\u0000\u01fc\u01fb\u0001"+ + "\u0000\u0000\u0000\u01fd]\u0001\u0000\u0000\u0000\u01fe\u0200\u0007\u0000"+ + "\u0000\u0000\u01ff\u01fe\u0001\u0000\u0000\u0000\u01ff\u0200\u0001\u0000"+ + "\u0000\u0000\u0200\u0201\u0001\u0000\u0000\u0000\u0201\u0202\u0005 \u0000"+ + "\u0000\u0202_\u0001\u0000\u0000\u0000\u0203\u0205\u0007\u0000\u0000\u0000"+ + "\u0204\u0203\u0001\u0000\u0000\u0000\u0204\u0205\u0001\u0000\u0000\u0000"+ + "\u0205\u0206\u0001\u0000\u0000\u0000\u0206\u0207\u0005\u001f\u0000\u0000"+ + "\u0207a\u0001\u0000\u0000\u0000\u0208\u0209\u0005\u001e\u0000\u0000\u0209"+ + "c\u0001\u0000\u0000\u0000\u020a\u020b\u0007\u0007\u0000\u0000\u020be\u0001"+ + "\u0000\u0000\u0000\u020c\u020d\u0005\u0005\u0000\u0000\u020d\u020e\u0003"+ + "h4\u0000\u020eg\u0001\u0000\u0000\u0000\u020f\u0210\u0005E\u0000\u0000"+ + "\u0210\u0211\u0003\u0002\u0001\u0000\u0211\u0212\u0005F\u0000\u0000\u0212"+ + "i\u0001\u0000\u0000\u0000\u0213\u0214\u0005\u0011\u0000\u0000\u0214\u0215"+ + "\u0005j\u0000\u0000\u0215k\u0001\u0000\u0000\u0000\u0216\u0217\u0005\f"+ + "\u0000\u0000\u0217\u0218\u0005n\u0000\u0000\u0218m\u0001\u0000\u0000\u0000"+ + "\u0219\u021a\u0005\u0003\u0000\u0000\u021a\u021d\u0005Z\u0000\u0000\u021b"+ + "\u021c\u0005X\u0000\u0000\u021c\u021e\u00036\u001b\u0000\u021d\u021b\u0001"+ + "\u0000\u0000\u0000\u021d\u021e\u0001\u0000\u0000\u0000\u021e\u0228\u0001"+ + "\u0000\u0000\u0000\u021f\u0220\u0005Y\u0000\u0000\u0220\u0225\u0003p8"+ + "\u0000\u0221\u0222\u0005&\u0000\u0000\u0222\u0224\u0003p8\u0000\u0223"+ + 
"\u0221\u0001\u0000\u0000\u0000\u0224\u0227\u0001\u0000\u0000\u0000\u0225"+ + "\u0223\u0001\u0000\u0000\u0000\u0225\u0226\u0001\u0000\u0000\u0000\u0226"+ + "\u0229\u0001\u0000\u0000\u0000\u0227\u0225\u0001\u0000\u0000\u0000\u0228"+ + "\u021f\u0001\u0000\u0000\u0000\u0228\u0229\u0001\u0000\u0000\u0000\u0229"+ + "o\u0001\u0000\u0000\u0000\u022a\u022b\u00036\u001b\u0000\u022b\u022c\u0005"+ + "$\u0000\u0000\u022c\u022e\u0001\u0000\u0000\u0000\u022d\u022a\u0001\u0000"+ + "\u0000\u0000\u022d\u022e\u0001\u0000\u0000\u0000\u022e\u022f\u0001\u0000"+ + "\u0000\u0000\u022f\u0230\u00036\u001b\u0000\u0230q\u0001\u0000\u0000\u0000"+ + "\u0231\u0232\u0005\u000b\u0000\u0000\u0232\u0233\u0003 \u0010\u0000\u0233"+ + "\u0234\u0005X\u0000\u0000\u0234\u0235\u00038\u001c\u0000\u0235s\u0001"+ + "\u0000\u0000\u00006\u007f\u0088\u0098\u00a4\u00ad\u00b5\u00b9\u00c1\u00c3"+ + "\u00c8\u00cf\u00d4\u00db\u00e1\u00e9\u00eb\u00f6\u00fd\u0108\u010b\u0119"+ + "\u0121\u0129\u012d\u0134\u013c\u0144\u0151\u0155\u0159\u0160\u0164\u016a"+ + "\u0171\u0179\u0181\u0197\u01a2\u01ad\u01b2\u01b6\u01c1\u01c6\u01ca\u01d8"+ + "\u01e3\u01f1\u01fc\u01ff\u0204\u021d\u0225\u0228\u022d"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseListener.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseListener.java index 0da4c187a3d43..c2c682e0eea17 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseListener.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseListener.java @@ -365,13 +365,37 @@ public class EsqlBaseParserBaseListener implements EsqlBaseParserListener { * *

<p>The default implementation does nothing.</p>
      */ - @Override public void enterIndexIdentifier(EsqlBaseParser.IndexIdentifierContext ctx) { } + @Override public void enterIndexPattern(EsqlBaseParser.IndexPatternContext ctx) { } /** * {@inheritDoc} * * <p>The default implementation does nothing.</p>
      */ - @Override public void exitIndexIdentifier(EsqlBaseParser.IndexIdentifierContext ctx) { } + @Override public void exitIndexPattern(EsqlBaseParser.IndexPatternContext ctx) { } + /** + * {@inheritDoc} + * + * <p>The default implementation does nothing.</p>
      + */ + @Override public void enterClusterString(EsqlBaseParser.ClusterStringContext ctx) { } + /** + * {@inheritDoc} + * + * <p>The default implementation does nothing.</p>
      + */ + @Override public void exitClusterString(EsqlBaseParser.ClusterStringContext ctx) { } + /** + * {@inheritDoc} + * + * <p>The default implementation does nothing.</p>
      + */ + @Override public void enterIndexString(EsqlBaseParser.IndexStringContext ctx) { } + /** + * {@inheritDoc} + * + * <p>The default implementation does nothing.</p>
      + */ + @Override public void exitIndexString(EsqlBaseParser.IndexStringContext ctx) { } /** * {@inheritDoc} * * diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseVisitor.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseVisitor.java index ea1c9aca99880..3b2675d3490a2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseVisitor.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseVisitor.java @@ -221,7 +221,21 @@ public class EsqlBaseParserBaseVisitor<T> extends AbstractParseTreeVisitor<T> im *

<p>The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.</p>
      */ - @Override public T visitIndexIdentifier(EsqlBaseParser.IndexIdentifierContext ctx) { return visitChildren(ctx); } + @Override public T visitIndexPattern(EsqlBaseParser.IndexPatternContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + * <p>The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.</p>
      + */ + @Override public T visitClusterString(EsqlBaseParser.ClusterStringContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + * <p>The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.</p>

      + */ + @Override public T visitIndexString(EsqlBaseParser.IndexStringContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserListener.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserListener.java index 081deb03e8354..a6420e6fadebd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserListener.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserListener.java @@ -336,15 +336,35 @@ public interface EsqlBaseParserListener extends ParseTreeListener { */ void exitFromCommand(EsqlBaseParser.FromCommandContext ctx); /** - * Enter a parse tree produced by {@link EsqlBaseParser#indexIdentifier}. + * Enter a parse tree produced by {@link EsqlBaseParser#indexPattern}. * @param ctx the parse tree */ - void enterIndexIdentifier(EsqlBaseParser.IndexIdentifierContext ctx); + void enterIndexPattern(EsqlBaseParser.IndexPatternContext ctx); /** - * Exit a parse tree produced by {@link EsqlBaseParser#indexIdentifier}. + * Exit a parse tree produced by {@link EsqlBaseParser#indexPattern}. * @param ctx the parse tree */ - void exitIndexIdentifier(EsqlBaseParser.IndexIdentifierContext ctx); + void exitIndexPattern(EsqlBaseParser.IndexPatternContext ctx); + /** + * Enter a parse tree produced by {@link EsqlBaseParser#clusterString}. + * @param ctx the parse tree + */ + void enterClusterString(EsqlBaseParser.ClusterStringContext ctx); + /** + * Exit a parse tree produced by {@link EsqlBaseParser#clusterString}. + * @param ctx the parse tree + */ + void exitClusterString(EsqlBaseParser.ClusterStringContext ctx); + /** + * Enter a parse tree produced by {@link EsqlBaseParser#indexString}. + * @param ctx the parse tree + */ + void enterIndexString(EsqlBaseParser.IndexStringContext ctx); + /** + * Exit a parse tree produced by {@link EsqlBaseParser#indexString}. + * @param ctx the parse tree + */ + void exitIndexString(EsqlBaseParser.IndexStringContext ctx); /** * Enter a parse tree produced by {@link EsqlBaseParser#metadata}. * @param ctx the parse tree diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserVisitor.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserVisitor.java index d1ffbd5fa0b32..ec84b7234d67e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserVisitor.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserVisitor.java @@ -204,11 +204,23 @@ public interface EsqlBaseParserVisitor extends ParseTreeVisitor { */ T visitFromCommand(EsqlBaseParser.FromCommandContext ctx); /** - * Visit a parse tree produced by {@link EsqlBaseParser#indexIdentifier}. + * Visit a parse tree produced by {@link EsqlBaseParser#indexPattern}. * @param ctx the parse tree * @return the visitor result */ - T visitIndexIdentifier(EsqlBaseParser.IndexIdentifierContext ctx); + T visitIndexPattern(EsqlBaseParser.IndexPatternContext ctx); + /** + * Visit a parse tree produced by {@link EsqlBaseParser#clusterString}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitClusterString(EsqlBaseParser.ClusterStringContext ctx); + /** + * Visit a parse tree produced by {@link EsqlBaseParser#indexString}. 
+ * @param ctx the parse tree + * @return the visitor result + */ + T visitIndexString(EsqlBaseParser.IndexStringContext ctx); /** * Visit a parse tree produced by {@link EsqlBaseParser#metadata}. * @param ctx the parse tree diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlParser.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlParser.java index 56822386b2954..ebbcfa3b2863b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlParser.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlParser.java @@ -20,10 +20,9 @@ import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; import org.elasticsearch.xpack.esql.core.parser.CaseChangingCharStream; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import java.util.BitSet; -import java.util.Map; import java.util.function.BiFunction; import java.util.function.Function; @@ -35,7 +34,7 @@ public class EsqlParser { private static final Logger log = LogManager.getLogger(EsqlParser.class); public LogicalPlan createStatement(String query) { - return createStatement(query, QueryParams.EMPTY); + return createStatement(query, new QueryParams()); } public LogicalPlan createStatement(String query, QueryParams params) { @@ -52,14 +51,12 @@ private T invokeParser( BiFunction result ) { try { - EsqlBaseLexer lexer = new EsqlBaseLexer(new CaseChangingCharStream(CharStreams.fromString(query), false)); + EsqlBaseLexer lexer = new EsqlBaseLexer(new CaseChangingCharStream(CharStreams.fromString(query))); lexer.removeErrorListeners(); lexer.addErrorListener(ERROR_LISTENER); - Map positionalParamTokens = params.positionalParamTokens(); - TokenSource tokenSource = new ParametrizedTokenSource(lexer, positionalParamTokens, params); - + TokenSource tokenSource = new ParametrizedTokenSource(lexer, params); CommonTokenStream tokenStream = new CommonTokenStream(tokenSource); EsqlBaseParser parser = new EsqlBaseParser(tokenStream); @@ -117,20 +114,17 @@ public void syntaxError( * with actual values. 
*/ private static class ParametrizedTokenSource implements TokenSource { + private static String message = "Inconsistent parameter declaration, " + + "use one of positional, named or anonymous params but not a combination of "; private TokenSource delegate; - private Map paramTokens; - private int param; private QueryParams params; private BitSet paramTypes = new BitSet(3); - private static String message = "Inconsistent parameter declaration, " - + "use one of positional, named or anonymous params but not a combination of "; + private int param = 1; - ParametrizedTokenSource(TokenSource delegate, Map paramTokens, QueryParams params) { + ParametrizedTokenSource(TokenSource delegate, QueryParams params) { this.delegate = delegate; - this.paramTokens = paramTokens; this.params = params; - param = 0; } @Override @@ -138,10 +132,10 @@ public Token nextToken() { Token token = delegate.nextToken(); if (token.getType() == EsqlBaseLexer.PARAM) { checkAnonymousParam(token); - if (param >= params.positionalParams().size()) { - throw new ParsingException(source(token), "Not enough actual parameters {}", params.positionalParams().size()); + if (param > params.size()) { + throw new ParsingException(source(token), "Not enough actual parameters {}", params.size()); } - paramTokens.put(token, params.positionalParams().get(param)); + params.addTokenParam(token, params.get(param)); param++; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java index 41db2aa54387b..9769d286b484d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java @@ -25,8 +25,6 @@ import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttribute; import org.elasticsearch.xpack.esql.core.expression.UnresolvedStar; -import org.elasticsearch.xpack.esql.core.expression.function.FunctionResolutionStrategy; -import org.elasticsearch.xpack.esql.core.expression.function.UnresolvedFunction; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Not; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; @@ -42,6 +40,8 @@ import org.elasticsearch.xpack.esql.expression.Order; import org.elasticsearch.xpack.esql.expression.UnresolvedNamePattern; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; +import org.elasticsearch.xpack.esql.expression.function.FunctionResolutionStrategy; +import org.elasticsearch.xpack.esql.expression.function.UnresolvedFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.string.RLike; import org.elasticsearch.xpack.esql.expression.function.scalar.string.WildcardLike; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; @@ -226,7 +226,7 @@ public Literal visitStringLiteral(EsqlBaseParser.StringLiteralContext ctx) { @Override public Literal visitString(EsqlBaseParser.StringContext ctx) { Source source = source(ctx); - return new Literal(source, unquoteString(source), DataType.KEYWORD); + return new Literal(source, unquote(source), DataType.KEYWORD); } @Override @@ -729,7 +729,7 @@ QueryParam paramByNameOrPosition(TerminalNode node) { int index = Integer.parseInt(nameOrPosition); if 
(params.get(index) == null) { String message = ""; - int np = params.positionalParams().size(); + int np = params.size(); if (np > 0) { message = ", did you mean " + (np == 1 ? "position 1?" : "any position between 1 and " + np + "?"); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/IdentifierBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/IdentifierBuilder.java index e626f502f5413..9ccbb00ea4b5b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/IdentifierBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/IdentifierBuilder.java @@ -10,11 +10,12 @@ import org.antlr.v4.runtime.tree.TerminalNode; import org.elasticsearch.common.Strings; import org.elasticsearch.xpack.esql.parser.EsqlBaseParser.IdentifierContext; -import org.elasticsearch.xpack.esql.parser.EsqlBaseParser.IndexIdentifierContext; +import org.elasticsearch.xpack.esql.parser.EsqlBaseParser.IndexStringContext; +import java.util.ArrayList; import java.util.List; -import static org.elasticsearch.xpack.esql.core.parser.ParserUtils.visitList; +import static org.elasticsearch.transport.RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR; abstract class IdentifierBuilder extends AbstractBuilder { @@ -23,11 +24,6 @@ public String visitIdentifier(IdentifierContext ctx) { return ctx == null ? null : unquoteIdentifier(ctx.QUOTED_IDENTIFIER(), ctx.UNQUOTED_IDENTIFIER()); } - @Override - public String visitIndexIdentifier(IndexIdentifierContext ctx) { - return ctx == null ? null : unquoteIdentifier(null, ctx.INDEX_UNQUOTED_IDENTIFIER()); - } - protected static String unquoteIdentifier(TerminalNode quotedNode, TerminalNode unquotedNode) { String result; if (quotedNode != null) { @@ -42,7 +38,20 @@ protected static String unquoteIdString(String quotedString) { return quotedString.substring(1, quotedString.length() - 1).replace("``", "`"); } - public String visitIndexIdentifiers(List ctx) { - return Strings.collectionToDelimitedString(visitList(this, ctx, String.class), ","); + @Override + public String visitIndexString(IndexStringContext ctx) { + TerminalNode n = ctx.UNQUOTED_SOURCE(); + return n != null ? n.getText() : unquote(ctx.QUOTED_STRING().getText()); + } + + public String visitIndexPattern(List ctx) { + List patterns = new ArrayList<>(ctx.size()); + ctx.forEach(c -> { + String indexPattern = visitIndexString(c.indexString()); + patterns.add( + c.clusterString() != null ? 
c.clusterString().getText() + REMOTE_CLUSTER_INDEX_SEPARATOR + indexPattern : indexPattern + ); + }); + return Strings.collectionToDelimitedString(patterns, ","); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java index f829a7cb6ed00..f2603eedf8b84 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java @@ -16,7 +16,7 @@ import org.elasticsearch.dissect.DissectParser; import org.elasticsearch.index.IndexMode; import org.elasticsearch.xpack.esql.VerificationException; -import org.elasticsearch.xpack.esql.core.common.Failure; +import org.elasticsearch.xpack.esql.common.Failure; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.EmptyAttribute; @@ -29,32 +29,32 @@ import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttribute; import org.elasticsearch.xpack.esql.core.expression.UnresolvedStar; -import org.elasticsearch.xpack.esql.core.expression.function.UnresolvedFunction; import org.elasticsearch.xpack.esql.core.parser.ParserUtils; import org.elasticsearch.xpack.esql.core.plan.TableIdentifier; -import org.elasticsearch.xpack.esql.core.plan.logical.Filter; -import org.elasticsearch.xpack.esql.core.plan.logical.Limit; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.Holder; import org.elasticsearch.xpack.esql.expression.UnresolvedNamePattern; +import org.elasticsearch.xpack.esql.expression.function.UnresolvedFunction; import org.elasticsearch.xpack.esql.parser.EsqlBaseParser.MetadataOptionContext; +import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.logical.Drop; import org.elasticsearch.xpack.esql.plan.logical.Enrich; -import org.elasticsearch.xpack.esql.plan.logical.EsqlAggregate; -import org.elasticsearch.xpack.esql.plan.logical.EsqlUnresolvedRelation; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.Explain; +import org.elasticsearch.xpack.esql.plan.logical.Filter; import org.elasticsearch.xpack.esql.plan.logical.Grok; import org.elasticsearch.xpack.esql.plan.logical.InlineStats; import org.elasticsearch.xpack.esql.plan.logical.Keep; +import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Lookup; import org.elasticsearch.xpack.esql.plan.logical.MvExpand; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.plan.logical.Rename; import org.elasticsearch.xpack.esql.plan.logical.Row; +import org.elasticsearch.xpack.esql.plan.logical.UnresolvedRelation; import org.elasticsearch.xpack.esql.plan.logical.meta.MetaFunctions; import org.elasticsearch.xpack.esql.plan.logical.show.ShowInfo; @@ -92,15 +92,18 @@ public LogicalPlanBuilder(QueryParams params) { 
protected LogicalPlan plan(ParseTree ctx) { LogicalPlan p = ParserUtils.typedParsing(this, ctx, LogicalPlan.class); var errors = this.params.parsingErrors(); - if (errors.isEmpty()) { + if (errors.hasNext() == false) { return p; } else { StringBuilder message = new StringBuilder(); - for (int i = 0; i < errors.size(); i++) { + int i = 0; + + while (errors.hasNext()) { if (i > 0) { message.append("; "); } - message.append(errors.get(i).getMessage()); + message.append(errors.next().getMessage()); + i++; } throw new ParsingException(message.toString()); } @@ -142,12 +145,30 @@ public PlanFactory visitEvalCommand(EsqlBaseParser.EvalCommandContext ctx) { @Override public PlanFactory visitGrokCommand(EsqlBaseParser.GrokCommandContext ctx) { return p -> { + Source source = source(ctx); String pattern = visitString(ctx.string()).fold().toString(); - Grok result = new Grok(source(ctx), p, expression(ctx.primaryExpression()), Grok.pattern(source(ctx), pattern)); + Grok.Parser grokParser = Grok.pattern(source, pattern); + validateGrokPattern(source, grokParser, pattern); + Grok result = new Grok(source(ctx), p, expression(ctx.primaryExpression()), grokParser); return result; }; } + private void validateGrokPattern(Source source, Grok.Parser grokParser, String pattern) { + Map definedAttributes = new HashMap<>(); + for (Attribute field : grokParser.extractedFields()) { + String name = field.name(); + DataType type = field.dataType(); + DataType prev = definedAttributes.put(name, type); + if (prev != null) { + throw new ParsingException( + source, + "Invalid GROK pattern [" + pattern + "]: the attribute [" + name + "] is defined multiple times with different types" + ); + } + } + } + @Override public PlanFactory visitDissectCommand(EsqlBaseParser.DissectCommandContext ctx) { return p -> { @@ -220,7 +241,7 @@ public LogicalPlan visitRowCommand(EsqlBaseParser.RowCommandContext ctx) { @Override public LogicalPlan visitFromCommand(EsqlBaseParser.FromCommandContext ctx) { Source source = source(ctx); - TableIdentifier table = new TableIdentifier(source, null, visitIndexIdentifiers(ctx.indexIdentifier())); + TableIdentifier table = new TableIdentifier(source, null, visitIndexPattern(ctx.indexPattern())); Map metadataMap = new LinkedHashMap<>(); if (ctx.metadata() != null) { var deprecatedContext = ctx.metadata().deprecated_metadata(); @@ -237,8 +258,8 @@ public LogicalPlan visitFromCommand(EsqlBaseParser.FromCommandContext ctx) { metadataOptionContext = ctx.metadata().metadataOption(); } - for (var c : metadataOptionContext.indexIdentifier()) { - String id = visitIndexIdentifier(c); + for (var c : metadataOptionContext.UNQUOTED_SOURCE()) { + String id = c.getText(); Source src = source(c); if (MetadataAttribute.isSupported(id) == false) { throw new ParsingException(src, "unsupported metadata field [" + id + "]"); @@ -249,13 +270,20 @@ public LogicalPlan visitFromCommand(EsqlBaseParser.FromCommandContext ctx) { } } } - return new EsqlUnresolvedRelation(source, table, Arrays.asList(metadataMap.values().toArray(Attribute[]::new)), IndexMode.STANDARD); + return new UnresolvedRelation( + source, + table, + false, + List.of(metadataMap.values().toArray(Attribute[]::new)), + IndexMode.STANDARD, + null + ); } @Override public PlanFactory visitStatsCommand(EsqlBaseParser.StatsCommandContext ctx) { final Stats stats = stats(source(ctx), ctx.grouping, ctx.stats); - return input -> new EsqlAggregate(source(ctx), input, stats.groupings, stats.aggregates); + return input -> new Aggregate(source(ctx), input, 
Aggregate.AggregateType.STANDARD, stats.groupings, stats.aggregates); } private record Stats(List groupings, List aggregates) { @@ -434,13 +462,21 @@ public LogicalPlan visitMetricsCommand(EsqlBaseParser.MetricsCommandContext ctx) throw new IllegalArgumentException("METRICS command currently requires a snapshot build"); } Source source = source(ctx); - TableIdentifier table = new TableIdentifier(source, null, visitIndexIdentifiers(ctx.indexIdentifier())); - var unresolvedRelation = new EsqlUnresolvedRelation(source, table, List.of(), IndexMode.TIME_SERIES); + TableIdentifier table = new TableIdentifier(source, null, visitIndexPattern(ctx.indexPattern())); + if (ctx.aggregates == null && ctx.grouping == null) { - return unresolvedRelation; + return new UnresolvedRelation(source, table, false, List.of(), IndexMode.STANDARD, null); } final Stats stats = stats(source, ctx.grouping, ctx.aggregates); - return new EsqlAggregate(source, unresolvedRelation, stats.groupings, stats.aggregates); + var relation = new UnresolvedRelation( + source, + table, + false, + List.of(new MetadataAttribute(source, MetadataAttribute.TSID_FIELD, DataType.KEYWORD, false)), + IndexMode.TIME_SERIES, + null + ); + return new Aggregate(source, relation, Aggregate.AggregateType.METRICS, stats.groupings, stats.aggregates); } @Override @@ -450,14 +486,20 @@ public PlanFactory visitLookupCommand(EsqlBaseParser.LookupCommandContext ctx) { } var source = source(ctx); - List matchFields = visitQualifiedNamePatterns(ctx.qualifiedNamePatterns(), ne -> { + @SuppressWarnings("unchecked") + List matchFields = (List) (List) visitQualifiedNamePatterns(ctx.qualifiedNamePatterns(), ne -> { if (ne instanceof UnresolvedNamePattern || ne instanceof UnresolvedStar) { var src = ne.source(); throw new ParsingException(src, "Using wildcards [*] in LOOKUP ON is not allowed yet [{}]", src.text()); } + if ((ne instanceof UnresolvedAttribute) == false) { + throw new IllegalStateException( + "visitQualifiedNamePatterns can only return UnresolvedNamePattern, UnresolvedStar or UnresolvedAttribute" + ); + } }); - Literal tableName = new Literal(source, ctx.tableName.getText(), DataType.KEYWORD); + Literal tableName = new Literal(source, visitIndexPattern(List.of(ctx.indexPattern())), DataType.KEYWORD); return p -> new Lookup(source, p, tableName, matchFields, null /* localRelation will be resolved later*/); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/QueryParams.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/QueryParams.java index ebba6d3d0b482..0043e5fcce2e6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/QueryParams.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/QueryParams.java @@ -10,43 +10,54 @@ import org.antlr.v4.runtime.Token; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; public class QueryParams { - public static final QueryParams EMPTY = new QueryParams(); + private final List params; // This matches the named or unnamed parameters specified in an EsqlQueryRequest.params + private final Map nameToParam; // This matches the named parameters specified in an EsqlQueryRequest.params + private Map tokenToParam; // This is populated by EsqlParser, each parameter marker has an entry + private List parsingErrors; + private final int paramsCount; - // This matches the named or 
unnamed parameters specified in an EsqlQueryRequest.params. - private List params = new ArrayList<>(); - - // This matches the named parameters specified in an EsqlQueryRequest.params. - private Map nameToParam = new HashMap<>(); - - // This is populated by EsqlParser, each parameter marker has an entry. - private Map tokenToParam = new HashMap<>(); - - private List parsingErrors = new ArrayList<>(); - - public QueryParams() {} + public QueryParams() { + this(null); + } public QueryParams(List params) { - for (QueryParam p : params) { - this.params.add(p); - String name = p.name(); - if (name != null) { - nameToParam.put(name, p); + this.tokenToParam = new HashMap<>(); + this.parsingErrors = new ArrayList<>(); + + if (params == null || params.isEmpty()) { + this.params = List.of(); + this.nameToParam = Map.of(); + this.paramsCount = 0; + } else { + this.paramsCount = params.size(); + this.params = new ArrayList<>(paramsCount); + Map tempNameToParam = new HashMap<>(paramsCount); + for (QueryParam p : params) { + this.params.add(p); + String name = p.name(); + if (name != null) { + tempNameToParam.put(name, p); + } } + this.nameToParam = Collections.unmodifiableMap(tempNameToParam); } } - public List positionalParams() { - return this.params; + public int size() { + return this.paramsCount; } public QueryParam get(int index) { - return (index <= 0 || index > params.size()) ? null : params.get(index - 1); + return (index <= 0 || index > this.paramsCount) ? null : params.get(index - 1); } public Map namedParams() { @@ -61,10 +72,6 @@ public QueryParam get(String paramName) { return nameToParam.get(paramName); } - public Map positionalParamTokens() { - return this.tokenToParam; - } - public boolean contains(Token token) { return this.tokenToParam.containsKey(token); } @@ -73,11 +80,32 @@ public QueryParam get(Token tokenLocation) { return this.tokenToParam.get(tokenLocation); } - public List parsingErrors() { - return this.parsingErrors; + public void addTokenParam(Token token, QueryParam param) { + this.tokenToParam.put(token, param); + } + + public Iterator parsingErrors() { + return this.parsingErrors.iterator(); } public void addParsingError(ParsingException e) { this.parsingErrors.add(e); } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + QueryParams that = (QueryParams) o; + return paramsCount == that.paramsCount + && params.equals(that.params) + && nameToParam.equals(that.nameToParam) + && tokenToParam.equals(that.tokenToParam) + && parsingErrors.equals(that.parsingErrors); + } + + @Override + public int hashCode() { + return Objects.hash(params, nameToParam, tokenToParam, parsingErrors, paramsCount); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java index 8827e843939b6..78d77baa57aac 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java @@ -6,38 +6,98 @@ */ package org.elasticsearch.xpack.esql.plan.logical; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; import org.elasticsearch.xpack.esql.core.expression.Attribute; import 
org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import java.io.IOException; import java.util.List; import java.util.Objects; +import static java.util.Collections.emptyList; +import static org.elasticsearch.xpack.esql.expression.NamedExpressions.mergeOutputAttributes; + public class Aggregate extends UnaryPlan { + private List lazyOutput; + + public enum AggregateType { + STANDARD, + // include metrics aggregates such as rates + METRICS; + + static void writeType(StreamOutput out, AggregateType type) throws IOException { + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_ADD_AGGREGATE_TYPE)) { + out.writeString(type.name()); + } else if (type != STANDARD) { + throw new IllegalStateException("cluster is not ready to support aggregate type [" + type + "]"); + } + } + + static AggregateType readType(StreamInput in) throws IOException { + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_ADD_AGGREGATE_TYPE)) { + return AggregateType.valueOf(in.readString()); + } else { + return STANDARD; + } + } + } + private final AggregateType aggregateType; private final List groupings; private final List aggregates; - public Aggregate(Source source, LogicalPlan child, List groupings, List aggregates) { + public Aggregate( + Source source, + LogicalPlan child, + AggregateType aggregateType, + List groupings, + List aggregates + ) { super(source, child); + this.aggregateType = aggregateType; this.groupings = groupings; this.aggregates = aggregates; } + public Aggregate(PlanStreamInput in) throws IOException { + this( + Source.readFrom(in), + in.readLogicalPlanNode(), + AggregateType.readType(in), + in.readNamedWriteableCollectionAsList(Expression.class), + in.readNamedWriteableCollectionAsList(NamedExpression.class) + ); + } + + public static void writeAggregate(PlanStreamOutput out, Aggregate aggregate) throws IOException { + Source.EMPTY.writeTo(out); + out.writeLogicalPlanNode(aggregate.child()); + AggregateType.writeType(out, aggregate.aggregateType()); + out.writeNamedWriteableCollection(aggregate.groupings); + out.writeNamedWriteableCollection(aggregate.aggregates()); + } + @Override protected NodeInfo info() { - return NodeInfo.create(this, Aggregate::new, child(), groupings, aggregates); + return NodeInfo.create(this, Aggregate::new, child(), aggregateType, groupings, aggregates); } @Override public Aggregate replaceChild(LogicalPlan newChild) { - return new Aggregate(source(), newChild, groupings, aggregates); + return new Aggregate(source(), newChild, aggregateType, groupings, aggregates); + } + + public AggregateType aggregateType() { + return aggregateType; } public List groupings() { @@ -55,12 +115,15 @@ public boolean expressionsResolved() { @Override public List output() { - return Expressions.asAttributes(aggregates); + if (lazyOutput == null) { + lazyOutput = mergeOutputAttributes(Expressions.asAttributes(aggregates()), emptyList()); + } + return lazyOutput; } @Override public int hashCode() { - return Objects.hash(groupings, aggregates, child()); + return 
Objects.hash(aggregateType, groupings, aggregates, child()); } @Override @@ -74,7 +137,8 @@ public boolean equals(Object obj) { } Aggregate other = (Aggregate) obj; - return Objects.equals(groupings, other.groupings) + return aggregateType == other.aggregateType + && Objects.equals(groupings, other.groupings) && Objects.equals(aggregates, other.aggregates) && Objects.equals(child(), other.child()); } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/BinaryPlan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/BinaryPlan.java similarity index 95% rename from x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/BinaryPlan.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/BinaryPlan.java index 051c3d7946b4b..579b67eb891ac 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/BinaryPlan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/BinaryPlan.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. */ -package org.elasticsearch.xpack.esql.core.plan.logical; +package org.elasticsearch.xpack.esql.plan.logical; import org.elasticsearch.xpack.esql.core.tree.Source; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Dissect.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Dissect.java index 1307d1870bba4..c0c564b1b36eb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Dissect.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Dissect.java @@ -10,8 +10,6 @@ import org.elasticsearch.dissect.DissectParser; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Drop.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Drop.java index 2946287ae21f0..d1c5d70018d91 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Drop.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Drop.java @@ -9,8 +9,6 @@ import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java index f418ab5da1c9d..a4d553eae4749 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java @@ -14,8 +14,6 @@ import org.elasticsearch.xpack.esql.core.expression.EmptyAttribute; import 
org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java index 08916c14e91bf..382838a5968cc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java @@ -10,7 +10,6 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.index.EsIndex; -import org.elasticsearch.xpack.esql.core.plan.logical.LeafPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.NodeUtils; import org.elasticsearch.xpack.esql.core.tree.Source; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsqlAggregate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsqlAggregate.java deleted file mode 100644 index 6cda14600e840..0000000000000 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsqlAggregate.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.plan.logical; - -import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.Expressions; -import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.tree.NodeInfo; -import org.elasticsearch.xpack.esql.core.tree.Source; - -import java.util.List; - -import static java.util.Collections.emptyList; -import static org.elasticsearch.xpack.esql.expression.NamedExpressions.mergeOutputAttributes; - -/** - * Extension of Aggregate for handling duplicates. - * In ESQL is it possible to declare multiple aggregations and groupings with the same name, with the last declaration in grouping - * winning. - * As some of these declarations can be invalid, for validation reasons we need to keep the data around yet allowing will lead to - * ambiguity in the output. - * Hence this class - to allow the declaration to be moved over and thus for the Verifier to pick up the declaration while providing - * a proper output. - * To simplify things, the Aggregate class will be replaced with a vanilla one. 
- */ -public class EsqlAggregate extends Aggregate { - - private List lazyOutput; - - public EsqlAggregate(Source source, LogicalPlan child, List groupings, List aggregates) { - super(source, child, groupings, aggregates); - } - - @Override - public List output() { - if (lazyOutput == null) { - lazyOutput = mergeOutputAttributes(Expressions.asAttributes(aggregates()), emptyList()); - } - - return lazyOutput; - } - - @Override - protected NodeInfo info() { - return NodeInfo.create(this, EsqlAggregate::new, child(), groupings(), aggregates()); - } - - @Override - public EsqlAggregate replaceChild(LogicalPlan newChild) { - return new EsqlAggregate(source(), newChild, groupings(), aggregates()); - } -} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsqlUnresolvedRelation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsqlUnresolvedRelation.java deleted file mode 100644 index ffc4818b6ceb5..0000000000000 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsqlUnresolvedRelation.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.plan.logical; - -import org.elasticsearch.index.IndexMode; -import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.plan.TableIdentifier; -import org.elasticsearch.xpack.esql.core.tree.NodeInfo; -import org.elasticsearch.xpack.esql.core.tree.Source; - -import java.util.List; - -public class EsqlUnresolvedRelation extends UnresolvedRelation { - - private final List metadataFields; - private final IndexMode indexMode; - - public EsqlUnresolvedRelation( - Source source, - TableIdentifier table, - List metadataFields, - IndexMode indexMode, - String unresolvedMessage - ) { - super(source, table, "", false, unresolvedMessage); - this.metadataFields = metadataFields; - this.indexMode = indexMode; - } - - public EsqlUnresolvedRelation(Source source, TableIdentifier table, List metadataFields, IndexMode indexMode) { - this(source, table, metadataFields, indexMode, null); - } - - public List metadataFields() { - return metadataFields; - } - - public IndexMode indexMode() { - return indexMode; - } - - @Override - protected NodeInfo info() { - return NodeInfo.create(this, EsqlUnresolvedRelation::new, table(), metadataFields(), indexMode(), unresolvedMessage()); - } -} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Eval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Eval.java index bfe11c3d33d87..20117a873c143 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Eval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Eval.java @@ -10,8 +10,6 @@ import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; diff --git 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Explain.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Explain.java index 86f3e0bdf349a..8d2640a43f38c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Explain.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Explain.java @@ -9,8 +9,6 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; -import org.elasticsearch.xpack.esql.core.plan.logical.LeafPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/Filter.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Filter.java similarity index 97% rename from x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/Filter.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Filter.java index a09ffb3e07c96..46fafe57e7d26 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/Filter.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Filter.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. */ -package org.elasticsearch.xpack.esql.core.plan.logical; +package org.elasticsearch.xpack.esql.plan.logical; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Grok.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Grok.java index 5a85e385da8ef..963fd318f814c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Grok.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Grok.java @@ -15,14 +15,11 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.NamedExpressions; import org.elasticsearch.xpack.esql.parser.ParsingException; -import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import java.util.Comparator; import java.util.List; @@ -33,12 +30,12 @@ public class Grok extends RegexExtract { public record Parser(String pattern, org.elasticsearch.grok.Grok grok) { - private List extractedFields() { + public List extractedFields() { return grok.captureConfig() .stream() .sorted(Comparator.comparing(GrokCaptureConfig::name)) // promote small numeric types, since Grok can produce float values - .map(x -> new ReferenceAttribute(Source.EMPTY, x.name(), EsqlDataTypes.widenSmallNumericTypes(toDataType(x.type())))) + .map(x -> new ReferenceAttribute(Source.EMPTY, 
x.name(), toDataType(x.type()).widenSmallNumeric())) .collect(Collectors.toList()); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java index 4e7dc70904189..46ec56223384c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java @@ -12,8 +12,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Keep.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Keep.java index a4e733437e80f..c1c8c9aff5ca6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Keep.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Keep.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.esql.plan.logical; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/LeafPlan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/LeafPlan.java similarity index 92% rename from x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/LeafPlan.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/LeafPlan.java index 4def8356b316a..d21b61a81cd9e 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/LeafPlan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/LeafPlan.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. */ -package org.elasticsearch.xpack.esql.core.plan.logical; +package org.elasticsearch.xpack.esql.plan.logical; import org.elasticsearch.xpack.esql.core.tree.Source; diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/Limit.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Limit.java similarity index 96% rename from x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/Limit.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Limit.java index 610572f1e73ed..df5e1cf23275c 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/Limit.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Limit.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.xpack.esql.core.plan.logical; +package org.elasticsearch.xpack.esql.plan.logical; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/LogicalPlan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/LogicalPlan.java similarity index 97% rename from x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/LogicalPlan.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/LogicalPlan.java index 56e09b4e1189a..0397183c6a6c3 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/LogicalPlan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/LogicalPlan.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. */ -package org.elasticsearch.xpack.esql.core.plan.logical; +package org.elasticsearch.xpack.esql.plan.logical; import org.elasticsearch.xpack.esql.core.capabilities.Resolvable; import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Lookup.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Lookup.java index 690e4595f64e5..6893935f20b5b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Lookup.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Lookup.java @@ -11,13 +11,8 @@ import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.Expressions; -import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import org.elasticsearch.xpack.esql.plan.logical.join.Join; @@ -39,7 +34,7 @@ public class Lookup extends UnaryPlan { /** * References to the input fields to match against the {@link #localRelation}. 
*/ - private final List matchFields; + private final List matchFields; // initialized during the analysis phase for output and validation // afterward, it is converted into a Join (BinaryPlan) hence why here it is not a child private final LocalRelation localRelation; @@ -49,7 +44,7 @@ public Lookup( Source source, LogicalPlan child, Expression tableName, - List matchFields, + List matchFields, @Nullable LocalRelation localRelation ) { super(source, child); @@ -60,15 +55,15 @@ public Lookup( public Lookup(PlanStreamInput in) throws IOException { super(Source.readFrom(in), in.readLogicalPlanNode()); - this.tableName = in.readExpression(); - this.matchFields = in.readNamedWriteableCollectionAsList(NamedExpression.class); + this.tableName = in.readNamedWriteable(Expression.class); + this.matchFields = in.readNamedWriteableCollectionAsList(Attribute.class); this.localRelation = in.readBoolean() ? new LocalRelation(in) : null; } public void writeTo(PlanStreamOutput out) throws IOException { source().writeTo(out); out.writeLogicalPlanNode(child()); - out.writeExpression(tableName); + out.writeNamedWriteable(tableName); out.writeNamedWriteableCollection(matchFields); if (localRelation == null) { out.writeBoolean(false); @@ -82,7 +77,7 @@ public Expression tableName() { return tableName; } - public List matchFields() { + public List matchFields() { return matchFields; } @@ -91,17 +86,19 @@ public LocalRelation localRelation() { } public JoinConfig joinConfig() { - List conditions = new ArrayList<>(matchFields.size()); + List leftFields = new ArrayList<>(matchFields.size()); + List rightFields = new ArrayList<>(matchFields.size()); List rhsOutput = Join.makeReference(localRelation.output()); - for (NamedExpression lhs : matchFields) { + for (Attribute lhs : matchFields) { for (Attribute rhs : rhsOutput) { if (lhs.name().equals(rhs.name())) { - conditions.add(new Equals(source(), lhs, rhs)); + leftFields.add(lhs); + rightFields.add(rhs); break; } } } - return new JoinConfig(JoinType.LEFT, matchFields, conditions); + return new JoinConfig(JoinType.LEFT, matchFields, leftFields, rightFields); } @Override @@ -122,10 +119,10 @@ protected NodeInfo info() { @Override public List output() { if (lazyOutput == null) { - List rightSide = localRelation != null - ? 
Join.makeNullable(Join.makeReference(localRelation.output())) - : Expressions.asAttributes(matchFields); - lazyOutput = Join.mergeOutput(child().output(), rightSide, matchFields); + if (localRelation == null) { + throw new IllegalStateException("Cannot determine output of LOOKUP with unresolved table"); + } + lazyOutput = Join.computeOutput(child().output(), localRelation.output(), joinConfig()); } return lazyOutput; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/MvExpand.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/MvExpand.java index 869d8d7dc3a26..5e9dca26a6863 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/MvExpand.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/MvExpand.java @@ -9,8 +9,6 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/OrderBy.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/OrderBy.java similarity index 96% rename from x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/OrderBy.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/OrderBy.java index c13b3a028f0e8..68d089980074c 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/OrderBy.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/OrderBy.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.xpack.esql.core.plan.logical; +package org.elasticsearch.xpack.esql.plan.logical; import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; import org.elasticsearch.xpack.esql.core.expression.Order; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Project.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Project.java index fe28ddcc43b40..d3896b1dfc844 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Project.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Project.java @@ -10,8 +10,6 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.function.Functions; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/RegexExtract.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/RegexExtract.java index 5bf45fc0f61ad..649173f11dfaf 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/RegexExtract.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/RegexExtract.java @@ -9,8 +9,6 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.tree.Source; import java.util.List; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Rename.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Rename.java index 7d99c566aa0c7..5e4b45d7127fe 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Rename.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Rename.java @@ -8,8 +8,6 @@ package org.elasticsearch.xpack.esql.plan.logical; import org.elasticsearch.xpack.esql.core.expression.Alias; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Row.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Row.java index 9af3e08a6734b..30e16d9e1b227 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Row.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Row.java @@ -11,8 +11,6 @@ import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expressions; -import org.elasticsearch.xpack.esql.core.plan.logical.LeafPlan; -import 
org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/TopN.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/TopN.java index ac576eaa2cb96..227d7785804d4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/TopN.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/TopN.java @@ -10,8 +10,6 @@ import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Order; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/UnaryPlan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/UnaryPlan.java similarity index 96% rename from x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/UnaryPlan.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/UnaryPlan.java index 75ce38127394e..ea9a760ef5dc4 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plan/logical/UnaryPlan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/UnaryPlan.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.xpack.esql.core.plan.logical; +package org.elasticsearch.xpack.esql.plan.logical; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.tree.Source; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/UnresolvedRelation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/UnresolvedRelation.java index eb6627bbdd0f8..cd1817367167d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/UnresolvedRelation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/UnresolvedRelation.java @@ -6,10 +6,13 @@ */ package org.elasticsearch.xpack.esql.plan.logical; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.xpack.esql.core.capabilities.Unresolvable; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; +import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; +import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttribute; import org.elasticsearch.xpack.esql.core.plan.TableIdentifier; -import org.elasticsearch.xpack.esql.core.plan.logical.LeafPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -23,34 +26,35 @@ public class UnresolvedRelation extends LeafPlan implements Unresolvable { private final TableIdentifier table; private final boolean frozen; - private final String alias; + private final List metadataFields; + private final IndexMode indexMode; private final String unresolvedMsg; - public UnresolvedRelation(Source source, TableIdentifier table, String alias, boolean frozen) { - this(source, table, alias, frozen, null); - } - - public UnresolvedRelation(Source source, TableIdentifier table, String alias, boolean frozen, String unresolvedMessage) { + public UnresolvedRelation( + Source source, + TableIdentifier table, + boolean frozen, + List metadataFields, + IndexMode indexMode, + String unresolvedMessage + ) { super(source); this.table = table; - this.alias = alias; this.frozen = frozen; + this.metadataFields = metadataFields; + this.indexMode = indexMode; this.unresolvedMsg = unresolvedMessage == null ? 
"Unknown index [" + table.index() + "]" : unresolvedMessage; } @Override protected NodeInfo info() { - return NodeInfo.create(this, UnresolvedRelation::new, table, alias, frozen, unresolvedMsg); + return NodeInfo.create(this, UnresolvedRelation::new, table, frozen, metadataFields, indexMode, unresolvedMsg); } public TableIdentifier table() { return table; } - public String alias() { - return alias; - } - public boolean frozen() { return frozen; } @@ -70,14 +74,32 @@ public List output() { return Collections.emptyList(); } + public List metadataFields() { + return metadataFields; + } + + public IndexMode indexMode() { + return indexMode; + } + @Override public String unresolvedMessage() { return unresolvedMsg; } + @Override + public AttributeSet references() { + AttributeSet refs = super.references(); + if (indexMode == IndexMode.TIME_SERIES) { + refs = new AttributeSet(refs); + refs.add(new UnresolvedAttribute(source(), MetadataAttribute.TIMESTAMP_FIELD)); + } + return refs; + } + @Override public int hashCode() { - return Objects.hash(source(), table, alias, unresolvedMsg); + return Objects.hash(source(), table, metadataFields, indexMode, unresolvedMsg); } @Override @@ -92,8 +114,9 @@ public boolean equals(Object obj) { UnresolvedRelation other = (UnresolvedRelation) obj; return Objects.equals(table, other.table) - && Objects.equals(alias, other.alias) && Objects.equals(frozen, other.frozen) + && Objects.equals(metadataFields, other.metadataFields) + && indexMode == other.indexMode && Objects.equals(unresolvedMsg, other.unresolvedMsg); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java index 81ec67a28bbfd..79278995b29bd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java @@ -8,26 +8,29 @@ package org.elasticsearch.xpack.esql.plan.logical.join; import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; +import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.Nullability; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; -import org.elasticsearch.xpack.esql.core.plan.logical.BinaryPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import org.elasticsearch.xpack.esql.plan.logical.BinaryPlan; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import java.io.IOException; import java.util.ArrayList; +import java.util.HashSet; import java.util.List; import java.util.Objects; +import java.util.Set; + +import static org.elasticsearch.xpack.esql.expression.NamedExpressions.mergeOutputAttributes; public class Join extends BinaryPlan { private final JoinConfig config; - // TODO: The matching attributes from the left and right logical plans should become part of the `expressions()` - // so that `references()` returns the attributes we actually rely on. 
private List lazyOutput; public Join(Source source, LogicalPlan left, LogicalPlan right, JoinConfig config) { @@ -35,6 +38,19 @@ public Join(Source source, LogicalPlan left, LogicalPlan right, JoinConfig confi this.config = config; } + public Join( + Source source, + LogicalPlan left, + LogicalPlan right, + JoinType type, + List matchFields, + List leftFields, + List rightFields + ) { + super(source, left, right); + this.config = new JoinConfig(type, matchFields, leftFields, rightFields); + } + public Join(PlanStreamInput in) throws IOException { super(Source.readFrom(in), in.readLogicalPlanNode(), in.readLogicalPlanNode()); this.config = new JoinConfig(in); @@ -53,7 +69,18 @@ public JoinConfig config() { @Override protected NodeInfo info() { - return NodeInfo.create(this, Join::new, left(), right(), config); + // Do not just add the JoinConfig as a whole - this would prevent correctly registering the + // expressions and references. + return NodeInfo.create( + this, + Join::new, + left(), + right(), + config.type(), + config.matchFields(), + config.leftFields(), + config.rightFields() + ); } @Override @@ -68,47 +95,41 @@ public Join replaceChildren(LogicalPlan left, LogicalPlan right) { @Override public List output() { if (lazyOutput == null) { - lazyOutput = computeOutput(); + lazyOutput = computeOutput(left().output(), right().output(), config); } return lazyOutput; } - private List computeOutput() { - List right = makeReference(right().output()); + /** + * Merge output fields. + * Currently only implemented for LEFT JOINs; the rightOutput shadows the leftOutput, except for any attributes that + * occur in the join's matchFields. + */ + public static List computeOutput(List leftOutput, List rightOutput, JoinConfig config) { + AttributeSet matchFieldSet = new AttributeSet(config.matchFields()); + Set matchFieldNames = new HashSet<>(Expressions.names(config.matchFields())); return switch (config.type()) { - case LEFT -> // right side becomes nullable - mergeOutput(left().output(), makeNullable(right), config.matchFields()); - case RIGHT -> // left side becomes nullable - mergeOutput(makeNullable(left().output()), right, config.matchFields()); - case FULL -> // both sides become nullable - mergeOutput(makeNullable(left().output()), makeNullable(right), config.matchFields()); - default -> // neither side becomes nullable - mergeOutput(left().output(), right, config.matchFields()); + case LEFT -> { + // Right side becomes nullable. + List fieldsAddedFromRight = removeCollisionsWithMatchFields(rightOutput, matchFieldSet, matchFieldNames); + yield mergeOutputAttributes(makeNullable(makeReference(fieldsAddedFromRight)), leftOutput); + } + default -> throw new UnsupportedOperationException("Other JOINs than LEFT not supported"); }; } - /** - * Merge output fields, left hand side wins in name conflicts except - * for fields defined in {@link JoinConfig#matchFields()}. 
- */ - public static List mergeOutput( - List lhs, - List rhs, - List matchFields + private static List removeCollisionsWithMatchFields( + List attributes, + AttributeSet matchFields, + Set matchFieldNames ) { - List results = new ArrayList<>(lhs.size() + rhs.size()); - - for (Attribute a : lhs) { - if (rhs.contains(a) == false || matchFields.stream().anyMatch(m -> m.name().equals(a.name()))) { - results.add(a); - } - } - for (Attribute a : rhs) { - if (false == matchFields.stream().anyMatch(m -> m.name().equals(a.name()))) { - results.add(a); + List result = new ArrayList<>(); + for (Attribute attr : attributes) { + if ((matchFields.contains(attr) || matchFieldNames.contains(attr.name())) == false) { + result.add(attr); } } - return results; + return result; } /** @@ -125,7 +146,7 @@ public static List mergeOutput( public static List makeReference(List output) { List out = new ArrayList<>(output.size()); for (Attribute a : output) { - if (a.resolved()) { + if (a.resolved() && a instanceof ReferenceAttribute == false) { out.add(new ReferenceAttribute(a.source(), a.name(), a.dataType(), a.qualifier(), a.nullable(), a.id(), a.synthetic())); } else { out.add(a); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/JoinConfig.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/JoinConfig.java index b5cf5d9234c6b..68ad50f2f67a0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/JoinConfig.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/JoinConfig.java @@ -11,25 +11,26 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; -import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import org.elasticsearch.xpack.esql.core.expression.Attribute; import java.io.IOException; import java.util.List; /** * Configuration for a {@code JOIN} style operation. 
- * @param matchFields fields that are merged from the left and right relations - * @param conditions when these conditions are true the rows are joined + * @param matchFields fields either from the left or right fields which decide which side is kept + * @param leftFields matched with the right fields + * @param rightFields matched with the left fields */ -public record JoinConfig(JoinType type, List matchFields, List conditions) implements Writeable { +public record JoinConfig(JoinType type, List matchFields, List leftFields, List rightFields) + implements + Writeable { public JoinConfig(StreamInput in) throws IOException { this( JoinType.readFrom(in), - in.readNamedWriteableCollectionAsList(NamedExpression.class), - in.readCollectionAsList(i -> ((PlanStreamInput) i).readExpression()) + in.readNamedWriteableCollectionAsList(Attribute.class), + in.readNamedWriteableCollectionAsList(Attribute.class), + in.readNamedWriteableCollectionAsList(Attribute.class) ); } @@ -37,10 +38,11 @@ public JoinConfig(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { type.writeTo(out); out.writeNamedWriteableCollection(matchFields); - out.writeCollection(conditions, (o, v) -> ((PlanStreamOutput) o).writeExpression(v)); + out.writeNamedWriteableCollection(leftFields); + out.writeNamedWriteableCollection(rightFields); } public boolean expressionsResolved() { - return Resolvables.resolved(matchFields) && Resolvables.resolved(conditions); + return Resolvables.resolved(matchFields) && Resolvables.resolved(leftFields) && Resolvables.resolved(rightFields); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/local/EsqlProject.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/local/EsqlProject.java index 03a9c2b68b327..e359c6f928f7c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/local/EsqlProject.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/local/EsqlProject.java @@ -8,10 +8,10 @@ package org.elasticsearch.xpack.esql.plan.logical.local; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Project; import java.util.List; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/local/LocalRelation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/local/LocalRelation.java index 862098621e9ee..195eb3b6304e4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/local/LocalRelation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/local/LocalRelation.java @@ -7,11 +7,11 @@ package org.elasticsearch.xpack.esql.plan.logical.local; import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.plan.logical.LeafPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import 
org.elasticsearch.xpack.esql.plan.logical.LeafPlan; import java.io.IOException; import java.util.List; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/meta/MetaFunctions.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/meta/MetaFunctions.java index 6356b2644e67a..9ac9ccdf2a876 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/meta/MetaFunctions.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/meta/MetaFunctions.java @@ -11,12 +11,11 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; -import org.elasticsearch.xpack.esql.core.expression.function.FunctionRegistry; -import org.elasticsearch.xpack.esql.core.plan.logical.LeafPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; +import org.elasticsearch.xpack.esql.plan.logical.LeafPlan; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import java.util.ArrayList; import java.util.Arrays; @@ -49,7 +48,7 @@ public List output() { return attributes; } - public List> values(FunctionRegistry functionRegistry) { + public List> values(EsqlFunctionRegistry functionRegistry) { List> rows = new ArrayList<>(); for (var def : functionRegistry.listFunctions(null)) { EsqlFunctionRegistry.FunctionDescription signature = EsqlFunctionRegistry.description(def); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/show/ShowInfo.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/show/ShowInfo.java index 4867d8ca77a39..6e98df32580ae 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/show/ShowInfo.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/show/ShowInfo.java @@ -11,10 +11,10 @@ import org.elasticsearch.Build; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; -import org.elasticsearch.xpack.esql.core.plan.logical.LeafPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.plan.logical.LeafPlan; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import java.util.ArrayList; import java.util.List; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java index 13773ca61f8d8..b8f96709a583f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java @@ -28,9 +28,7 @@ import java.util.Objects; public class EsQueryExec extends LeafExec implements EstimatesRowSize { - static final EsField DOC_ID_FIELD = new EsField("_doc", DataType.DOC_DATA_TYPE, Map.of(), false); - static final EsField TSID_FIELD = new EsField("_tsid", DataType.TSID_DATA_TYPE, Map.of(), true); - static final EsField TIMESTAMP_FIELD = new 
EsField("@timestamp", DataType.DATETIME, Map.of(), true); + public static final EsField DOC_ID_FIELD = new EsField("_doc", DataType.DOC_DATA_TYPE, Map.of(), false); private final EsIndex index; private final IndexMode indexMode; @@ -55,8 +53,8 @@ public FieldSortBuilder fieldSortBuilder() { } } - public EsQueryExec(Source source, EsIndex index, IndexMode indexMode, QueryBuilder query) { - this(source, index, indexMode, sourceAttributes(source, indexMode), query, null, null, null); + public EsQueryExec(Source source, EsIndex index, IndexMode indexMode, List attributes, QueryBuilder query) { + this(source, index, indexMode, attributes, query, null, null, null); } public EsQueryExec( @@ -79,17 +77,6 @@ public EsQueryExec( this.estimatedRowSize = estimatedRowSize; } - private static List sourceAttributes(Source source, IndexMode indexMode) { - return switch (indexMode) { - case STANDARD, LOGS -> List.of(new FieldAttribute(source, DOC_ID_FIELD.getName(), DOC_ID_FIELD)); - case TIME_SERIES -> List.of( - new FieldAttribute(source, DOC_ID_FIELD.getName(), DOC_ID_FIELD), - new FieldAttribute(source, TSID_FIELD.getName(), TSID_FIELD), - new FieldAttribute(source, TIMESTAMP_FIELD.getName(), TIMESTAMP_FIELD) - ); - }; - } - public static boolean isSourceAttribute(Attribute attr) { return DOC_ID_FIELD.getName().equals(attr.name()); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EstimatesRowSize.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EstimatesRowSize.java index 8b9b5398b3cec..40c9067efbeda 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EstimatesRowSize.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EstimatesRowSize.java @@ -119,7 +119,7 @@ static int estimateSize(DataType dataType) { case LONG -> Long.BYTES; case NULL -> 0; // TODO: provide a specific estimate for aggregated_metrics_double - case COMPOSITE -> throw new EsqlIllegalArgumentException("can't estimate size for composite blocks"); + case COMPOSITE -> 50; case UNKNOWN -> throw new EsqlIllegalArgumentException("[unknown] can't be the result of field extraction"); }; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExec.java index 95cd732eabd45..5c01658760632 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExec.java @@ -9,9 +9,9 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import java.util.List; import java.util.Objects; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java index dff0a6f0eade3..0415a5cbb9132 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java @@ -9,11 +9,8 @@ import 
org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.AttributeSet; -import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; @@ -24,13 +21,9 @@ public class HashJoinExec extends UnaryExec implements EstimatesRowSize { private final LocalSourceExec joinData; - private final List matchFields; - /** - * Conditions that must match for rows to be joined. The {@link Equals#left()} - * is always from the child and the {@link Equals#right()} is always from the - * {@link #joinData()}. - */ - private final List conditions; + private final List matchFields; + private final List leftFields; + private final List rightFields; private final List output; private AttributeSet lazyAddedFields; @@ -38,22 +31,25 @@ public HashJoinExec( Source source, PhysicalPlan child, LocalSourceExec hashData, - List matchFields, - List conditions, + List matchFields, + List leftFields, + List rightFields, List output ) { super(source, child); this.joinData = hashData; this.matchFields = matchFields; - this.conditions = conditions; + this.leftFields = leftFields; + this.rightFields = rightFields; this.output = output; } public HashJoinExec(PlanStreamInput in) throws IOException { super(Source.readFrom(in), in.readPhysicalPlanNode()); this.joinData = new LocalSourceExec(in); - this.matchFields = in.readNamedWriteableCollectionAsList(NamedExpression.class); - this.conditions = in.readCollectionAsList(i -> (Equals) EsqlBinaryComparison.readFrom(in)); + this.matchFields = in.readNamedWriteableCollectionAsList(Attribute.class); + this.leftFields = in.readNamedWriteableCollectionAsList(Attribute.class); + this.rightFields = in.readNamedWriteableCollectionAsList(Attribute.class); this.output = in.readNamedWriteableCollectionAsList(Attribute.class); } @@ -62,7 +58,8 @@ public void writeTo(PlanStreamOutput out) throws IOException { out.writePhysicalPlanNode(child()); joinData.writeTo(out); out.writeNamedWriteableCollection(matchFields); - out.writeCollection(conditions, (o, v) -> v.writeTo(o)); + out.writeNamedWriteableCollection(leftFields); + out.writeNamedWriteableCollection(rightFields); out.writeNamedWriteableCollection(output); } @@ -70,17 +67,16 @@ public LocalSourceExec joinData() { return joinData; } - public List matchFields() { + public List matchFields() { return matchFields; } - /** - * Conditions that must match for rows to be joined. The {@link Equals#left()} - * is always from the child and the {@link Equals#right()} is always from the - * {@link #joinData()}. 
- */ - public List conditions() { - return conditions; + public List leftFields() { + return leftFields; + } + + public List rightFields() { + return rightFields; } public Set addedFields() { @@ -104,12 +100,12 @@ public List output() { @Override public HashJoinExec replaceChild(PhysicalPlan newChild) { - return new HashJoinExec(source(), newChild, joinData, matchFields, conditions, output); + return new HashJoinExec(source(), newChild, joinData, matchFields, leftFields, rightFields, output); } @Override protected NodeInfo info() { - return NodeInfo.create(this, HashJoinExec::new, child(), joinData, matchFields, conditions, output); + return NodeInfo.create(this, HashJoinExec::new, child(), joinData, matchFields, leftFields, rightFields, output); } @Override @@ -126,12 +122,13 @@ public boolean equals(Object o) { HashJoinExec hash = (HashJoinExec) o; return joinData.equals(hash.joinData) && matchFields.equals(hash.matchFields) - && conditions.equals(hash.conditions) + && leftFields.equals(hash.leftFields) + && rightFields.equals(hash.rightFields) && output.equals(hash.output); } @Override public int hashCode() { - return Objects.hash(super.hashCode(), joinData, matchFields, conditions, output); + return Objects.hash(super.hashCode(), joinData, matchFields, leftFields, rightFields, output); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java index d0481129cee8a..3971e79cdc6d9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java @@ -257,16 +257,17 @@ private void aggregatesToFactory( ); } } else { - Attribute attr = Expressions.attribute(field); - // cannot determine attribute - if (attr == null) { - throw new EsqlIllegalArgumentException( - "Cannot work with target field [{}] for agg [{}]", - field.sourceText(), - aggregateFunction.sourceText() - ); - } - sourceAttr = List.of(attr); + sourceAttr = aggregateFunction.inputExpressions().stream().map(e -> { + Attribute attr = Expressions.attribute(e); + if (attr == null) { + throw new EsqlIllegalArgumentException( + "Cannot work with target field [{}] for agg [{}]", + e.sourceText(), + aggregateFunction.sourceText() + ); + } + return attr; + }).toList(); } } else if (mode == AggregatorMode.FINAL || mode == AggregatorMode.INTERMEDIATE) { if (grouping) { @@ -277,16 +278,8 @@ private void aggregatesToFactory( } else { throw new EsqlIllegalArgumentException("illegal aggregation mode"); } - var aggParams = aggregateFunction.parameters(); - Object[] params = new Object[aggParams.size()]; - for (int i = 0; i < params.length; i++) { - params[i] = aggParams.get(i).fold(); - } - List inputChannels = sourceAttr.stream().map(attr -> layout.get(attr.id()).channel()).toList(); - if (inputChannels.size() > 0) { - assert inputChannels.size() > 0 && inputChannels.stream().allMatch(i -> i >= 0); - } + assert inputChannels.stream().allMatch(i -> i >= 0) : inputChannels; if (aggregateFunction instanceof ToAggregator agg) { consumer.accept(new AggFunctionSupplierContext(agg.supplier(inputChannels), mode)); } else { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java index 83fdd5dc0c5d2..87775d5048752 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.planner; +import org.elasticsearch.common.Strings; import org.elasticsearch.compute.aggregation.IntermediateStateDesc; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.core.Tuple; @@ -24,15 +25,18 @@ import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; import org.elasticsearch.xpack.esql.expression.function.aggregate.CountDistinct; +import org.elasticsearch.xpack.esql.expression.function.aggregate.FromPartial; import org.elasticsearch.xpack.esql.expression.function.aggregate.Max; import org.elasticsearch.xpack.esql.expression.function.aggregate.MedianAbsoluteDeviation; import org.elasticsearch.xpack.esql.expression.function.aggregate.Min; import org.elasticsearch.xpack.esql.expression.function.aggregate.NumericAggregate; import org.elasticsearch.xpack.esql.expression.function.aggregate.Percentile; +import org.elasticsearch.xpack.esql.expression.function.aggregate.Rate; import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialAggregateFunction; import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroid; import org.elasticsearch.xpack.esql.expression.function.aggregate.Sum; -import org.elasticsearch.xpack.esql.expression.function.aggregate.TopList; +import org.elasticsearch.xpack.esql.expression.function.aggregate.ToPartial; +import org.elasticsearch.xpack.esql.expression.function.aggregate.Top; import org.elasticsearch.xpack.esql.expression.function.aggregate.Values; import java.lang.invoke.MethodHandle; @@ -63,7 +67,12 @@ final class AggregateMapper { SpatialCentroid.class, Sum.class, Values.class, - TopList.class + Top.class, + Rate.class, + + // internal function + FromPartial.class, + ToPartial.class ); /** Record of agg Class, type, and grouping (or non-grouping). */ @@ -137,6 +146,8 @@ private static Stream, Tuple>> typeAndNames(Class List extraConfigs = List.of(""); if (NumericAggregate.class.isAssignableFrom(clazz)) { types = NUMERIC; + } else if (Max.class.isAssignableFrom(clazz) || Min.class.isAssignableFrom(clazz)) { + types = List.of("Boolean", "Int", "Long", "Double"); } else if (clazz == Count.class) { types = List.of(""); // no extra type distinction } else if (SpatialAggregateFunction.class.isAssignableFrom(clazz)) { @@ -145,11 +156,17 @@ private static Stream, Tuple>> typeAndNames(Class } else if (Values.class.isAssignableFrom(clazz)) { // TODO can't we figure this out from the function itself? 
types = List.of("Int", "Long", "Double", "Boolean", "BytesRef"); - } else if (TopList.class.isAssignableFrom(clazz)) { + } else if (Top.class.isAssignableFrom(clazz)) { types = List.of("Int", "Long", "Double"); - } else { - assert clazz == CountDistinct.class : "Expected CountDistinct, got: " + clazz; + } else if (Rate.class.isAssignableFrom(clazz)) { + types = List.of("Int", "Long", "Double"); + } else if (FromPartial.class.isAssignableFrom(clazz) || ToPartial.class.isAssignableFrom(clazz)) { + types = List.of(""); // no type + } else if (CountDistinct.class.isAssignableFrom(clazz)) { types = Stream.concat(NUMERIC.stream(), Stream.of("Boolean", "BytesRef")).toList(); + } else { + assert false : "unknown aggregate type " + clazz; + throw new IllegalArgumentException("unknown aggregate type " + clazz); } return combinations(types, extraConfigs).map(combo -> new Tuple<>(clazz, combo)); } @@ -159,10 +176,15 @@ private static Stream> combinations(List types, Li } private static Stream groupingAndNonGrouping(Tuple, Tuple> tuple) { - return Stream.of( - new AggDef(tuple.v1(), tuple.v2().v1(), tuple.v2().v2(), true), - new AggDef(tuple.v1(), tuple.v2().v1(), tuple.v2().v2(), false) - ); + if (tuple.v1().isAssignableFrom(Rate.class)) { + // rate doesn't support non-grouping aggregations + return Stream.of(new AggDef(tuple.v1(), tuple.v2().v1(), tuple.v2().v2(), true)); + } else { + return Stream.of( + new AggDef(tuple.v1(), tuple.v2().v1(), tuple.v2().v2(), true), + new AggDef(tuple.v1(), tuple.v2().v1(), tuple.v2().v2(), false) + ); + } } private static AggDef aggDefOrNull(Expression aggregate, boolean grouping) { @@ -224,7 +246,15 @@ private static String determinePackageName(Class clazz) { /** Maps intermediate state description to named expressions. */ private static Stream isToNE(List intermediateStateDescs) { - return intermediateStateDescs.stream().map(is -> new ReferenceAttribute(Source.EMPTY, is.name(), toDataType(is.type()))); + return intermediateStateDescs.stream().map(is -> { + final DataType dataType; + if (Strings.isEmpty(is.dataType())) { + dataType = toDataType(is.type()); + } else { + dataType = DataType.fromEs(is.dataType()); + } + return new ReferenceAttribute(Source.EMPTY, is.name(), dataType); + }); } /** Returns the data type for the engines element type. 
*/ @@ -245,13 +275,16 @@ private static String dataTypeToString(DataType type, Class aggClass) { if (aggClass == Count.class) { return ""; // no type distinction } + if (aggClass == ToPartial.class || aggClass == FromPartial.class) { + return ""; + } if (type.equals(DataType.BOOLEAN)) { return "Boolean"; - } else if (type.equals(DataType.INTEGER)) { + } else if (type.equals(DataType.INTEGER) || type.equals(DataType.COUNTER_INTEGER)) { return "Int"; - } else if (type.equals(DataType.LONG) || type.equals(DataType.DATETIME)) { + } else if (type.equals(DataType.LONG) || type.equals(DataType.DATETIME) || type.equals(DataType.COUNTER_LONG)) { return "Long"; - } else if (type.equals(DataType.DOUBLE)) { + } else if (type.equals(DataType.DOUBLE) || type.equals(DataType.COUNTER_DOUBLE)) { return "Double"; } else if (type.equals(DataType.KEYWORD) || type.equals(DataType.IP) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java index 825057c20d0e0..9386e77691a43 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java @@ -33,6 +33,7 @@ import org.elasticsearch.compute.operator.OrdinalsGroupingOperator; import org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.BlockLoader; import org.elasticsearch.index.mapper.FieldNamesFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; @@ -59,7 +60,6 @@ import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.DriverParallelism; import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.LocalExecutionPlannerContext; import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.PhysicalOperation; -import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.esql.type.MultiTypeEsField; import java.io.IOException; @@ -118,7 +118,7 @@ public final PhysicalOperation fieldExtractPhysicalOperation(FieldExtractExec fi MappedFieldType.FieldExtractPreference fieldExtractPreference = PlannerUtils.extractPreference(docValuesAttrs.contains(attr)); ElementType elementType = PlannerUtils.toElementType(dataType, fieldExtractPreference); String fieldName = attr.name(); - boolean isUnsupported = EsqlDataTypes.isUnsupported(dataType); + boolean isUnsupported = dataType == DataType.UNSUPPORTED; IntFunction loader = s -> getBlockLoaderFor(s, fieldName, isUnsupported, fieldExtractPreference, unionTypes); fields.add(new ValuesSourceReaderOperator.FieldInfo(fieldName, elementType, loader)); } @@ -233,9 +233,10 @@ public final Operator.OperatorFactory ordinalGroupingOperatorFactory( .toList(); // The grouping-by values are ready, let's group on them directly. // Costin: why are they ready and not already exposed in the layout? 
- boolean isUnsupported = EsqlDataTypes.isUnsupported(attrSource.dataType()); + boolean isUnsupported = attrSource.dataType() == DataType.UNSUPPORTED; + var unionTypes = findUnionTypes(attrSource); return new OrdinalsGroupingOperator.OrdinalsGroupingOperatorFactory( - shardIdx -> shardContexts.get(shardIdx).blockLoader(attrSource.name(), isUnsupported, NONE), + shardIdx -> getBlockLoaderFor(shardIdx, attrSource.name(), isUnsupported, NONE, unionTypes), vsShardContexts, groupElementType, docChannel, @@ -323,6 +324,11 @@ public String indexName() { return ctx.getFullyQualifiedIndex().getName(); } + @Override + public IndexSettings indexSettings() { + return ctx.getIndexSettings(); + } + @Override public MappedFieldType.FieldExtractPreference fieldExtractPreference() { return fieldExtractPreference; @@ -435,12 +441,13 @@ public StoredFieldsSpec rowStrideStoredFieldSpec() { @Override public boolean supportsOrdinals() { - return delegate.supportsOrdinals(); + // Fields with mismatching types cannot use ordinals for uniqueness determination, but must convert the values first + return false; } @Override - public SortedSetDocValues ordinals(LeafReaderContext context) throws IOException { - return delegate.ordinals(context); + public SortedSetDocValues ordinals(LeafReaderContext context) { + throw new IllegalArgumentException("Ordinals are not supported for type conversion"); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java index 349483116a0a8..e87006ec7ee09 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java @@ -294,7 +294,7 @@ private static boolean isInRange(DataType numericFieldDataType, DataType valueDa // Unsigned longs may be represented as BigInteger. decimalValue = new BigDecimal(bigIntValue); } else { - decimalValue = valueDataType.isRational() ? BigDecimal.valueOf(doubleValue) : BigDecimal.valueOf(value.longValue()); + decimalValue = valueDataType.isRationalNumber() ? BigDecimal.valueOf(doubleValue) : BigDecimal.valueOf(value.longValue()); } // Determine min/max for dataType. Use BigDecimals as doubles will have rounding errors for long/ulong. 
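A standalone illustration (not part of this patch) of the rounding problem the comment above refers to: widening a long near Long.MAX_VALUE to double silently moves it to the nearest representable value, which is why isInRange() builds its BigDecimal from value.longValue() for whole-number types and only takes the double path for rational types (isRationalNumber() after the rename in the hunk above). The class and names below are hypothetical, used only to demonstrate the behavior.

import java.math.BigDecimal;

class LongToDoubleRounding {
    public static void main(String[] args) {
        long exact = Long.MAX_VALUE;   // 9223372036854775807
        double widened = exact;        // nearest representable double is 2^63 = 9223372036854775808
        System.out.println(new BigDecimal(widened));    // 9223372036854775808 -> off by one after widening
        System.out.println(BigDecimal.valueOf(exact));  // 9223372036854775807 -> exact when built from the long
    }
}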
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java index dc7e09dc8f174..ddf5fa6eaf8a3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java @@ -63,7 +63,6 @@ import org.elasticsearch.xpack.esql.enrich.EnrichLookupService; import org.elasticsearch.xpack.esql.evaluator.EvalMapper; import org.elasticsearch.xpack.esql.evaluator.command.GrokEvaluatorExtracter; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.DissectExec; import org.elasticsearch.xpack.esql.plan.physical.EnrichExec; @@ -345,17 +344,17 @@ private PhysicalOperation planTopN(TopNExec topNExec, LocalExecutionPlannerConte List inverse = source.layout.inverse(); for (int channel = 0; channel < inverse.size(); channel++) { elementTypes[channel] = PlannerUtils.toElementType(inverse.get(channel).type()); - encoders[channel] = switch (inverse.get(channel).type().typeName()) { - case "ip" -> TopNEncoder.IP; - case "text", "keyword" -> TopNEncoder.UTF8; - case "version" -> TopNEncoder.VERSION; - case "boolean", "null", "byte", "short", "integer", "long", "double", "float", "half_float", "datetime", "date_period", - "time_duration", "object", "nested", "scaled_float", "unsigned_long", "_doc", "_tsid" -> TopNEncoder.DEFAULT_SORTABLE; - case "geo_point", "cartesian_point", "geo_shape", "cartesian_shape", "counter_long", "counter_integer", "counter_double" -> + encoders[channel] = switch (inverse.get(channel).type()) { + case IP -> TopNEncoder.IP; + case TEXT, KEYWORD -> TopNEncoder.UTF8; + case VERSION -> TopNEncoder.VERSION; + case BOOLEAN, NULL, BYTE, SHORT, INTEGER, LONG, DOUBLE, FLOAT, HALF_FLOAT, DATETIME, DATE_PERIOD, TIME_DURATION, OBJECT, + NESTED, SCALED_FLOAT, UNSIGNED_LONG, DOC_DATA_TYPE, TSID_DATA_TYPE -> TopNEncoder.DEFAULT_SORTABLE; + case GEO_POINT, CARTESIAN_POINT, GEO_SHAPE, CARTESIAN_SHAPE, COUNTER_LONG, COUNTER_INTEGER, COUNTER_DOUBLE -> TopNEncoder.DEFAULT_UNSORTABLE; // unsupported fields are encoded as BytesRef, we'll use the same encoder; all values should be null at this point - case "unsupported" -> TopNEncoder.UNSUPPORTED; - default -> throw new EsqlIllegalArgumentException("No TopN sorting encoder for type " + inverse.get(channel).type()); + case PARTIAL_AGG, UNSUPPORTED -> TopNEncoder.UNSUPPORTED; + case SOURCE -> throw new EsqlIllegalArgumentException("No TopN sorting encoder for type " + inverse.get(channel).type()); }; } List orders = topNExec.order().stream().map(order -> { @@ -501,21 +500,21 @@ private PhysicalOperation planHashJoin(HashJoinExec join, LocalExecutionPlannerC Layout layout = layoutBuilder.build(); Block[] localData = join.joinData().supplier().get(); - RowInTableLookupOperator.Key[] keys = new RowInTableLookupOperator.Key[join.conditions().size()]; - int[] blockMapping = new int[join.conditions().size()]; - for (int k = 0; k < join.conditions().size(); k++) { - Equals cond = join.conditions().get(k); + RowInTableLookupOperator.Key[] keys = new RowInTableLookupOperator.Key[join.leftFields().size()]; + int[] blockMapping = new int[join.leftFields().size()]; + for (int k = 0; k < join.leftFields().size(); k++) { + Attribute left = 
join.leftFields().get(k); + Attribute right = join.rightFields().get(k); Block localField = null; for (int l = 0; l < join.joinData().output().size(); l++) { - if (join.joinData().output().get(l).name().equals((((NamedExpression) cond.right()).name()))) { + if (join.joinData().output().get(l).name().equals((((NamedExpression) right).name()))) { localField = localData[l]; } } if (localField == null) { - throw new IllegalArgumentException("can't find local data for [" + cond.right() + "]"); + throw new IllegalArgumentException("can't find local data for [" + right + "]"); } - NamedExpression left = (NamedExpression) cond.left(); keys[k] = new RowInTableLookupOperator.Key(left.name(), localField); Layout.ChannelAndType input = source.layout.get(left.id()); blockMapping[k] = input.channel(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java index 9518954f78c64..84ed4663496de 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java @@ -9,26 +9,25 @@ import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.function.FunctionRegistry; -import org.elasticsearch.xpack.esql.core.plan.logical.BinaryPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.Filter; -import org.elasticsearch.xpack.esql.core.plan.logical.Limit; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.OrderBy; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; +import org.elasticsearch.xpack.esql.plan.logical.BinaryPlan; import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.Filter; import org.elasticsearch.xpack.esql.plan.logical.Grok; +import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.MvExpand; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.Row; import org.elasticsearch.xpack.esql.plan.logical.TopN; +import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.plan.logical.join.Join; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinConfig; import org.elasticsearch.xpack.esql.plan.logical.join.JoinType; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.logical.meta.MetaFunctions; @@ -53,19 +52,16 @@ import org.elasticsearch.xpack.esql.plan.physical.ShowExec; import org.elasticsearch.xpack.esql.plan.physical.TopNExec; -import java.util.ArrayList; -import java.util.List; - import static 
org.elasticsearch.xpack.esql.plan.physical.AggregateExec.Mode; import static org.elasticsearch.xpack.esql.plan.physical.AggregateExec.Mode.FINAL; import static org.elasticsearch.xpack.esql.plan.physical.AggregateExec.Mode.PARTIAL; public class Mapper { - private final FunctionRegistry functionRegistry; + private final EsqlFunctionRegistry functionRegistry; private final boolean localMode; // non-coordinator (data node) mode - public Mapper(FunctionRegistry functionRegistry) { + public Mapper(EsqlFunctionRegistry functionRegistry) { this.functionRegistry = functionRegistry; localMode = false; } @@ -278,19 +274,20 @@ private PhysicalPlan map(BinaryPlan p, PhysicalPlan lhs, PhysicalPlan rhs) { } private PhysicalPlan tryHashJoin(Join join, PhysicalPlan lhs, PhysicalPlan rhs) { - if (join.config().type() != JoinType.LEFT) { + JoinConfig config = join.config(); + if (config.type() != JoinType.LEFT) { return null; } - List conditions = new ArrayList<>(join.config().conditions().size()); - for (Expression cond : join.config().conditions()) { - if (cond instanceof Equals eq) { - conditions.add(eq); - } else { - return null; - } - } if (rhs instanceof LocalSourceExec local) { - return new HashJoinExec(join.source(), lhs, local, join.config().matchFields(), conditions, join.output()); + return new HashJoinExec( + join.source(), + lhs, + local, + config.matchFields(), + config.leftFields(), + config.rightFields(), + join.output() + ); } return null; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java index cc28839fd6575..d9f073d952a37 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java @@ -21,11 +21,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.predicate.Predicates; -import org.elasticsearch.xpack.esql.core.plan.logical.Filter; -import org.elasticsearch.xpack.esql.core.plan.logical.Limit; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.OrderBy; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.Holder; @@ -36,7 +31,12 @@ import org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; +import org.elasticsearch.xpack.esql.plan.logical.Filter; +import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.plan.logical.TopN; +import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.EsSourceExec; import org.elasticsearch.xpack.esql.plan.physical.EstimatesRowSize; @@ -60,6 +60,7 @@ import static java.util.Arrays.asList; import static org.elasticsearch.index.mapper.MappedFieldType.FieldExtractPreference.DOC_VALUES; +import static 
org.elasticsearch.index.mapper.MappedFieldType.FieldExtractPreference.NONE; import static org.elasticsearch.xpack.esql.core.util.Queries.Clause.FILTER; import static org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizer.PushFiltersToSource.canPushToSource; import static org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizer.TRANSLATOR_HANDLER; @@ -230,7 +231,7 @@ public static ElementType toSortableElementType(DataType dataType) { * Map QL's {@link DataType} to the compute engine's {@link ElementType}. */ public static ElementType toElementType(DataType dataType) { - return toElementType(dataType, MappedFieldType.FieldExtractPreference.NONE); + return toElementType(dataType, NONE); } /** @@ -239,47 +240,23 @@ public static ElementType toElementType(DataType dataType) { * For example, spatial types can be extracted into doc-values under specific conditions, otherwise they extract as BytesRef. */ public static ElementType toElementType(DataType dataType, MappedFieldType.FieldExtractPreference fieldExtractPreference) { - if (dataType == DataType.LONG - || dataType == DataType.DATETIME - || dataType == DataType.UNSIGNED_LONG - || dataType == DataType.COUNTER_LONG) { - return ElementType.LONG; - } - if (dataType == DataType.INTEGER || dataType == DataType.COUNTER_INTEGER) { - return ElementType.INT; - } - if (dataType == DataType.DOUBLE || dataType == DataType.COUNTER_DOUBLE) { - return ElementType.DOUBLE; - } - // unsupported fields are passed through as a BytesRef - if (dataType == DataType.KEYWORD - || dataType == DataType.TEXT - || dataType == DataType.IP - || dataType == DataType.SOURCE - || dataType == DataType.VERSION - || dataType == DataType.UNSUPPORTED) { - return ElementType.BYTES_REF; - } - if (dataType == DataType.NULL) { - return ElementType.NULL; - } - if (dataType == DataType.BOOLEAN) { - return ElementType.BOOLEAN; - } - if (dataType == DataType.DOC_DATA_TYPE) { - return ElementType.DOC; - } - if (dataType == DataType.TSID_DATA_TYPE) { - return ElementType.BYTES_REF; - } - if (EsqlDataTypes.isSpatialPoint(dataType)) { - return fieldExtractPreference == DOC_VALUES ? ElementType.LONG : ElementType.BYTES_REF; - } - if (EsqlDataTypes.isSpatial(dataType)) { - // TODO: support forStats for shape aggregations, like st_centroid - return ElementType.BYTES_REF; - } - throw EsqlIllegalArgumentException.illegalDataType(dataType); + + return switch (dataType) { + case LONG, DATETIME, UNSIGNED_LONG, COUNTER_LONG -> ElementType.LONG; + case INTEGER, COUNTER_INTEGER -> ElementType.INT; + case DOUBLE, COUNTER_DOUBLE -> ElementType.DOUBLE; + // unsupported fields are passed through as a BytesRef + case KEYWORD, TEXT, IP, SOURCE, VERSION, UNSUPPORTED -> ElementType.BYTES_REF; + case NULL -> ElementType.NULL; + case BOOLEAN -> ElementType.BOOLEAN; + case DOC_DATA_TYPE -> ElementType.DOC; + case TSID_DATA_TYPE -> ElementType.BYTES_REF; + case GEO_POINT, CARTESIAN_POINT -> fieldExtractPreference == DOC_VALUES ? ElementType.LONG : ElementType.BYTES_REF; + case GEO_SHAPE, CARTESIAN_SHAPE -> ElementType.BYTES_REF; + case PARTIAL_AGG -> ElementType.COMPOSITE; + case SHORT, BYTE, DATE_PERIOD, TIME_DURATION, OBJECT, NESTED, FLOAT, HALF_FLOAT, SCALED_FLOAT -> + throw EsqlIllegalArgumentException.illegalDataType(dataType); + }; } /** @@ -296,6 +273,6 @@ public static ElementType toElementType(DataType dataType, MappedFieldType.Field * Returns DOC_VALUES if the given boolean is set. 
*/ public static MappedFieldType.FieldExtractPreference extractPreference(boolean hasPreference) { - return hasPreference ? MappedFieldType.FieldExtractPreference.DOC_VALUES : MappedFieldType.FieldExtractPreference.NONE; + return hasPreference ? DOC_VALUES : NONE; } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeListener.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeListener.java new file mode 100644 index 0000000000000..01d50d505f7f2 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeListener.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.plugin; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.RefCountingListener; +import org.elasticsearch.compute.operator.DriverProfile; +import org.elasticsearch.compute.operator.FailureCollector; +import org.elasticsearch.compute.operator.ResponseHeadersCollector; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.transport.TransportService; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * A variant of {@link RefCountingListener} with the following differences: + * 1. Automatically cancels sub tasks on failure. + * 2. Collects driver profiles from sub tasks. + * 3. Collects response headers from sub tasks, specifically warnings emitted during compute + * 4. Collects failures and returns the most appropriate exception to the caller. + */ +final class ComputeListener implements Releasable { + private static final Logger LOGGER = LogManager.getLogger(ComputeService.class); + + private final RefCountingListener refs; + private final FailureCollector failureCollector = new FailureCollector(); + private final AtomicBoolean cancelled = new AtomicBoolean(); + private final CancellableTask task; + private final TransportService transportService; + private final List collectedProfiles; + private final ResponseHeadersCollector responseHeaders; + + ComputeListener(TransportService transportService, CancellableTask task, ActionListener delegate) { + this.transportService = transportService; + this.task = task; + this.responseHeaders = new ResponseHeadersCollector(transportService.getThreadPool().getThreadContext()); + this.collectedProfiles = Collections.synchronizedList(new ArrayList<>()); + this.refs = new RefCountingListener(1, ActionListener.wrap(ignored -> { + responseHeaders.finish(); + var result = new ComputeResponse(collectedProfiles.isEmpty() ? 
List.of() : collectedProfiles.stream().toList()); + delegate.onResponse(result); + }, e -> delegate.onFailure(failureCollector.getFailure()))); + } + + /** + * Acquires a new listener that doesn't collect result + */ + ActionListener acquireAvoid() { + return refs.acquire().delegateResponse((l, e) -> { + failureCollector.unwrapAndCollect(e); + try { + if (cancelled.compareAndSet(false, true)) { + LOGGER.debug("cancelling ESQL task {} on failure", task); + transportService.getTaskManager().cancelTaskAndDescendants(task, "cancelled on failure", false, ActionListener.noop()); + } + } finally { + l.onFailure(e); + } + }); + } + + /** + * Acquires a new listener that collects compute result. This listener will also collects warnings emitted during compute + */ + ActionListener acquireCompute() { + return acquireAvoid().map(resp -> { + responseHeaders.collect(); + var profiles = resp.getProfiles(); + if (profiles != null && profiles.isEmpty() == false) { + collectedProfiles.addAll(profiles); + } + return null; + }); + } + + @Override + public void close() { + refs.close(); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index 4ebc4af258134..673e320e5106b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -27,9 +27,7 @@ import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.Driver; -import org.elasticsearch.compute.operator.DriverProfile; import org.elasticsearch.compute.operator.DriverTaskRunner; -import org.elasticsearch.compute.operator.ResponseHeadersCollector; import org.elasticsearch.compute.operator.exchange.ExchangeService; import org.elasticsearch.compute.operator.exchange.ExchangeSink; import org.elasticsearch.compute.operator.exchange.ExchangeSinkHandler; @@ -72,6 +70,7 @@ import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner; import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; +import org.elasticsearch.xpack.esql.session.Result; import java.util.ArrayList; import java.util.Collections; @@ -81,7 +80,6 @@ import java.util.Set; import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Supplier; import static org.elasticsearch.xpack.esql.plugin.EsqlPlugin.ESQL_WORKER_THREAD_POOL_NAME; @@ -89,8 +87,6 @@ * Computes the result of a {@link PhysicalPlan}. 
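// Illustrative only (not part of this patch): the usage pattern for the new ComputeListener, as it appears
// further down in ComputeService. The coordinator opens one listener over the caller's ActionListener,
// hands acquireCompute() to every sub-computation whose ComputeResponse profiles should be merged,
// hands acquireAvoid() to completion signals that carry no result, and relies on close() (via
// try-with-resources) releasing the initial reference; any failure cancels the task and its descendants.
// The variable names below (transportService, rootTask, delegate, exchangeSource) are taken from the
// surrounding diff and stand in for whatever the calling code actually holds.
//
//   try (var computeListener = new ComputeListener(transportService, rootTask, delegate)) {
//       exchangeSource.addCompletionListener(computeListener.acquireAvoid());
//       runCompute(rootTask, computeContext, coordinatorPlan, computeListener.acquireCompute());
//   }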
*/ public class ComputeService { - public record Result(List pages, List profiles) {} - private static final Logger LOGGER = LogManager.getLogger(ComputeService.class); private final SearchService searchService; private final BigArrays bigArrays; @@ -172,13 +168,16 @@ public void execute( null, null ); - runCompute( - rootTask, - computeContext, - coordinatorPlan, - listener.map(driverProfiles -> new Result(collectedPages, driverProfiles)) - ); - return; + try ( + var computeListener = new ComputeListener( + transportService, + rootTask, + listener.map(r -> new Result(physicalPlan.output(), collectedPages, r.getProfiles())) + ) + ) { + runCompute(rootTask, computeContext, coordinatorPlan, computeListener.acquireCompute()); + return; + } } else { if (clusterToConcreteIndices.values().stream().allMatch(v -> v.indices().length == 0)) { var error = "expected concrete indices with data node plan but got empty; data node plan " + dataNodePlan; @@ -191,31 +190,25 @@ public void execute( .groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, PlannerUtils.planOriginalIndices(physicalPlan)); var localOriginalIndices = clusterToOriginalIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); var localConcreteIndices = clusterToConcreteIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - final var responseHeadersCollector = new ResponseHeadersCollector(transportService.getThreadPool().getThreadContext()); - listener = ActionListener.runBefore(listener, responseHeadersCollector::finish); - final AtomicBoolean cancelled = new AtomicBoolean(); - final List collectedProfiles = configuration.profile() ? Collections.synchronizedList(new ArrayList<>()) : List.of(); final var exchangeSource = new ExchangeSourceHandler( queryPragmas.exchangeBufferSize(), transportService.getThreadPool().executor(ThreadPool.Names.SEARCH) ); try ( Releasable ignored = exchangeSource.addEmptySink(); - RefCountingListener refs = new RefCountingListener(listener.map(unused -> new Result(collectedPages, collectedProfiles))) + var computeListener = new ComputeListener( + transportService, + rootTask, + listener.map(r -> new Result(physicalPlan.output(), collectedPages, r.getProfiles())) + ) ) { // run compute on the coordinator - exchangeSource.addCompletionListener(refs.acquire()); + exchangeSource.addCompletionListener(computeListener.acquireAvoid()); runCompute( rootTask, new ComputeContext(sessionId, RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, List.of(), configuration, exchangeSource, null), coordinatorPlan, - cancelOnFailure(rootTask, cancelled, refs.acquire()).map(driverProfiles -> { - responseHeadersCollector.collect(); - if (configuration.profile()) { - collectedProfiles.addAll(driverProfiles); - } - return null; - }) + computeListener.acquireCompute() ); // starts computes on data nodes on the main cluster if (localConcreteIndices != null && localConcreteIndices.indices().length > 0) { @@ -228,17 +221,10 @@ public void execute( Set.of(localConcreteIndices.indices()), localOriginalIndices.indices(), exchangeSource, - ActionListener.releaseAfter(refs.acquire(), exchangeSource.addEmptySink()), - () -> cancelOnFailure(rootTask, cancelled, refs.acquire()).map(response -> { - responseHeadersCollector.collect(); - if (configuration.profile()) { - collectedProfiles.addAll(response.getProfiles()); - } - return null; - }) + computeListener ); } - // starts computes on remote cluster + // starts computes on remote clusters startComputeOnRemoteClusters( sessionId, rootTask, @@ -246,13 +232,7 @@ public void execute( 
dataNodePlan, exchangeSource, getRemoteClusters(clusterToConcreteIndices, clusterToOriginalIndices), - () -> cancelOnFailure(rootTask, cancelled, refs.acquire()).map(response -> { - responseHeadersCollector.collect(); - if (configuration.profile()) { - collectedProfiles.addAll(response.getProfiles()); - } - return null; - }) + computeListener ); } } @@ -288,8 +268,7 @@ private void startComputeOnDataNodes( Set concreteIndices, String[] originalIndices, ExchangeSourceHandler exchangeSource, - ActionListener parentListener, - Supplier> dataNodeListenerSupplier + ComputeListener computeListener ) { var planWithReducer = configuration.pragmas().nodeLevelReduction() == false ? dataNodePlan @@ -303,12 +282,12 @@ private void startComputeOnDataNodes( // Since it's used only for @timestamp, it is relatively safe to assume it's not needed // but it would be better to have a proper impl. QueryBuilder requestFilter = PlannerUtils.requestFilter(planWithReducer, x -> true); + var lookupListener = ActionListener.releaseAfter(computeListener.acquireAvoid(), exchangeSource.addEmptySink()); lookupDataNodes(parentTask, clusterAlias, requestFilter, concreteIndices, originalIndices, ActionListener.wrap(dataNodes -> { - try (RefCountingRunnable refs = new RefCountingRunnable(() -> parentListener.onResponse(null))) { + try (RefCountingListener refs = new RefCountingListener(lookupListener)) { // For each target node, first open a remote exchange on the remote node, then link the exchange source to // the new remote exchange sink, and initialize the computation on the target node via data-node-request. for (DataNode node : dataNodes) { - var dataNodeListener = ActionListener.releaseAfter(dataNodeListenerSupplier.get(), refs.acquire()); var queryPragmas = configuration.pragmas(); ExchangeService.openExchange( transportService, @@ -316,9 +295,10 @@ private void startComputeOnDataNodes( sessionId, queryPragmas.exchangeBufferSize(), esqlExecutor, - dataNodeListener.delegateFailureAndWrap((delegate, unused) -> { + refs.acquire().delegateFailureAndWrap((l, unused) -> { var remoteSink = exchangeService.newRemoteSink(parentTask, sessionId, transportService, node.connection); exchangeSource.addRemoteSink(remoteSink, queryPragmas.concurrentExchangeClients()); + var dataNodeListener = ActionListener.runBefore(computeListener.acquireCompute(), () -> l.onResponse(null)); transportService.sendChildRequest( node.connection, DATA_ACTION_NAME, @@ -332,13 +312,13 @@ private void startComputeOnDataNodes( ), parentTask, TransportRequestOptions.EMPTY, - new ActionListenerResponseHandler<>(delegate, ComputeResponse::new, esqlExecutor) + new ActionListenerResponseHandler<>(dataNodeListener, ComputeResponse::new, esqlExecutor) ); }) ); } } - }, parentListener::onFailure)); + }, lookupListener::onFailure)); } private void startComputeOnRemoteClusters( @@ -348,19 +328,19 @@ private void startComputeOnRemoteClusters( PhysicalPlan plan, ExchangeSourceHandler exchangeSource, List clusters, - Supplier> listener + ComputeListener computeListener ) { - try (RefCountingRunnable refs = new RefCountingRunnable(exchangeSource.addEmptySink()::close)) { + var queryPragmas = configuration.pragmas(); + var linkExchangeListeners = ActionListener.releaseAfter(computeListener.acquireAvoid(), exchangeSource.addEmptySink()); + try (RefCountingListener refs = new RefCountingListener(linkExchangeListeners)) { for (RemoteCluster cluster : clusters) { - var targetNodeListener = ActionListener.releaseAfter(listener.get(), refs.acquire()); - var queryPragmas = 
configuration.pragmas(); ExchangeService.openExchange( transportService, cluster.connection, sessionId, queryPragmas.exchangeBufferSize(), esqlExecutor, - targetNodeListener.delegateFailureAndWrap((l, unused) -> { + refs.acquire().delegateFailureAndWrap((l, unused) -> { var remoteSink = exchangeService.newRemoteSink(rootTask, sessionId, transportService, cluster.connection); exchangeSource.addRemoteSink(remoteSink, queryPragmas.concurrentExchangeClients()); var clusterRequest = new ClusterComputeRequest( @@ -371,13 +351,14 @@ private void startComputeOnRemoteClusters( cluster.concreteIndices, cluster.originalIndices ); + var clusterListener = ActionListener.runBefore(computeListener.acquireCompute(), () -> l.onResponse(null)); transportService.sendChildRequest( cluster.connection, CLUSTER_ACTION_NAME, clusterRequest, rootTask, TransportRequestOptions.EMPTY, - new ActionListenerResponseHandler<>(l, ComputeResponse::new, esqlExecutor) + new ActionListenerResponseHandler<>(clusterListener, ComputeResponse::new, esqlExecutor) ); }) ); @@ -385,17 +366,7 @@ private void startComputeOnRemoteClusters( } } - private ActionListener cancelOnFailure(CancellableTask task, AtomicBoolean cancelled, ActionListener listener) { - return listener.delegateResponse((l, e) -> { - l.onFailure(e); - if (cancelled.compareAndSet(false, true)) { - LOGGER.debug("cancelling ESQL task {} on failure", task); - transportService.getTaskManager().cancelTaskAndDescendants(task, "cancelled", false, ActionListener.noop()); - } - }); - } - - void runCompute(CancellableTask task, ComputeContext context, PhysicalPlan plan, ActionListener> listener) { + void runCompute(CancellableTask task, ComputeContext context, PhysicalPlan plan, ActionListener listener) { listener = ActionListener.runBefore(listener, () -> Releasables.close(context.searchContexts)); List contexts = new ArrayList<>(context.searchContexts.size()); for (int i = 0; i < context.searchContexts.size(); i++) { @@ -445,9 +416,10 @@ void runCompute(CancellableTask task, ComputeContext context, PhysicalPlan plan, } ActionListener listenerCollectingStatus = listener.map(ignored -> { if (context.configuration.profile()) { - return drivers.stream().map(Driver::profile).toList(); + return new ComputeResponse(drivers.stream().map(Driver::profile).toList()); + } else { + return new ComputeResponse(List.of()); } - return null; }); listenerCollectingStatus = ActionListener.releaseAfter(listenerCollectingStatus, () -> Releasables.close(drivers)); driverRunner.executeDrivers( @@ -612,8 +584,7 @@ private class DataNodeRequestExecutor { private final DataNodeRequest request; private final CancellableTask parentTask; private final ExchangeSinkHandler exchangeSink; - private final ActionListener listener; - private final List driverProfiles; + private final ComputeListener computeListener; private final int maxConcurrentShards; private final ExchangeSink blockingSink; // block until we have completed on all shards or the coordinator has enough data @@ -622,14 +593,12 @@ private class DataNodeRequestExecutor { CancellableTask parentTask, ExchangeSinkHandler exchangeSink, int maxConcurrentShards, - List driverProfiles, - ActionListener listener + ComputeListener computeListener ) { this.request = request; this.parentTask = parentTask; this.exchangeSink = exchangeSink; - this.listener = listener; - this.driverProfiles = driverProfiles; + this.computeListener = computeListener; this.maxConcurrentShards = maxConcurrentShards; this.blockingSink = exchangeSink.createExchangeSink(); } @@ 
-647,40 +616,46 @@ private void runBatch(int startBatchIndex) { final var sessionId = request.sessionId(); final int endBatchIndex = Math.min(startBatchIndex + maxConcurrentShards, request.shardIds().size()); List shardIds = request.shardIds().subList(startBatchIndex, endBatchIndex); + ActionListener batchListener = new ActionListener<>() { + final ActionListener ref = computeListener.acquireCompute(); + + @Override + public void onResponse(ComputeResponse result) { + try { + onBatchCompleted(endBatchIndex); + } finally { + ref.onResponse(result); + } + } + + @Override + public void onFailure(Exception e) { + try { + exchangeService.finishSinkHandler(request.sessionId(), e); + } finally { + ref.onFailure(e); + } + } + }; acquireSearchContexts(clusterAlias, shardIds, configuration, request.aliasFilters(), ActionListener.wrap(searchContexts -> { assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SEARCH, ESQL_WORKER_THREAD_POOL_NAME); var computeContext = new ComputeContext(sessionId, clusterAlias, searchContexts, configuration, null, exchangeSink); - runCompute( - parentTask, - computeContext, - request.plan(), - ActionListener.wrap(profiles -> onBatchCompleted(endBatchIndex, profiles), this::onFailure) - ); - }, this::onFailure)); + runCompute(parentTask, computeContext, request.plan(), batchListener); + }, batchListener::onFailure)); } - private void onBatchCompleted(int lastBatchIndex, List batchProfiles) { - if (request.configuration().profile()) { - driverProfiles.addAll(batchProfiles); - } + private void onBatchCompleted(int lastBatchIndex) { if (lastBatchIndex < request.shardIds().size() && exchangeSink.isFinished() == false) { runBatch(lastBatchIndex); } else { - blockingSink.finish(); // don't return until all pages are fetched + var completionListener = computeListener.acquireAvoid(); exchangeSink.addCompletionListener( - ContextPreservingActionListener.wrapPreservingContext( - ActionListener.runBefore(listener, () -> exchangeService.finishSinkHandler(request.sessionId(), null)), - transportService.getThreadPool().getThreadContext() - ) + ActionListener.runAfter(completionListener, () -> exchangeService.finishSinkHandler(request.sessionId(), null)) ); + blockingSink.finish(); } } - - private void onFailure(Exception e) { - exchangeService.finishSinkHandler(request.sessionId(), e); - listener.onFailure(e); - } } private void runComputeOnDataNode( @@ -688,17 +663,10 @@ private void runComputeOnDataNode( String externalId, PhysicalPlan reducePlan, DataNodeRequest request, - ActionListener listener + ComputeListener computeListener ) { - final List collectedProfiles = request.configuration().profile() - ? 
Collections.synchronizedList(new ArrayList<>()) - : List.of(); - final var responseHeadersCollector = new ResponseHeadersCollector(transportService.getThreadPool().getThreadContext()); - final RefCountingListener listenerRefs = new RefCountingListener( - ActionListener.runBefore(listener.map(unused -> new ComputeResponse(collectedProfiles)), responseHeadersCollector::finish) - ); + var parentListener = computeListener.acquireAvoid(); try { - final AtomicBoolean cancelled = new AtomicBoolean(); // run compute with target shards var internalSink = exchangeService.createSinkHandler(request.sessionId(), request.pragmas().exchangeBufferSize()); DataNodeRequestExecutor dataNodeRequestExecutor = new DataNodeRequestExecutor( @@ -706,17 +674,16 @@ private void runComputeOnDataNode( task, internalSink, request.configuration().pragmas().maxConcurrentShardsPerNode(), - collectedProfiles, - ActionListener.runBefore(cancelOnFailure(task, cancelled, listenerRefs.acquire()), responseHeadersCollector::collect) + computeListener ); dataNodeRequestExecutor.start(); // run the node-level reduction var externalSink = exchangeService.getSinkHandler(externalId); task.addListener(() -> exchangeService.finishSinkHandler(externalId, new TaskCancelledException(task.getReasonCancelled()))); var exchangeSource = new ExchangeSourceHandler(1, esqlExecutor); - exchangeSource.addCompletionListener(listenerRefs.acquire()); + exchangeSource.addCompletionListener(computeListener.acquireAvoid()); exchangeSource.addRemoteSink(internalSink::fetchPageAsync, 1); - ActionListener reductionListener = cancelOnFailure(task, cancelled, listenerRefs.acquire()); + ActionListener reductionListener = computeListener.acquireCompute(); runCompute( task, new ComputeContext( @@ -728,26 +695,22 @@ private void runComputeOnDataNode( externalSink ), reducePlan, - ActionListener.wrap(driverProfiles -> { - responseHeadersCollector.collect(); - if (request.configuration().profile()) { - collectedProfiles.addAll(driverProfiles); - } + ActionListener.wrap(resp -> { // don't return until all pages are fetched - externalSink.addCompletionListener( - ActionListener.runBefore(reductionListener, () -> exchangeService.finishSinkHandler(externalId, null)) - ); + externalSink.addCompletionListener(ActionListener.running(() -> { + exchangeService.finishSinkHandler(externalId, null); + reductionListener.onResponse(resp); + })); }, e -> { exchangeService.finishSinkHandler(externalId, e); reductionListener.onFailure(e); }) ); + parentListener.onResponse(null); } catch (Exception e) { exchangeService.finishSinkHandler(externalId, e); exchangeService.finishSinkHandler(request.sessionId(), e); - listenerRefs.acquire().onFailure(e); - } finally { - listenerRefs.close(); + parentListener.onFailure(e); } } @@ -784,7 +747,9 @@ public void messageReceived(DataNodeRequest request, TransportChannel channel, T request.aliasFilters(), request.plan() ); - runComputeOnDataNode((CancellableTask) task, sessionId, reducePlan, request, listener); + try (var computeListener = new ComputeListener(transportService, (CancellableTask) task, listener)) { + runComputeOnDataNode((CancellableTask) task, sessionId, reducePlan, request, computeListener); + } } } @@ -798,16 +763,18 @@ public void messageReceived(ClusterComputeRequest request, TransportChannel chan listener.onFailure(new IllegalStateException("expected exchange sink for a remote compute; got " + request.plan())); return; } - runComputeOnRemoteCluster( - request.clusterAlias(), - request.sessionId(), - (CancellableTask) task, 
- request.configuration(), - (ExchangeSinkExec) request.plan(), - Set.of(request.indices()), - request.originalIndices(), - listener - ); + try (var computeListener = new ComputeListener(transportService, (CancellableTask) task, listener)) { + runComputeOnRemoteCluster( + request.clusterAlias(), + request.sessionId(), + (CancellableTask) task, + request.configuration(), + (ExchangeSinkExec) request.plan(), + Set.of(request.indices()), + request.originalIndices(), + computeListener + ); + } } } @@ -828,28 +795,20 @@ void runComputeOnRemoteCluster( ExchangeSinkExec plan, Set concreteIndices, String[] originalIndices, - ActionListener listener + ComputeListener computeListener ) { final var exchangeSink = exchangeService.getSinkHandler(globalSessionId); parentTask.addListener( () -> exchangeService.finishSinkHandler(globalSessionId, new TaskCancelledException(parentTask.getReasonCancelled())) ); - ThreadPool threadPool = transportService.getThreadPool(); - final var responseHeadersCollector = new ResponseHeadersCollector(threadPool.getThreadContext()); - listener = ActionListener.runBefore(listener, responseHeadersCollector::finish); - final AtomicBoolean cancelled = new AtomicBoolean(); - final List collectedProfiles = configuration.profile() ? Collections.synchronizedList(new ArrayList<>()) : List.of(); final String localSessionId = clusterAlias + ":" + globalSessionId; var exchangeSource = new ExchangeSourceHandler( configuration.pragmas().exchangeBufferSize(), transportService.getThreadPool().executor(ThreadPool.Names.SEARCH) ); - try ( - Releasable ignored = exchangeSource.addEmptySink(); - RefCountingListener refs = new RefCountingListener(listener.map(unused -> new ComputeResponse(collectedProfiles))) - ) { - exchangeSink.addCompletionListener(refs.acquire()); - exchangeSource.addCompletionListener(refs.acquire()); + try (Releasable ignored = exchangeSource.addEmptySink()) { + exchangeSink.addCompletionListener(computeListener.acquireAvoid()); + exchangeSource.addCompletionListener(computeListener.acquireAvoid()); PhysicalPlan coordinatorPlan = new ExchangeSinkExec( plan.source(), plan.output(), @@ -860,13 +819,7 @@ void runComputeOnRemoteCluster( parentTask, new ComputeContext(localSessionId, clusterAlias, List.of(), configuration, exchangeSource, exchangeSink), coordinatorPlan, - cancelOnFailure(parentTask, cancelled, refs.acquire()).map(driverProfiles -> { - responseHeadersCollector.collect(); - if (configuration.profile()) { - collectedProfiles.addAll(driverProfiles); - } - return null; - }) + computeListener.acquireCompute() ); startComputeOnDataNodes( localSessionId, @@ -877,14 +830,7 @@ void runComputeOnRemoteCluster( concreteIndices, originalIndices, exchangeSource, - ActionListener.releaseAfter(refs.acquire(), exchangeSource.addEmptySink()), - () -> cancelOnFailure(parentTask, cancelled, refs.acquire()).map(r -> { - responseHeadersCollector.collect(); - if (configuration.profile()) { - collectedProfiles.addAll(r.getProfiles()); - } - return null; - }) + computeListener ); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlMediaTypeParser.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlMediaTypeParser.java index 9f522858358fc..915efe9302a92 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlMediaTypeParser.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlMediaTypeParser.java @@ -13,6 +13,7 @@ import 
org.elasticsearch.xcontent.ParsedMediaType; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.esql.action.EsqlQueryRequest; +import org.elasticsearch.xpack.esql.arrow.ArrowFormat; import org.elasticsearch.xpack.esql.formatter.TextFormat; import java.util.Arrays; @@ -23,7 +24,7 @@ public class EsqlMediaTypeParser { public static final MediaTypeRegistry MEDIA_TYPE_REGISTRY = new MediaTypeRegistry<>().register( XContentType.values() - ).register(TextFormat.values()); + ).register(TextFormat.values()).register(new MediaType[] { ArrowFormat.INSTANCE }); /* * Since we support {@link TextFormat} and diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java index fc00f5be22624..46fe229098a16 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java @@ -56,11 +56,14 @@ import org.elasticsearch.xpack.esql.action.RestEsqlGetAsyncResultAction; import org.elasticsearch.xpack.esql.action.RestEsqlQueryAction; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.enrich.EnrichLookupOperator; import org.elasticsearch.xpack.esql.execution.PlanExecutor; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; +import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.querydsl.query.SingleValueQuery; import org.elasticsearch.xpack.esql.session.IndexResolver; import org.elasticsearch.xpack.esql.type.EsqlDataTypeRegistry; @@ -189,7 +192,11 @@ public List getNamedWriteables() { entries.add(UnsupportedAttribute.ENTRY); // TODO combine with above once these are in the same project entries.addAll(NamedExpression.getNamedWriteables()); entries.add(UnsupportedAttribute.NAMED_EXPRESSION_ENTRY); // TODO combine with above once these are in the same project + entries.addAll(Expression.getNamedWriteables()); + entries.add(UnsupportedAttribute.EXPRESSION_ENTRY); // TODO combine with above once these are in the same project entries.add(MultiTypeEsField.ENTRY); // TODO combine with EsField.getNamedWriteables() once these are in the same module + entries.addAll(EsqlScalarFunction.getNamedWriteables()); + entries.addAll(AggregateFunction.getNamedWriteables()); return entries; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java index 28191a394e69c..5a6812c969757 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java @@ -28,7 +28,7 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.async.AsyncExecutionId; -import org.elasticsearch.xpack.core.esql.action.ColumnInfo; +import org.elasticsearch.xpack.esql.action.ColumnInfoImpl; import 
org.elasticsearch.xpack.esql.action.EsqlQueryAction; import org.elasticsearch.xpack.esql.action.EsqlQueryRequest; import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; @@ -37,8 +37,9 @@ import org.elasticsearch.xpack.esql.enrich.EnrichLookupService; import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolver; import org.elasticsearch.xpack.esql.execution.PlanExecutor; +import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; -import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import org.elasticsearch.xpack.esql.session.Result; import java.io.IOException; import java.time.ZoneOffset; @@ -46,6 +47,7 @@ import java.util.Locale; import java.util.Map; import java.util.concurrent.Executor; +import java.util.function.BiConsumer; import static org.elasticsearch.xpack.core.ClientHelper.ASYNC_SEARCH_ORIGIN; @@ -158,37 +160,37 @@ private void innerExecute(Task task, EsqlQueryRequest request, ActionListener> runPhase = (physicalPlan, resultListener) -> computeService.execute( + sessionId, + (CancellableTask) task, + physicalPlan, + configuration, + resultListener + ); + planExecutor.esql( request, sessionId, configuration, enrichPolicyResolver, - listener.delegateFailureAndWrap( - (delegate, physicalPlan) -> computeService.execute( - sessionId, - (CancellableTask) task, - physicalPlan, - configuration, - delegate.map(result -> { - List columns = physicalPlan.output() - .stream() - .map(c -> new ColumnInfo(c.qualifiedName(), EsqlDataTypes.outputType(c.dataType()))) - .toList(); - EsqlQueryResponse.Profile profile = configuration.profile() - ? new EsqlQueryResponse.Profile(result.profiles()) - : null; - if (task instanceof EsqlQueryTask asyncTask && request.keepOnCompletion()) { - String id = asyncTask.getExecutionId().getEncoded(); - return new EsqlQueryResponse(columns, result.pages(), profile, request.columnar(), id, false, request.async()); - } else { - return new EsqlQueryResponse(columns, result.pages(), profile, request.columnar(), request.async()); - } - }) - ) - ) + runPhase, + listener.map(result -> toResponse(task, request, configuration, result)) ); } + private EsqlQueryResponse toResponse(Task task, EsqlQueryRequest request, EsqlConfiguration configuration, Result result) { + List columns = result.schema() + .stream() + .map(c -> new ColumnInfoImpl(c.qualifiedName(), c.dataType().outputType())) + .toList(); + EsqlQueryResponse.Profile profile = configuration.profile() ? new EsqlQueryResponse.Profile(result.profiles()) : null; + if (task instanceof EsqlQueryTask asyncTask && request.keepOnCompletion()) { + String id = asyncTask.getExecutionId().getEncoded(); + return new EsqlQueryResponse(columns, result.pages(), profile, request.columnar(), id, false, request.async()); + } + return new EsqlQueryResponse(columns, result.pages(), profile, request.columnar(), request.async()); + } + /** * Returns the ID for this compute session. The ID is unique within the cluster, and is used * to identify the compute-session across nodes. 
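The toResponse helper added in this hunk derives the response columns from Result.schema() rather than from the physical plan output, and only attaches a Profile when profiling was requested. A minimal, hypothetical sketch of that shape (ColumnStub, ResultStub and ResponseStub are made-up stand-ins, not the real ColumnInfoImpl/Result classes):

import java.util.List;

// Simplified stand-ins; the real classes also carry Pages and DriverProfiles.
final class ToResponseSketch {
    record ColumnStub(String name, String type) {}
    record ResultStub(List<ColumnStub> schema, List<String> profiles) {}
    record ResponseStub(List<ColumnStub> columns, List<String> profiles) {}

    static ResponseStub toResponse(ResultStub result, boolean profileRequested) {
        // Columns come straight from the result schema.
        List<ColumnStub> columns = List.copyOf(result.schema());
        // Profiles are always collected but only surfaced when asked for.
        List<String> profiles = profileRequested ? result.profiles() : List.of();
        return new ResponseStub(columns, profiles);
    }

    public static void main(String[] args) {
        ResultStub r = new ResultStub(List.of(new ColumnStub("foo", "integer")), List.of("driver-0"));
        System.out.println(toResponse(r, false)); // columns only
        System.out.println(toResponse(r, true));  // columns plus profiles
    }
}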
The ID is just the TaskID of the task that diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java index 6545e892741d2..4cd51b676fe89 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java @@ -223,6 +223,7 @@ Stats stats() { private static class LuceneQuery extends org.apache.lucene.search.Query { final org.apache.lucene.search.Query next; private final IndexFieldData fieldData; + // mutable object for collecting stats and warnings, not really part of the query private final Stats stats; private final Warnings warnings; @@ -267,14 +268,12 @@ public boolean equals(Object obj) { return false; } SingleValueQuery.LuceneQuery other = (SingleValueQuery.LuceneQuery) obj; - return next.equals(other.next) - && fieldData.getFieldName().equals(other.fieldData.getFieldName()) - && warnings.equals(other.warnings); + return next.equals(other.next) && fieldData.getFieldName().equals(other.fieldData.getFieldName()); } @Override public int hashCode() { - return Objects.hash(classHash(), next, fieldData, warnings); + return Objects.hash(classHash(), next, fieldData.getFieldName()); } @Override @@ -437,7 +436,8 @@ private Scorer scorer(Scorer nextScorer, LeafFieldData lfd) { @Override public boolean isCacheable(LeafReaderContext ctx) { - return next.isCacheable(ctx); + // we cannot cache this query because we loose the ability of emitting warnings + return false; } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SpatialRelatesQuery.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SpatialRelatesQuery.java index 23d2126e39706..23de36d6d3d77 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SpatialRelatesQuery.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SpatialRelatesQuery.java @@ -63,7 +63,7 @@ public QueryBuilder asBuilder() { @Override protected String innerToString() { - throw new IllegalArgumentException("SpatialRelatesQuery.innerToString() not implemented"); + return "field:" + field + ", dataType:" + dataType + ", queryRelation:" + queryRelation + ", shape:" + shape; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index 0589424b37d1e..8c831cc260e03 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -28,16 +28,15 @@ import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttribute; import org.elasticsearch.xpack.esql.core.expression.UnresolvedStar; -import org.elasticsearch.xpack.esql.core.expression.function.FunctionRegistry; import org.elasticsearch.xpack.esql.core.index.IndexResolution; import org.elasticsearch.xpack.esql.core.index.MappingException; import org.elasticsearch.xpack.esql.core.plan.TableIdentifier; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.type.InvalidMappedField; import 
org.elasticsearch.xpack.esql.core.util.Holder; import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolver; import org.elasticsearch.xpack.esql.enrich.ResolvedEnrichPolicy; import org.elasticsearch.xpack.esql.expression.UnresolvedNamePattern; +import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.optimizer.PhysicalOptimizerContext; import org.elasticsearch.xpack.esql.optimizer.PhysicalPlanOptimizer; @@ -46,6 +45,7 @@ import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.Keep; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.RegexExtract; import org.elasticsearch.xpack.esql.plan.physical.EstimatesRowSize; @@ -58,12 +58,12 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.BiConsumer; import java.util.function.BiFunction; import java.util.function.Predicate; import java.util.stream.Collectors; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; -import static org.elasticsearch.xpack.esql.core.util.ActionListeners.map; import static org.elasticsearch.xpack.esql.core.util.StringUtils.WILDCARD; public class EsqlSession { @@ -77,7 +77,7 @@ public class EsqlSession { private final PreAnalyzer preAnalyzer; private final Verifier verifier; - private final FunctionRegistry functionRegistry; + private final EsqlFunctionRegistry functionRegistry; private final LogicalPlanOptimizer logicalPlanOptimizer; private final Mapper mapper; @@ -89,7 +89,7 @@ public EsqlSession( IndexResolver indexResolver, EnrichPolicyResolver enrichPolicyResolver, PreAnalyzer preAnalyzer, - FunctionRegistry functionRegistry, + EsqlFunctionRegistry functionRegistry, LogicalPlanOptimizer logicalPlanOptimizer, Mapper mapper, Verifier verifier @@ -110,26 +110,31 @@ public String sessionId() { return sessionId; } - public void execute(EsqlQueryRequest request, ActionListener listener) { + /** + * Execute an ESQL request. + */ + public void execute( + EsqlQueryRequest request, + BiConsumer> runPhase, + ActionListener listener + ) { LOGGER.debug("ESQL query:\n{}", request.query()); - optimizedPhysicalPlan( + analyzedPlan( parse(request.query(), request.params()), - listener.map(plan -> EstimatesRowSize.estimateRowSize(0, plan.transformUp(FragmentExec.class, f -> { - QueryBuilder filter = request.filter(); - if (filter != null) { - var fragmentFilter = f.esFilter(); - // TODO: have an ESFilter and push down to EsQueryExec / EsSource - // This is an ugly hack to push the filter parameter to Lucene - // TODO: filter integration testing - filter = fragmentFilter != null ? boolQuery().filter(fragmentFilter).must(filter) : filter; - LOGGER.debug("Fold filter {} to EsQueryExec", filter); - f = f.withFilter(filter); - } - return f; - }))) + listener.delegateFailureAndWrap((next, analyzedPlan) -> executeAnalyzedPlan(request, runPhase, analyzedPlan, next)) ); } + public void executeAnalyzedPlan( + EsqlQueryRequest request, + BiConsumer> runPhase, + LogicalPlan analyzedPlan, + ActionListener listener + ) { + // TODO phased execution lands here. 
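The execute/executeAnalyzedPlan split introduced here threads a BiConsumer<PhysicalPlan, ActionListener<Result>> through the session, so the transport action owns the "run one physical plan" step and the session can later add extra planning phases. A hedged, standalone sketch of that callback shape (PhysicalPlanStub and ResultStub are made-up stand-ins for the real types):

import java.util.function.BiConsumer;
import java.util.function.Consumer;

// Sketch of a session handing a physical plan to a caller-supplied run phase.
final class PhasedExecutionSketch {
    record PhysicalPlanStub(String description) {}
    record ResultStub(String payload) {}

    static void executeAnalyzedPlan(
        String analyzedPlan,
        BiConsumer<PhysicalPlanStub, Consumer<ResultStub>> runPhase,
        Consumer<ResultStub> listener
    ) {
        // Single-phase today; a multi-phase planner could invoke runPhase several times.
        PhysicalPlanStub physical = new PhysicalPlanStub("physical(" + analyzedPlan + ")");
        runPhase.accept(physical, listener);
    }

    public static void main(String[] args) {
        BiConsumer<PhysicalPlanStub, Consumer<ResultStub>> runPhase =
            (plan, l) -> l.accept(new ResultStub("ran " + plan.description()));
        executeAnalyzedPlan("FROM idx | LIMIT 1", runPhase, r -> System.out.println(r.payload()));
    }
}

Keeping the run step as a callback is what allows more phases to be inserted here later without changing callers such as TransportEsqlQueryAction or CsvTests.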
+ runPhase.accept(logicalPlanToPhysicalPlan(analyzedPlan, request), listener); + } + private LogicalPlan parse(String query, QueryParams params) { var parsed = new EsqlParser().createStatement(query, params); LOGGER.debug("Parsed logical plan:\n{}", parsed); @@ -145,6 +150,7 @@ public void analyzedPlan(LogicalPlan parsed, ActionListener listene preAnalyze(parsed, (indices, policies) -> { Analyzer analyzer = new Analyzer(new AnalyzerContext(configuration, functionRegistry, indices, policies), verifier); var plan = analyzer.analyze(parsed); + plan.setAnalyzed(); LOGGER.debug("Analyzed plan:\n{}", plan); return plan; }, listener); @@ -235,14 +241,12 @@ static Set fieldNames(LogicalPlan parsed, Set enrichPolicyMatchF parsed.forEachDown(p -> {// go over each plan top-down if (p instanceof RegexExtract re) { // for Grok and Dissect - AttributeSet dissectRefs = p.references(); - // don't add to the list of fields the extracted ones (they are not real fields in mappings) - dissectRefs.removeAll(re.extractedFields()); - references.addAll(dissectRefs); - // also remove other down-the-tree references to the extracted fields + // remove other down-the-tree references to the extracted fields for (Attribute extracted : re.extractedFields()) { references.removeIf(attr -> matchByName(attr, extracted.qualifiedName(), false)); } + // but keep the inputs needed by Grok/Dissect + references.addAll(re.input().references()); } else if (p instanceof Enrich) { AttributeSet enrichRefs = p.references(); // Enrich adds an EmptyAttribute if no match field is specified @@ -307,28 +311,41 @@ private static Set subfields(Set names) { return names.stream().filter(name -> name.endsWith(WILDCARD) == false).map(name -> name + ".*").collect(Collectors.toSet()); } - public void optimizedPlan(LogicalPlan logicalPlan, ActionListener listener) { - analyzedPlan(logicalPlan, map(listener, p -> { - var plan = logicalPlanOptimizer.optimize(p); - LOGGER.debug("Optimized logicalPlan plan:\n{}", plan); - return plan; - })); + private PhysicalPlan logicalPlanToPhysicalPlan(LogicalPlan logicalPlan, EsqlQueryRequest request) { + PhysicalPlan physicalPlan = optimizedPhysicalPlan(logicalPlan); + physicalPlan = physicalPlan.transformUp(FragmentExec.class, f -> { + QueryBuilder filter = request.filter(); + if (filter != null) { + var fragmentFilter = f.esFilter(); + // TODO: have an ESFilter and push down to EsQueryExec / EsSource + // This is an ugly hack to push the filter parameter to Lucene + // TODO: filter integration testing + filter = fragmentFilter != null ? 
boolQuery().filter(fragmentFilter).must(filter) : filter; + LOGGER.debug("Fold filter {} to EsQueryExec", filter); + f = f.withFilter(filter); + } + return f; + }); + return EstimatesRowSize.estimateRowSize(0, physicalPlan); } - public void physicalPlan(LogicalPlan optimized, ActionListener listener) { - optimizedPlan(optimized, map(listener, p -> { - var plan = mapper.map(p); - LOGGER.debug("Physical plan:\n{}", plan); - return plan; - })); + public LogicalPlan optimizedPlan(LogicalPlan logicalPlan) { + assert logicalPlan.analyzed(); + var plan = logicalPlanOptimizer.optimize(logicalPlan); + LOGGER.debug("Optimized logicalPlan plan:\n{}", plan); + return plan; } - public void optimizedPhysicalPlan(LogicalPlan logicalPlan, ActionListener listener) { - physicalPlan(logicalPlan, map(listener, p -> { - var plan = physicalPlanOptimizer.optimize(p); - LOGGER.debug("Optimized physical plan:\n{}", plan); - return plan; - })); + public PhysicalPlan physicalPlan(LogicalPlan logicalPlan) { + var plan = mapper.map(optimizedPlan(logicalPlan)); + LOGGER.debug("Physical plan:\n{}", plan); + return plan; + } + + public PhysicalPlan optimizedPhysicalPlan(LogicalPlan logicalPlan) { + var plan = physicalPlanOptimizer.optimize(physicalPlan(logicalPlan)); + LOGGER.debug("Optimized physical plan:\n{}", plan); + return plan; } public static InvalidMappedField specificValidity(String fieldName, Map types) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Result.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Result.java index 7cbf3987af2cb..42beb88bbe38b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Result.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Result.java @@ -7,8 +7,23 @@ package org.elasticsearch.xpack.esql.session; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverProfile; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import java.util.List; -public record Result(List columns, List> values) {} +/** + * Results from running a chunk of ESQL. + * @param schema "Schema" of the {@link Attribute}s that are produced by the {@link LogicalPlan} + * that was run. Each {@link Page} contains a {@link Block} of values for each + * attribute in this list. + * @param pages Actual values produced by running the ESQL. + * @param profiles {@link DriverProfile}s from all drivers that ran to produce the output. These + * are quite cheap to build, so we build them for all ESQL runs, regardless of if + * users have asked for them. But we only include them in the results if users ask + * for them. 
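The Result record introduced here (declared just below this javadoc) bundles the schema, the pages of values, and the per-driver profiles into one value. A rough, hypothetical usage sketch with simplified stand-in types, purely illustrative:

import java.util.List;

// Simplified mirror of the Result record: "schema" drives column names and types,
// "pages" carries the values, and "profiles" is always populated but only shown on request.
final class ResultSketch {
    record AttributeStub(String name, String type) {}
    record Result(List<AttributeStub> schema, List<List<Object>> pages, List<String> profiles) {}

    public static void main(String[] args) {
        Result result = new Result(
            List.of(new AttributeStub("emp_no", "integer"), new AttributeStub("name", "keyword")),
            List.of(List.of(10001, "alice")),
            List.of("driver-profile-0")
        );
        // Consumers derive column names and types from the schema, one entry per block in each page.
        List<String> names = result.schema().stream().map(AttributeStub::name).toList();
        List<String> types = result.schema().stream().map(AttributeStub::type).toList();
        System.out.println(names + " " + types + " " + result.pages());
    }
}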
+ */ +public record Result(List schema, List pages, List profiles) {} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/FeatureMetric.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/FeatureMetric.java index d5c4a67b01e8b..c4d890a818ec7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/FeatureMetric.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/FeatureMetric.java @@ -7,18 +7,18 @@ package org.elasticsearch.xpack.esql.stats; -import org.elasticsearch.xpack.esql.core.plan.logical.Filter; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.logical.Drop; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.Filter; import org.elasticsearch.xpack.esql.plan.logical.Grok; import org.elasticsearch.xpack.esql.plan.logical.Keep; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.MvExpand; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.plan.logical.Rename; import org.elasticsearch.xpack.esql.plan.logical.Row; import org.elasticsearch.xpack.esql.plan.logical.meta.MetaFunctions; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java index ee28a7fe9941a..4ddef25584eea 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java @@ -35,11 +35,14 @@ public Collection dataTypes() { @Override public DataType fromEs(String typeName, TimeSeriesParams.MetricType metricType) { - if (metricType == TimeSeriesParams.MetricType.COUNTER) { - return EsqlDataTypes.getCounterType(typeName); - } else { - return EsqlDataTypes.fromName(typeName); - } + DataType type = DataType.fromEs(typeName); + /* + * If we're handling a time series COUNTER type field then convert it + * into it's counter. But *first* we have to widen it because we only + * have time series counters for `double`, `long` and `int`, not `float` + * and `half_float`, etc. + */ + return metricType == TimeSeriesParams.MetricType.COUNTER ? 
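The fromEs change here widens small numeric types before switching to the counter variant, replacing the EsqlDataTypes.widenSmallNumericTypes/getCounterType helpers removed a few hunks below. A rough standalone sketch of that mapping, using plain strings instead of DataType (an assumption made for self-containment):

import java.util.Locale;

// Standalone sketch of widen-then-counter; in the plugin this logic now lives
// on DataType.widenSmallNumeric() and DataType.counter().
final class CounterTypeSketch {
    static String widenSmallNumeric(String type) {
        return switch (type) {
            case "byte", "short" -> "integer";
            case "half_float", "float", "scaled_float" -> "double";
            default -> type;
        };
    }

    static String fromEs(String esType, boolean counterMetric) {
        String widened = widenSmallNumeric(esType.toLowerCase(Locale.ROOT));
        // Counters only exist for long, integer and double, hence the widening first;
        // non-numeric inputs are out of scope for this sketch.
        return counterMetric ? "counter_" + widened : widened;
    }

    public static void main(String[] args) {
        System.out.println(fromEs("half_float", true)); // counter_double
        System.out.println(fromEs("long", true));       // counter_long
        System.out.println(fromEs("keyword", false));   // keyword
    }
}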
type.widenSmallNumeric().counter() : type; } @Override @@ -49,7 +52,7 @@ public DataType fromJava(Object value) { @Override public boolean isUnsupported(DataType type) { - return EsqlDataTypes.isUnsupported(type); + return type == DataType.UNSUPPORTED; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java index 2d817d65f6ba9..8a75d3f379dd3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java @@ -8,23 +8,17 @@ import org.elasticsearch.xpack.esql.core.type.DataType; -import java.util.Collections; import java.util.Locale; -import java.util.Map; -import static java.util.stream.Collectors.toMap; -import static java.util.stream.Collectors.toUnmodifiableMap; import static org.elasticsearch.xpack.esql.core.type.DataType.BYTE; import static org.elasticsearch.xpack.esql.core.type.DataType.DATE_PERIOD; -import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; import static org.elasticsearch.xpack.esql.core.type.DataType.FLOAT; import static org.elasticsearch.xpack.esql.core.type.DataType.HALF_FLOAT; -import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; -import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; import static org.elasticsearch.xpack.esql.core.type.DataType.NESTED; import static org.elasticsearch.xpack.esql.core.type.DataType.NULL; import static org.elasticsearch.xpack.esql.core.type.DataType.OBJECT; +import static org.elasticsearch.xpack.esql.core.type.DataType.PARTIAL_AGG; import static org.elasticsearch.xpack.esql.core.type.DataType.SCALED_FLOAT; import static org.elasticsearch.xpack.esql.core.type.DataType.SHORT; import static org.elasticsearch.xpack.esql.core.type.DataType.SOURCE; @@ -35,40 +29,10 @@ public final class EsqlDataTypes { - private static final Map NAME_TO_TYPE = DataType.types() - .stream() - .collect(toUnmodifiableMap(DataType::typeName, t -> t)); - - private static final Map ES_TO_TYPE; - - static { - Map map = DataType.types().stream().filter(e -> e.esType() != null).collect(toMap(DataType::esType, t -> t)); - // ES calls this 'point', but ESQL calls it 'cartesian_point' - map.put("point", DataType.CARTESIAN_POINT); - map.put("shape", DataType.CARTESIAN_SHAPE); - ES_TO_TYPE = Collections.unmodifiableMap(map); - } - private EsqlDataTypes() {} public static DataType fromTypeName(String name) { - return NAME_TO_TYPE.get(name.toLowerCase(Locale.ROOT)); - } - - public static DataType fromName(String name) { - DataType type = ES_TO_TYPE.get(name); - return type != null ? 
type : UNSUPPORTED; - } - - public static boolean isUnsupported(DataType type) { - return DataType.isUnsupported(type); - } - - public static String outputType(DataType type) { - if (type != null && type.esType() != null) { - return type.esType(); - } - return "unsupported"; + return DataType.fromTypeName(name.toLowerCase(Locale.ROOT)); } public static boolean isString(DataType t) { @@ -126,7 +90,8 @@ public static boolean isRepresentable(DataType t) { && t != SCALED_FLOAT && t != SOURCE && t != HALF_FLOAT - && isCounterType(t) == false; + && t != PARTIAL_AGG + && t.isCounter() == false; } public static boolean areCompatible(DataType left, DataType right) { @@ -136,27 +101,4 @@ public static boolean areCompatible(DataType left, DataType right) { return (left == NULL || right == NULL) || (isString(left) && isString(right)) || (left.isNumeric() && right.isNumeric()); } } - - public static DataType widenSmallNumericTypes(DataType type) { - if (type == BYTE || type == SHORT) { - return INTEGER; - } - if (type == HALF_FLOAT || type == FLOAT || type == SCALED_FLOAT) { - return DOUBLE; - } - return type; - } - - public static DataType getCounterType(String typeName) { - final DataType rootType = widenSmallNumericTypes(fromName(typeName)); - if (rootType == UNSUPPORTED) { - return rootType; - } - assert rootType == LONG || rootType == INTEGER || rootType == DOUBLE : rootType; - return fromTypeName("counter_" + rootType.typeName()); - } - - public static boolean isCounterType(DataType dt) { - return dt == DataType.COUNTER_LONG || dt == DataType.COUNTER_INTEGER || dt == DataType.COUNTER_DOUBLE; - } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/MultiTypeEsField.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/MultiTypeEsField.java index 2b963e7428e2b..8b2fc926379f2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/MultiTypeEsField.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/MultiTypeEsField.java @@ -14,7 +14,6 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.core.type.InvalidMappedField; -import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import java.io.IOException; import java.util.HashMap; @@ -46,8 +45,7 @@ public MultiTypeEsField(String name, DataType dataType, boolean aggregatable, Ma } public MultiTypeEsField(StreamInput in) throws IOException { - // TODO: Change the conversion expression serialization to i.readNamedWriteable(Expression.class) once Expression is fully supported - this(in.readString(), DataType.readFrom(in), in.readBoolean(), in.readImmutableMap(i -> ((PlanStreamInput) i).readExpression())); + this(in.readString(), DataType.readFrom(in), in.readBoolean(), in.readImmutableMap(i -> i.readNamedWriteable(Expression.class))); } @Override diff --git a/x-pack/plugin/esql/src/main/plugin-metadata/plugin-security.codebases b/x-pack/plugin/esql/src/main/plugin-metadata/plugin-security.codebases new file mode 100644 index 0000000000000..ecae5129b3563 --- /dev/null +++ b/x-pack/plugin/esql/src/main/plugin-metadata/plugin-security.codebases @@ -0,0 +1 @@ +arrow: org.elasticsearch.xpack.esql.arrow.AllocationManagerShim diff --git a/x-pack/plugin/esql/src/main/plugin-metadata/plugin-security.policy b/x-pack/plugin/esql/src/main/plugin-metadata/plugin-security.policy index e69de29bb2d1d..22884437add88 100644 --- 
a/x-pack/plugin/esql/src/main/plugin-metadata/plugin-security.policy +++ b/x-pack/plugin/esql/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +grant codeBase "${codebase.arrow}" { + // Needed for AllocationManagerShim + permission java.lang.RuntimePermission "accessDeclaredMembers"; + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; +}; diff --git a/x-pack/plugin/esql/src/main/resources/forbidden/ql-signatures.txt b/x-pack/plugin/esql/src/main/resources/forbidden/ql-signatures.txt deleted file mode 100644 index 5371b35f4e033..0000000000000 --- a/x-pack/plugin/esql/src/main/resources/forbidden/ql-signatures.txt +++ /dev/null @@ -1,5 +0,0 @@ -org.elasticsearch.xpack.esql.core.plan.logical.Aggregate @ use @org.elasticsearch.xpack.esql.plan.logical.Aggregate instead -org.elasticsearch.xpack.esql.core.plan.logical.EsRelation @ use @org.elasticsearch.xpack.esql.plan.logical.EsRelation instead -org.elasticsearch.xpack.esql.core.plan.logical.Project @ use @org.elasticsearch.xpack.esql.plan.logical.Project instead -org.elasticsearch.xpack.esql.core.plan.logical.UnresolvedRelation @ use @org.elasticsearch.xpack.esql.plan.logical.UnresolvedRelation instead -org.elasticsearch.xpack.esql.core.expression.function.Functions @ use @org.elasticsearch.xpack.esql.expression.function.Functions instead diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index fd161c8d63871..6a9b7a0e0089d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -31,7 +31,6 @@ import org.elasticsearch.compute.operator.exchange.ExchangeSinkHandler; import org.elasticsearch.compute.operator.exchange.ExchangeSourceHandler; import org.elasticsearch.core.Releasables; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; @@ -48,17 +47,16 @@ import org.elasticsearch.xpack.esql.CsvTestUtils.ActualResults; import org.elasticsearch.xpack.esql.CsvTestUtils.Type; import org.elasticsearch.xpack.esql.action.EsqlCapabilities; +import org.elasticsearch.xpack.esql.action.EsqlQueryRequest; import org.elasticsearch.xpack.esql.analysis.Analyzer; import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; import org.elasticsearch.xpack.esql.analysis.EnrichResolution; import org.elasticsearch.xpack.esql.analysis.PreAnalyzer; import org.elasticsearch.xpack.esql.core.CsvSpecReader; import org.elasticsearch.xpack.esql.core.SpecReader; -import org.elasticsearch.xpack.esql.core.expression.Expressions; -import org.elasticsearch.xpack.esql.core.expression.function.FunctionRegistry; +import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.index.EsIndex; import org.elasticsearch.xpack.esql.core.index.IndexResolution; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.enrich.EnrichLookupService; import org.elasticsearch.xpack.esql.enrich.ResolvedEnrichPolicy; import 
org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; @@ -73,7 +71,7 @@ import org.elasticsearch.xpack.esql.optimizer.TestPhysicalPlanOptimizer; import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.plan.logical.Enrich; -import org.elasticsearch.xpack.esql.plan.physical.EstimatesRowSize; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.physical.LocalSourceExec; import org.elasticsearch.xpack.esql.plan.physical.OutputExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; @@ -85,8 +83,9 @@ import org.elasticsearch.xpack.esql.plugin.EsqlFeatures; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; +import org.elasticsearch.xpack.esql.session.EsqlSession; +import org.elasticsearch.xpack.esql.session.Result; import org.elasticsearch.xpack.esql.stats.DisabledSearchStats; -import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.junit.After; import org.junit.Before; import org.mockito.Mockito; @@ -101,6 +100,7 @@ import java.util.TreeMap; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; import static org.elasticsearch.xpack.esql.CsvTestUtils.ExpectedResults; import static org.elasticsearch.xpack.esql.CsvTestUtils.isEnabled; @@ -157,11 +157,12 @@ public class CsvTests extends ESTestCase { private final String testName; private final Integer lineNumber; private final CsvSpecReader.CsvTestCase testCase; + private final String instructions; private final EsqlConfiguration configuration = EsqlTestUtils.configuration( new QueryPragmas(Settings.builder().put("page_size", randomPageSize()).build()) ); - private final FunctionRegistry functionRegistry = new EsqlFunctionRegistry(); + private final EsqlFunctionRegistry functionRegistry = new EsqlFunctionRegistry(); private final EsqlParser parser = new EsqlParser(); private final Mapper mapper = new Mapper(functionRegistry); private final PhysicalPlanOptimizer physicalPlanOptimizer = new TestPhysicalPlanOptimizer(new PhysicalOptimizerContext(configuration)); @@ -211,17 +212,25 @@ private int randomPageSize() { } } - public CsvTests(String fileName, String groupName, String testName, Integer lineNumber, CsvSpecReader.CsvTestCase testCase) { + public CsvTests( + String fileName, + String groupName, + String testName, + Integer lineNumber, + CsvSpecReader.CsvTestCase testCase, + String instructions + ) { this.fileName = fileName; this.groupName = groupName; this.testName = testName; this.lineNumber = lineNumber; this.testCase = testCase; + this.instructions = instructions; } public final void test() throws Throwable { try { - assumeTrue("Test " + testName + " is not enabled", isEnabled(testName, Version.CURRENT)); + assumeTrue("Test " + testName + " is not enabled", isEnabled(testName, instructions, Version.CURRENT)); /* * The csv tests support all but a few features. The unsupported features * are tested in integration tests. 
@@ -240,6 +249,15 @@ public final void test() throws Throwable { testCase.requiredCapabilities, everyItem(in(EsqlCapabilities.CAPABILITIES)) ); + } else { + for (EsqlCapabilities.Cap c : EsqlCapabilities.Cap.values()) { + if (c.snapshotOnly()) { + assumeFalse( + c.capabilityName() + " is not supported in non-snapshot releases", + testCase.requiredCapabilities.contains(c.capabilityName()) + ); + } + } } doTest(); @@ -322,16 +340,14 @@ private static EnrichPolicy loadEnrichPolicyMapping(String policyFileName) { } } - private PhysicalPlan physicalPlan(LogicalPlan parsed, CsvTestsDataLoader.TestsDataset dataset) { + private LogicalPlan analyzedPlan(LogicalPlan parsed, CsvTestsDataLoader.TestsDataset dataset) { var indexResolution = loadIndexResolution(dataset.mappingFileName(), dataset.indexName()); var enrichPolicies = loadEnrichPolicies(); var analyzer = new Analyzer(new AnalyzerContext(configuration, functionRegistry, indexResolution, enrichPolicies), TEST_VERIFIER); - var analyzed = analyzer.analyze(parsed); - var logicalOptimized = new LogicalPlanOptimizer(new LogicalOptimizerContext(configuration)).optimize(analyzed); - var physicalPlan = mapper.map(logicalOptimized); - var optimizedPlan = EstimatesRowSize.estimateRowSize(0, physicalPlanOptimizer.optimize(physicalPlan)); - opportunisticallyAssertPlanSerialization(physicalPlan, optimizedPlan); // comment out to disable serialization - return optimizedPlan; + LogicalPlan plan = analyzer.analyze(parsed); + plan.setAnalyzed(); + LOGGER.debug("Analyzed plan:\n{}", plan); + return plan; } private static CsvTestsDataLoader.TestsDataset testsDataset(LogicalPlan parsed) { @@ -373,90 +389,43 @@ private static TestPhysicalOperationProviders testOperationProviders(CsvTestsDat } private ActualResults executePlan(BigArrays bigArrays) throws Exception { - var parsed = parser.createStatement(testCase.query); + LogicalPlan parsed = parser.createStatement(testCase.query); var testDataset = testsDataset(parsed); + LogicalPlan analyzed = analyzedPlan(parsed, testDataset); - String sessionId = "csv-test"; - BlockFactory blockFactory = new BlockFactory( - bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST), - bigArrays, - ByteSizeValue.ofBytes(randomLongBetween(1, BlockFactory.DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE.getBytes() * 2)) - ); - ExchangeSourceHandler exchangeSource = new ExchangeSourceHandler(between(1, 64), executor); - ExchangeSinkHandler exchangeSink = new ExchangeSinkHandler(blockFactory, between(1, 64), threadPool::relativeTimeInMillis); - LocalExecutionPlanner executionPlanner = new LocalExecutionPlanner( - sessionId, - "", - new CancellableTask(1, "transport", "esql", null, TaskId.EMPTY_TASK_ID, Map.of()), - bigArrays, - blockFactory, - randomNodeSettings(), + EsqlSession session = new EsqlSession( + getTestName(), configuration, - exchangeSource, - exchangeSink, - Mockito.mock(EnrichLookupService.class), - testOperationProviders(testDataset) + null, + null, + null, + functionRegistry, + new LogicalPlanOptimizer(new LogicalOptimizerContext(configuration)), + mapper, + TEST_VERIFIER ); - // - // Keep in sync with ComputeService#execute - // - PhysicalPlan physicalPlan = physicalPlan(parsed, testDataset); - Tuple coordinatorAndDataNodePlan = PlannerUtils.breakPlanBetweenCoordinatorAndDataNode( - physicalPlan, - configuration + TestPhysicalOperationProviders physicalOperationProviders = testOperationProviders(testDataset); + + PlainActionFuture listener = new PlainActionFuture<>(); + + session.executeAnalyzedPlan( + new 
EsqlQueryRequest(), + runPhase(bigArrays, physicalOperationProviders), + analyzed, + listener.delegateFailureAndWrap( + // Wrap so we can capture the warnings in the calling thread + (next, result) -> next.onResponse( + new ActualResults( + result.schema().stream().map(Attribute::name).toList(), + result.schema().stream().map(a -> Type.asType(a.dataType().nameUpper())).toList(), + result.schema().stream().map(Attribute::dataType).toList(), + result.pages(), + threadPool.getThreadContext().getResponseHeaders() + ) + ) + ) ); - PhysicalPlan coordinatorPlan = coordinatorAndDataNodePlan.v1(); - PhysicalPlan dataNodePlan = coordinatorAndDataNodePlan.v2(); - - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Coordinator plan\n" + coordinatorPlan); - LOGGER.trace("DataNode plan\n" + dataNodePlan); - } - - List columnNames = Expressions.names(coordinatorPlan.output()); - List dataTypes = new ArrayList<>(columnNames.size()); - List columnTypes = coordinatorPlan.output() - .stream() - .peek(o -> dataTypes.add(EsqlDataTypes.outputType(o.dataType()))) - .map(o -> Type.asType(o.dataType().nameUpper())) - .toList(); - - List drivers = new ArrayList<>(); - List collectedPages = Collections.synchronizedList(new ArrayList<>()); - - // replace fragment inside the coordinator plan - try { - LocalExecutionPlan coordinatorNodeExecutionPlan = executionPlanner.plan(new OutputExec(coordinatorPlan, collectedPages::add)); - drivers.addAll(coordinatorNodeExecutionPlan.createDrivers(sessionId)); - if (dataNodePlan != null) { - var searchStats = new DisabledSearchStats(); - var logicalTestOptimizer = new LocalLogicalPlanOptimizer(new LocalLogicalOptimizerContext(configuration, searchStats)); - var physicalTestOptimizer = new TestLocalPhysicalPlanOptimizer( - new LocalPhysicalOptimizerContext(configuration, searchStats) - ); - - var csvDataNodePhysicalPlan = PlannerUtils.localPlan(dataNodePlan, logicalTestOptimizer, physicalTestOptimizer); - exchangeSource.addRemoteSink(exchangeSink::fetchPageAsync, randomIntBetween(1, 3)); - LocalExecutionPlan dataNodeExecutionPlan = executionPlanner.plan(csvDataNodePhysicalPlan); - drivers.addAll(dataNodeExecutionPlan.createDrivers(sessionId)); - Randomness.shuffle(drivers); - } - // Execute the driver - DriverRunner runner = new DriverRunner(threadPool.getThreadContext()) { - @Override - protected void start(Driver driver, ActionListener driverListener) { - Driver.start(threadPool.getThreadContext(), executor, driver, between(1, 1000), driverListener); - } - }; - PlainActionFuture future = new PlainActionFuture<>(); - runner.runToCompletion(drivers, ActionListener.releaseAfter(future, () -> Releasables.close(drivers)).map(ignore -> { - var responseHeaders = threadPool.getThreadContext().getResponseHeaders(); - return new ActualResults(columnNames, columnTypes, dataTypes, collectedPages, responseHeaders); - })); - return future.actionGet(TimeValue.timeValueSeconds(30)); - } finally { - Releasables.close(() -> Releasables.close(drivers)); - } + return listener.get(); } private Settings randomNodeSettings() { @@ -479,17 +448,15 @@ private Throwable reworkException(Throwable th) { } // Asserts that the serialization and deserialization of the plan creates an equivalent plan. - private void opportunisticallyAssertPlanSerialization(PhysicalPlan... 
plans) { - for (var plan : plans) { - var tmp = plan; - do { - if (tmp instanceof LocalSourceExec) { - return; // skip plans with localSourceExec - } - } while (tmp.children().isEmpty() == false && (tmp = tmp.children().get(0)) != null); + private void opportunisticallyAssertPlanSerialization(PhysicalPlan plan) { + var tmp = plan; + do { + if (tmp instanceof LocalSourceExec) { + return; // skip plans with localSourceExec + } + } while (tmp.children().isEmpty() == false && (tmp = tmp.children().get(0)) != null); - SerializationTestUtils.assertSerialization(plan, configuration); - } + SerializationTestUtils.assertSerialization(plan, configuration); } private void assertWarnings(List warnings) { @@ -501,6 +468,84 @@ private void assertWarnings(List warnings) { normalized.add(normW); } } - EsqlTestUtils.assertWarnings(normalized, testCase.expectedWarnings(true), testCase.expectedWarningsRegex()); + EsqlTestUtils.assertWarnings(normalized, testCase.expectedWarnings(), testCase.expectedWarningsRegex()); + } + + BiConsumer> runPhase( + BigArrays bigArrays, + TestPhysicalOperationProviders physicalOperationProviders + ) { + return (physicalPlan, listener) -> runPhase(bigArrays, physicalOperationProviders, physicalPlan, listener); + } + + void runPhase( + BigArrays bigArrays, + TestPhysicalOperationProviders physicalOperationProviders, + PhysicalPlan physicalPlan, + ActionListener listener + ) { + // Keep in sync with ComputeService#execute + opportunisticallyAssertPlanSerialization(physicalPlan); + Tuple coordinatorAndDataNodePlan = PlannerUtils.breakPlanBetweenCoordinatorAndDataNode( + physicalPlan, + configuration + ); + PhysicalPlan coordinatorPlan = coordinatorAndDataNodePlan.v1(); + PhysicalPlan dataNodePlan = coordinatorAndDataNodePlan.v2(); + + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Coordinator plan\n" + coordinatorPlan); + LOGGER.trace("DataNode plan\n" + dataNodePlan); + } + + BlockFactory blockFactory = new BlockFactory( + bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST), + bigArrays, + ByteSizeValue.ofBytes(randomLongBetween(1, BlockFactory.DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE.getBytes() * 2)) + ); + ExchangeSourceHandler exchangeSource = new ExchangeSourceHandler(between(1, 64), executor); + ExchangeSinkHandler exchangeSink = new ExchangeSinkHandler(blockFactory, between(1, 64), threadPool::relativeTimeInMillis); + + LocalExecutionPlanner executionPlanner = new LocalExecutionPlanner( + getTestName(), + "", + new CancellableTask(1, "transport", "esql", null, TaskId.EMPTY_TASK_ID, Map.of()), + bigArrays, + blockFactory, + randomNodeSettings(), + configuration, + exchangeSource, + exchangeSink, + Mockito.mock(EnrichLookupService.class), + physicalOperationProviders + ); + + List collectedPages = Collections.synchronizedList(new ArrayList<>()); + + // replace fragment inside the coordinator plan + List drivers = new ArrayList<>(); + LocalExecutionPlan coordinatorNodeExecutionPlan = executionPlanner.plan(new OutputExec(coordinatorPlan, collectedPages::add)); + drivers.addAll(coordinatorNodeExecutionPlan.createDrivers(getTestName())); + if (dataNodePlan != null) { + var searchStats = new DisabledSearchStats(); + var logicalTestOptimizer = new LocalLogicalPlanOptimizer(new LocalLogicalOptimizerContext(configuration, searchStats)); + var physicalTestOptimizer = new TestLocalPhysicalPlanOptimizer(new LocalPhysicalOptimizerContext(configuration, searchStats)); + + var csvDataNodePhysicalPlan = PlannerUtils.localPlan(dataNodePlan, logicalTestOptimizer, 
physicalTestOptimizer); + exchangeSource.addRemoteSink(exchangeSink::fetchPageAsync, randomIntBetween(1, 3)); + LocalExecutionPlan dataNodeExecutionPlan = executionPlanner.plan(csvDataNodePhysicalPlan); + + drivers.addAll(dataNodeExecutionPlan.createDrivers(getTestName())); + Randomness.shuffle(drivers); + } + // Execute the drivers + DriverRunner runner = new DriverRunner(threadPool.getThreadContext()) { + @Override + protected void start(Driver driver, ActionListener driverListener) { + Driver.start(threadPool.getThreadContext(), executor, driver, between(1, 1000), driverListener); + } + }; + listener = ActionListener.releaseAfter(listener, () -> Releasables.close(drivers)); + runner.runToCompletion(drivers, listener.map(ignore -> new Result(physicalPlan.output(), collectedPages, List.of()))); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java index a614ff3c621f8..8c5a5a4b3ba3b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java @@ -26,12 +26,14 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; +import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.querydsl.query.SingleValueQuery; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; @@ -71,8 +73,8 @@ public static void assertSerialization(Expression expression) { public static void assertSerialization(Expression expression, EsqlConfiguration configuration) { Expression deserExpression = serializeDeserialize( expression, - PlanStreamOutput::writeExpression, - PlanStreamInput::readExpression, + PlanStreamOutput::writeNamedWriteable, + in -> in.readNamedWriteable(Expression.class), configuration ); EqualsHashCodeTestUtils.checkEqualsAndHashCode(expression, unused -> deserExpression); @@ -90,7 +92,7 @@ public static T serializeDeserialize(T orig, Serializer serializer, Deser ByteBufferStreamInput.wrap(BytesReference.toBytes(out.bytes())), writableRegistry() ); - PlanStreamInput planStreamInput = new PlanStreamInput(in, planNameRegistry, writableRegistry(), config); + PlanStreamInput planStreamInput = new PlanStreamInput(in, planNameRegistry, in.namedWriteableRegistry(), config); return deserializer.read(planStreamInput); } catch (IOException e) { throw new UncheckedIOException(e); @@ -121,6 +123,9 @@ public static NamedWriteableRegistry writableRegistry() { entries.add(UnsupportedAttribute.ENTRY); entries.addAll(NamedExpression.getNamedWriteables()); entries.add(UnsupportedAttribute.NAMED_EXPRESSION_ENTRY); + 
entries.addAll(Expression.getNamedWriteables()); + entries.addAll(EsqlScalarFunction.getNamedWriteables()); + entries.addAll(AggregateFunction.getNamedWriteables()); return new NamedWriteableRegistry(entries); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java index a8ad53b8bc663..890a611fdea10 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java @@ -34,8 +34,11 @@ import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.esql.Column; +import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.parser.ParsingException; import org.elasticsearch.xpack.esql.parser.QueryParam; +import org.elasticsearch.xpack.esql.parser.QueryParams; import java.io.IOException; import java.util.ArrayList; @@ -76,9 +79,9 @@ public void testParseFields() throws IOException { assertEquals(locale.toLanguageTag(), request.locale().toLanguageTag()); assertEquals(locale, request.locale()); assertEquals(filter, request.filter()); - assertEquals(params.size(), request.params().positionalParams().size()); + assertEquals(params.size(), request.params().size()); for (int i = 0; i < params.size(); i++) { - assertEquals(params.get(i), request.params().positionalParams().get(i)); + assertEquals(params.get(i), request.params().get(i + 1)); } } @@ -114,10 +117,10 @@ public void testNamedParams() throws IOException { assertEquals(locale.toLanguageTag(), request.locale().toLanguageTag()); assertEquals(locale, request.locale()); assertEquals(filter, request.filter()); - assertEquals(params.size(), request.params().positionalParams().size()); + assertEquals(params.size(), request.params().size()); - for (int i = 0; i < request.params().positionalParams().size(); i++) { - assertEquals(params.get(i), request.params().positionalParams().get(i)); + for (int i = 0; i < request.params().size(); i++) { + assertEquals(params.get(i), request.params().get(i + 1)); } } @@ -182,6 +185,20 @@ public void testInvalidParams() throws IOException { ); } + // Test for https://github.com/elastic/elasticsearch/issues/110028 + public void testNamedParamsMutation() { + EsqlQueryRequest request1 = new EsqlQueryRequest(); + assertThat(request1.params(), equalTo(new QueryParams())); + var exceptionMessage = randomAlphaOfLength(10); + var paramName = randomAlphaOfLength(5); + var paramValue = randomAlphaOfLength(5); + request1.params().addParsingError(new ParsingException(Source.EMPTY, exceptionMessage)); + request1.params().addTokenParam(null, new QueryParam(paramName, paramValue, DataType.KEYWORD)); + + EsqlQueryRequest request2 = new EsqlQueryRequest(); + assertThat(request2.params(), equalTo(new QueryParams())); + } + public void testParseFieldsForAsync() throws IOException { String query = randomAlphaOfLengthBetween(1, 100); boolean columnar = randomBoolean(); @@ -226,9 +243,9 @@ public void testParseFieldsForAsync() throws IOException { assertEquals(keepOnCompletion, request.keepOnCompletion()); assertEquals(waitForCompletion, request.waitForCompletionTimeout()); assertEquals(keepAlive, request.keepAlive()); - assertEquals(params.size(), 
request.params().positionalParams().size()); + assertEquals(params.size(), request.params().size()); for (int i = 0; i < params.size(); i++) { - assertEquals(params.get(i), request.params().positionalParams().get(i)); + assertEquals(params.get(i), request.params().get(i + 1)); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java index 9bc792ffe04aa..cff4d274dc49c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java @@ -44,11 +44,9 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; -import org.elasticsearch.xpack.core.esql.action.ColumnInfo; import org.elasticsearch.xpack.esql.TestBlockFactory; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.planner.PlannerUtils; -import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.versionfield.Version; import org.junit.After; import org.junit.Before; @@ -110,7 +108,7 @@ EsqlQueryResponse randomResponse(boolean columnar, EsqlQueryResponse.Profile pro EsqlQueryResponse randomResponseAsync(boolean columnar, EsqlQueryResponse.Profile profile, boolean async) { int noCols = randomIntBetween(1, 10); - List columns = randomList(noCols, noCols, this::randomColumnInfo); + List columns = randomList(noCols, noCols, this::randomColumnInfo); int noPages = randomIntBetween(1, 20); List values = randomList(noPages, noPages, () -> randomPage(columns)); String id = null; @@ -122,13 +120,12 @@ EsqlQueryResponse randomResponseAsync(boolean columnar, EsqlQueryResponse.Profil return new EsqlQueryResponse(columns, values, profile, columnar, id, isRunning, async); } - private ColumnInfo randomColumnInfo() { + private ColumnInfoImpl randomColumnInfo() { DataType type = randomValueOtherThanMany( - t -> false == DataType.isPrimitive(t) || t == DataType.DATE_PERIOD || t == DataType.TIME_DURATION, + t -> false == DataType.isPrimitive(t) || t == DataType.DATE_PERIOD || t == DataType.TIME_DURATION || t == DataType.PARTIAL_AGG, () -> randomFrom(DataType.types()) - ); - type = EsqlDataTypes.widenSmallNumericTypes(type); - return new ColumnInfo(randomAlphaOfLength(10), type.esType()); + ).widenSmallNumeric(); + return new ColumnInfoImpl(randomAlphaOfLength(10), type.esType()); } private EsqlQueryResponse.Profile randomProfile() { @@ -138,34 +135,34 @@ private EsqlQueryResponse.Profile randomProfile() { return new EsqlQueryResponseProfileTests().createTestInstance(); } - private Page randomPage(List columns) { + private Page randomPage(List columns) { return new Page(columns.stream().map(c -> { - Block.Builder builder = PlannerUtils.toElementType(EsqlDataTypes.fromName(c.type())).newBlockBuilder(1, blockFactory); + Block.Builder builder = PlannerUtils.toElementType(c.type()).newBlockBuilder(1, blockFactory); switch (c.type()) { - case "unsigned_long", "long", "counter_long" -> ((LongBlock.Builder) builder).appendLong(randomLong()); - case "integer", "counter_integer" -> ((IntBlock.Builder) builder).appendInt(randomInt()); - case "double", "counter_double" -> ((DoubleBlock.Builder) builder).appendDouble(randomDouble()); - case "keyword" -> ((BytesRefBlock.Builder) builder).appendBytesRef(new 
BytesRef(randomAlphaOfLength(10))); - case "text" -> ((BytesRefBlock.Builder) builder).appendBytesRef(new BytesRef(randomAlphaOfLength(10000))); - case "ip" -> ((BytesRefBlock.Builder) builder).appendBytesRef( + case UNSIGNED_LONG, LONG, COUNTER_LONG -> ((LongBlock.Builder) builder).appendLong(randomLong()); + case INTEGER, COUNTER_INTEGER -> ((IntBlock.Builder) builder).appendInt(randomInt()); + case DOUBLE, COUNTER_DOUBLE -> ((DoubleBlock.Builder) builder).appendDouble(randomDouble()); + case KEYWORD -> ((BytesRefBlock.Builder) builder).appendBytesRef(new BytesRef(randomAlphaOfLength(10))); + case TEXT -> ((BytesRefBlock.Builder) builder).appendBytesRef(new BytesRef(randomAlphaOfLength(10000))); + case IP -> ((BytesRefBlock.Builder) builder).appendBytesRef( new BytesRef(InetAddressPoint.encode(randomIp(randomBoolean()))) ); - case "date" -> ((LongBlock.Builder) builder).appendLong(randomInstant().toEpochMilli()); - case "boolean" -> ((BooleanBlock.Builder) builder).appendBoolean(randomBoolean()); - case "unsupported" -> ((BytesRefBlock.Builder) builder).appendBytesRef( + case DATETIME -> ((LongBlock.Builder) builder).appendLong(randomInstant().toEpochMilli()); + case BOOLEAN -> ((BooleanBlock.Builder) builder).appendBoolean(randomBoolean()); + case UNSUPPORTED -> ((BytesRefBlock.Builder) builder).appendBytesRef( new BytesRef(UnsupportedValueSource.UNSUPPORTED_OUTPUT) ); - case "version" -> ((BytesRefBlock.Builder) builder).appendBytesRef(new Version(randomIdentifier()).toBytesRef()); - case "geo_point" -> ((BytesRefBlock.Builder) builder).appendBytesRef(GEO.asWkb(GeometryTestUtils.randomPoint())); - case "cartesian_point" -> ((BytesRefBlock.Builder) builder).appendBytesRef(CARTESIAN.asWkb(ShapeTestUtils.randomPoint())); - case "geo_shape" -> ((BytesRefBlock.Builder) builder).appendBytesRef( + case VERSION -> ((BytesRefBlock.Builder) builder).appendBytesRef(new Version(randomIdentifier()).toBytesRef()); + case GEO_POINT -> ((BytesRefBlock.Builder) builder).appendBytesRef(GEO.asWkb(GeometryTestUtils.randomPoint())); + case CARTESIAN_POINT -> ((BytesRefBlock.Builder) builder).appendBytesRef(CARTESIAN.asWkb(ShapeTestUtils.randomPoint())); + case GEO_SHAPE -> ((BytesRefBlock.Builder) builder).appendBytesRef( GEO.asWkb(GeometryTestUtils.randomGeometry(randomBoolean())) ); - case "cartesian_shape" -> ((BytesRefBlock.Builder) builder).appendBytesRef( + case CARTESIAN_SHAPE -> ((BytesRefBlock.Builder) builder).appendBytesRef( CARTESIAN.asWkb(ShapeTestUtils.randomGeometry(randomBoolean())) ); - case "null" -> builder.appendNull(); - case "_source" -> { + case NULL -> builder.appendNull(); + case SOURCE -> { try { ((BytesRefBlock.Builder) builder).appendBytesRef( BytesReference.bytes( @@ -179,7 +176,7 @@ private Page randomPage(List columns) { throw new UncheckedIOException(e); } } - default -> throw new UnsupportedOperationException("unsupported data type [" + c + "]"); + // default -> throw new UnsupportedOperationException("unsupported data type [" + c + "]"); } return builder.build(); }).toArray(Block[]::new)); @@ -188,17 +185,17 @@ private Page randomPage(List columns) { @Override protected EsqlQueryResponse mutateInstance(EsqlQueryResponse instance) { boolean allNull = true; - for (ColumnInfo info : instance.columns()) { - if (false == info.type().equals("null")) { + for (ColumnInfoImpl info : instance.columns()) { + if (info.type() != DataType.NULL) { allNull = false; } } return switch (allNull ? 
between(0, 2) : between(0, 3)) { case 0 -> { int mutCol = between(0, instance.columns().size() - 1); - List cols = new ArrayList<>(instance.columns()); + List cols = new ArrayList<>(instance.columns()); // keep the type the same so the values are still valid but change the name - cols.set(mutCol, new ColumnInfo(cols.get(mutCol).name() + "mut", cols.get(mutCol).type())); + cols.set(mutCol, new ColumnInfoImpl(cols.get(mutCol).name() + "mut", cols.get(mutCol).type())); yield new EsqlQueryResponse(cols, deepCopyOfPages(instance), instance.profile(), instance.columnar(), instance.isAsync()); } case 1 -> new EsqlQueryResponse( @@ -275,7 +272,7 @@ public static class ResponseBuilder { IS_RUNNING, ObjectParser.ValueType.BOOLEAN_OR_NULL ); - parser.declareObjectArray(constructorArg(), (p, c) -> ColumnInfo.fromXContent(p), new ParseField("columns")); + parser.declareObjectArray(constructorArg(), (p, c) -> ColumnInfoImpl.fromXContent(p), new ParseField("columns")); parser.declareField(constructorArg(), (p, c) -> p.list(), new ParseField("values"), ObjectParser.ValueType.OBJECT_ARRAY); PARSER = parser.build(); } @@ -284,7 +281,12 @@ public static class ResponseBuilder { private final EsqlQueryResponse response; @ParserConstructor - public ResponseBuilder(@Nullable String asyncExecutionId, Boolean isRunning, List columns, List> values) { + public ResponseBuilder( + @Nullable String asyncExecutionId, + Boolean isRunning, + List columns, + List> values + ) { this.response = new EsqlQueryResponse( columns, List.of(valuesToPage(TestBlockFactory.getNonBreakingInstance(), columns, values)), @@ -374,7 +376,7 @@ public void testSimpleXContentRowsAsync() { public void testBasicXContentIdAndRunning() { try ( EsqlQueryResponse response = new EsqlQueryResponse( - List.of(new ColumnInfo("foo", "integer")), + List.of(new ColumnInfoImpl("foo", "integer")), List.of(new Page(blockFactory.newIntArrayVector(new int[] { 40, 80 }, 2).asBlock())), null, false, @@ -391,7 +393,7 @@ public void testBasicXContentIdAndRunning() { public void testNullColumnsXContentDropNulls() { try ( EsqlQueryResponse response = new EsqlQueryResponse( - List.of(new ColumnInfo("foo", "integer"), new ColumnInfo("all_null", "integer")), + List.of(new ColumnInfoImpl("foo", "integer"), new ColumnInfoImpl("all_null", "integer")), List.of(new Page(blockFactory.newIntArrayVector(new int[] { 40, 80 }, 2).asBlock(), blockFactory.newConstantNullBlock(2))), null, false, @@ -420,7 +422,7 @@ public void testNullColumnsFromBuilderXContentDropNulls() { b.appendNull(); try ( EsqlQueryResponse response = new EsqlQueryResponse( - List.of(new ColumnInfo("foo", "integer"), new ColumnInfo("all_null", "integer")), + List.of(new ColumnInfoImpl("foo", "integer"), new ColumnInfoImpl("all_null", "integer")), List.of(new Page(blockFactory.newIntArrayVector(new int[] { 40, 80 }, 2).asBlock(), b.build())), null, false, @@ -446,7 +448,7 @@ private EsqlQueryResponse simple(boolean columnar) { private EsqlQueryResponse simple(boolean columnar, boolean async) { return new EsqlQueryResponse( - List.of(new ColumnInfo("foo", "integer")), + List.of(new ColumnInfoImpl("foo", "integer")), List.of(new Page(blockFactory.newIntArrayVector(new int[] { 40, 80 }, 2).asBlock())), null, columnar, @@ -457,7 +459,7 @@ private EsqlQueryResponse simple(boolean columnar, boolean async) { public void testProfileXContent() { try ( EsqlQueryResponse response = new EsqlQueryResponse( - List.of(new ColumnInfo("foo", "integer")), + List.of(new ColumnInfoImpl("foo", "integer")), List.of(new 
Page(blockFactory.newIntArrayVector(new int[] { 40, 80 }, 2).asBlock())), new EsqlQueryResponse.Profile( List.of( @@ -522,7 +524,7 @@ public void testColumns() { var intBlk2 = blockFactory.newIntArrayVector(new int[] { 30, 40, 50 }, 3).asBlock(); var longBlk1 = blockFactory.newLongArrayVector(new long[] { 100L, 200L }, 2).asBlock(); var longBlk2 = blockFactory.newLongArrayVector(new long[] { 300L, 400L, 500L }, 3).asBlock(); - var columnInfo = List.of(new ColumnInfo("foo", "integer"), new ColumnInfo("bar", "long")); + var columnInfo = List.of(new ColumnInfoImpl("foo", "integer"), new ColumnInfoImpl("bar", "long")); var pages = List.of(new Page(intBlk1, longBlk1), new Page(intBlk2, longBlk2)); try (var response = new EsqlQueryResponse(columnInfo, pages, null, false, null, false, false)) { assertThat(columnValues(response.column(0)), contains(10, 20, 30, 40, 50)); @@ -534,7 +536,7 @@ public void testColumns() { public void testColumnsIllegalArg() { var intBlk1 = blockFactory.newIntArrayVector(new int[] { 10 }, 1).asBlock(); - var columnInfo = List.of(new ColumnInfo("foo", "integer")); + var columnInfo = List.of(new ColumnInfoImpl("foo", "integer")); var pages = List.of(new Page(intBlk1)); try (var response = new EsqlQueryResponse(columnInfo, pages, null, false, null, false, false)) { expectThrows(IllegalArgumentException.class, () -> response.column(-1)); @@ -553,7 +555,7 @@ public void testColumnsWithNull() { blk2 = bb2.appendInt(30).appendNull().appendNull().appendInt(60).build(); blk3 = bb3.appendNull().appendInt(80).appendInt(90).appendNull().build(); } - var columnInfo = List.of(new ColumnInfo("foo", "integer")); + var columnInfo = List.of(new ColumnInfoImpl("foo", "integer")); var pages = List.of(new Page(blk1), new Page(blk2), new Page(blk3)); try (var response = new EsqlQueryResponse(columnInfo, pages, null, false, null, false, false)) { assertThat(columnValues(response.column(0)), contains(10, null, 30, null, null, 60, null, 80, 90, null)); @@ -573,7 +575,7 @@ public void testColumnsWithMultiValue() { blk2 = bb2.beginPositionEntry().appendInt(40).appendInt(50).endPositionEntry().build(); blk3 = bb3.appendNull().appendInt(70).appendInt(80).appendNull().build(); } - var columnInfo = List.of(new ColumnInfo("foo", "integer")); + var columnInfo = List.of(new ColumnInfoImpl("foo", "integer")); var pages = List.of(new Page(blk1), new Page(blk2), new Page(blk3)); try (var response = new EsqlQueryResponse(columnInfo, pages, null, false, null, false, false)) { assertThat(columnValues(response.column(0)), contains(List.of(10, 20), null, List.of(40, 50), null, 70, 80, null)); @@ -585,7 +587,7 @@ public void testColumnsWithMultiValue() { public void testRowValues() { for (int times = 0; times < 10; times++) { int numColumns = randomIntBetween(1, 10); - List columns = randomList(numColumns, numColumns, this::randomColumnInfo); + List columns = randomList(numColumns, numColumns, this::randomColumnInfo); int noPages = randomIntBetween(1, 20); List pages = randomList(noPages, noPages, () -> randomPage(columns)); try (var resp = new EsqlQueryResponse(columns, pages, null, false, "", false, false)) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java index c78baabcd03a7..7c5dc73fb62af 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java @@ -11,11 +11,11 @@ import org.elasticsearch.xpack.esql.EsqlTestUtils; import org.elasticsearch.xpack.esql.core.index.EsIndex; import org.elasticsearch.xpack.esql.core.index.IndexResolution; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.enrich.ResolvedEnrichPolicy; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.plan.logical.Enrich; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; import java.util.ArrayList; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 794bdc23f08c5..c8c4179d2cdc9 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -32,10 +32,6 @@ import org.elasticsearch.xpack.esql.core.index.EsIndex; import org.elasticsearch.xpack.esql.core.index.IndexResolution; import org.elasticsearch.xpack.esql.core.plan.TableIdentifier; -import org.elasticsearch.xpack.esql.core.plan.logical.Filter; -import org.elasticsearch.xpack.esql.core.plan.logical.Limit; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.TypesTests; import org.elasticsearch.xpack.esql.enrich.ResolvedEnrichPolicy; @@ -47,10 +43,14 @@ import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; -import org.elasticsearch.xpack.esql.plan.logical.EsqlUnresolvedRelation; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.Filter; +import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Lookup; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.plan.logical.Row; +import org.elasticsearch.xpack.esql.plan.logical.UnresolvedRelation; import org.elasticsearch.xpack.esql.plan.logical.local.EsqlProject; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.elasticsearch.xpack.esql.session.IndexResolver; @@ -90,11 +90,13 @@ //@TestLogging(value = "org.elasticsearch.xpack.esql.analysis:TRACE", reason = "debug") public class AnalyzerTests extends ESTestCase { - private static final EsqlUnresolvedRelation UNRESOLVED_RELATION = new EsqlUnresolvedRelation( + private static final UnresolvedRelation UNRESOLVED_RELATION = new UnresolvedRelation( EMPTY, new TableIdentifier(EMPTY, null, "idx"), + false, List.of(), - IndexMode.STANDARD + IndexMode.STANDARD, + null ); private static final int MAX_LIMIT = EsqlPlugin.QUERY_RESULT_TRUNCATION_MAX_SIZE.getDefault(Settings.EMPTY); @@ -1832,13 +1834,13 @@ public void testUnsupportedTypesInStats() { found value [x] type [unsigned_long] line 2:20: argument of [count_distinct(x)] must be [any exact type except unsigned_long or counter types],\ found value 
[x] type [unsigned_long] - line 2:39: argument of [max(x)] must be [datetime or numeric except unsigned_long or counter types],\ + line 2:39: argument of [max(x)] must be [boolean, datetime or numeric except unsigned_long or counter types],\ found value [max(x)] type [unsigned_long] line 2:47: argument of [median(x)] must be [numeric except unsigned_long or counter types],\ found value [x] type [unsigned_long] line 2:58: argument of [median_absolute_deviation(x)] must be [numeric except unsigned_long or counter types],\ found value [x] type [unsigned_long] - line 2:88: argument of [min(x)] must be [datetime or numeric except unsigned_long or counter types],\ + line 2:88: argument of [min(x)] must be [boolean, datetime or numeric except unsigned_long or counter types],\ found value [min(x)] type [unsigned_long] line 2:96: first argument of [percentile(x, 10)] must be [numeric except unsigned_long],\ found value [x] type [unsigned_long] @@ -1852,13 +1854,13 @@ public void testUnsupportedTypesInStats() { Found 7 problems line 2:10: argument of [avg(x)] must be [numeric except unsigned_long or counter types],\ found value [x] type [version] - line 2:18: argument of [max(x)] must be [datetime or numeric except unsigned_long or counter types],\ + line 2:18: argument of [max(x)] must be [boolean, datetime or numeric except unsigned_long or counter types],\ found value [max(x)] type [version] line 2:26: argument of [median(x)] must be [numeric except unsigned_long or counter types],\ found value [x] type [version] line 2:37: argument of [median_absolute_deviation(x)] must be [numeric except unsigned_long or counter types],\ found value [x] type [version] - line 2:67: argument of [min(x)] must be [datetime or numeric except unsigned_long or counter types],\ + line 2:67: argument of [min(x)] must be [boolean, datetime or numeric except unsigned_long or counter types],\ found value [min(x)] type [version] line 2:75: first argument of [percentile(x, 10)] must be [numeric except unsigned_long], found value [x] type [version] line 2:94: argument of [sum(x)] must be [numeric except unsigned_long or counter types], found value [x] type [version]"""); @@ -1931,7 +1933,7 @@ public void testLookup() { | LOOKUP int_number_names ON int """; if (Build.current().isProductionRelease()) { - var e = expectThrows(VerificationException.class, () -> analyze(query)); + var e = expectThrows(ParsingException.class, () -> analyze(query)); assertThat(e.getMessage(), containsString("line 3:4: LOOKUP is in preview and only available in SNAPSHOT build")); return; } @@ -1982,39 +1984,45 @@ public void testLookup() { } public void testLookupMissingField() { - var e = expectThrows(VerificationException.class, () -> analyze(""" + String query = """ FROM test | LOOKUP int_number_names ON garbage - """)); + """; if (Build.current().isProductionRelease()) { - assertThat(e.getMessage(), containsString("line 3:4: LOOKUP is in preview and only available in SNAPSHOT build")); + var e = expectThrows(ParsingException.class, () -> analyze(query)); + assertThat(e.getMessage(), containsString("line 2:4: LOOKUP is in preview and only available in SNAPSHOT build")); return; } + var e = expectThrows(VerificationException.class, () -> analyze(query)); assertThat(e.getMessage(), containsString("Unknown column in lookup target [garbage]")); } public void testLookupMissingTable() { - var e = expectThrows(VerificationException.class, () -> analyze(""" + String query = """ FROM test | LOOKUP garbage ON a - """)); + """; if 
(Build.current().isProductionRelease()) { - assertThat(e.getMessage(), containsString("line 3:4: LOOKUP is in preview and only available in SNAPSHOT build")); + var e = expectThrows(ParsingException.class, () -> analyze(query)); + assertThat(e.getMessage(), containsString("line 2:4: LOOKUP is in preview and only available in SNAPSHOT build")); return; } + var e = expectThrows(VerificationException.class, () -> analyze(query)); assertThat(e.getMessage(), containsString("Unknown table [garbage]")); } public void testLookupMatchTypeWrong() { - var e = expectThrows(VerificationException.class, () -> analyze(""" + String query = """ FROM test | RENAME last_name AS int | LOOKUP int_number_names ON int - """)); + """; if (Build.current().isProductionRelease()) { + var e = expectThrows(ParsingException.class, () -> analyze(query)); assertThat(e.getMessage(), containsString("line 3:4: LOOKUP is in preview and only available in SNAPSHOT build")); return; } + var e = expectThrows(VerificationException.class, () -> analyze(query)); assertThat(e.getMessage(), containsString("column type mismatch, table column was [integer] and original column was [keyword]")); } @@ -2068,6 +2076,20 @@ public void testImplicitCasting() { assertThat(e.getMessage(), containsString("[+] has arguments with incompatible types [datetime] and [datetime]")); } + public void testRateRequiresCounterTypes() { + assumeTrue("rate requires snapshot builds", Build.current().isSnapshot()); + Analyzer analyzer = analyzer(tsdbIndexResolution()); + var query = "METRICS test avg(rate(network.connections))"; + VerificationException error = expectThrows(VerificationException.class, () -> analyze(query, analyzer)); + assertThat( + error.getMessage(), + containsString( + "first argument of [rate(network.connections)] must be" + + " [counter_long, counter_integer or counter_double], found value [network.connections] type [long]" + ) + ); + } + private void verifyUnsupported(String query, String errorMessage) { verifyUnsupported(query, errorMessage, "mapping-multi-field-variation.json"); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java index 27a42f79e39ff..0231dc1f4a82b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java @@ -13,14 +13,14 @@ import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.esql.core.ParsingException; import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.function.FunctionDefinition; import org.elasticsearch.xpack.esql.core.index.EsIndex; import org.elasticsearch.xpack.esql.core.index.IndexResolution; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.TypesTests; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; +import org.elasticsearch.xpack.esql.expression.function.FunctionDefinition; import org.elasticsearch.xpack.esql.parser.EsqlParser; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Row; import org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter; diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 8eef05bd9687b..00d12240e67e5 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.analysis; +import org.elasticsearch.Build; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.core.type.DataType; @@ -492,7 +493,8 @@ public void testAggregateOnCounter() { assertThat( error("FROM tests | STATS min(network.bytes_in)", tsdb), equalTo( - "1:20: argument of [min(network.bytes_in)] must be [datetime or numeric except unsigned_long or counter types]," + "1:20: argument of [min(network.bytes_in)] must be" + + " [boolean, datetime or numeric except unsigned_long or counter types]," + " found value [min(network.bytes_in)] type [counter_long]" ) ); @@ -500,7 +502,8 @@ public void testAggregateOnCounter() { assertThat( error("FROM tests | STATS max(network.bytes_in)", tsdb), equalTo( - "1:20: argument of [max(network.bytes_in)] must be [datetime or numeric except unsigned_long or counter types]," + "1:20: argument of [max(network.bytes_in)] must be" + + " [boolean, datetime or numeric except unsigned_long or counter types]," + " found value [max(network.bytes_in)] type [counter_long]" ) ); @@ -548,6 +551,83 @@ public void testAggsResolutionWithUnresolvedGroupings() { ); } + public void testNotAllowRateOutsideMetrics() { + assumeTrue("requires snapshot builds", Build.current().isSnapshot()); + assertThat( + error("FROM tests | STATS avg(rate(network.bytes_in))", tsdb), + equalTo("1:24: the rate aggregate[rate(network.bytes_in)] can only be used within the metrics command") + ); + assertThat( + error("METRICS tests | STATS sum(rate(network.bytes_in))", tsdb), + equalTo("1:27: the rate aggregate[rate(network.bytes_in)] can only be used within the metrics command") + ); + assertThat( + error("FROM tests | STATS rate(network.bytes_in)", tsdb), + equalTo("1:20: the rate aggregate[rate(network.bytes_in)] can only be used within the metrics command") + ); + assertThat( + error("FROM tests | EVAL r = rate(network.bytes_in)", tsdb), + equalTo("1:23: aggregate function [rate(network.bytes_in)] not allowed outside METRICS command") + ); + } + + public void testRateNotEnclosedInAggregate() { + assumeTrue("requires snapshot builds", Build.current().isSnapshot()); + assertThat( + error("METRICS tests rate(network.bytes_in)", tsdb), + equalTo( + "1:15: the rate aggregate [rate(network.bytes_in)] can only be used within the metrics command and inside another aggregate" + ) + ); + assertThat( + error("METRICS tests avg(rate(network.bytes_in)), rate(network.bytes_in)", tsdb), + equalTo( + "1:44: the rate aggregate [rate(network.bytes_in)] can only be used within the metrics command and inside another aggregate" + ) + ); + assertThat(error("METRICS tests max(avg(rate(network.bytes_in)))", tsdb), equalTo(""" + 1:19: nested aggregations [avg(rate(network.bytes_in))] not allowed inside other aggregations\ + [max(avg(rate(network.bytes_in)))] + line 1:23: the rate aggregate [rate(network.bytes_in)] can only be used within the metrics command\ + and inside another aggregate""")); + assertThat(error("METRICS tests max(avg(rate(network.bytes_in)))", tsdb), 
equalTo(""" + 1:19: nested aggregations [avg(rate(network.bytes_in))] not allowed inside other aggregations\ + [max(avg(rate(network.bytes_in)))] + line 1:23: the rate aggregate [rate(network.bytes_in)] can only be used within the metrics command\ + and inside another aggregate""")); + } + + public void testWeightedAvg() { + assertEquals( + "1:35: SECOND argument of [weighted_avg(v, null)] cannot be null or 0, received [null]", + error("row v = [1, 2, 3] | stats w_avg = weighted_avg(v, null)") + ); + assertEquals( + "1:27: SECOND argument of [weighted_avg(salary, null)] cannot be null or 0, received [null]", + error("from test | stats w_avg = weighted_avg(salary, null)") + ); + assertEquals( + "1:45: SECOND argument of [weighted_avg(v, w)] cannot be null or 0, received [null]", + error("row v = [1, 2, 3], w = null | stats w_avg = weighted_avg(v, w)") + ); + assertEquals( + "1:44: SECOND argument of [weighted_avg(salary, w)] cannot be null or 0, received [null]", + error("from test | eval w = null | stats w_avg = weighted_avg(salary, w)") + ); + assertEquals( + "1:51: SECOND argument of [weighted_avg(salary, w)] cannot be null or 0, received [null]", + error("from test | eval w = null + null | stats w_avg = weighted_avg(salary, w)") + ); + assertEquals( + "1:35: SECOND argument of [weighted_avg(v, 0)] cannot be null or 0, received [0]", + error("row v = [1, 2, 3] | stats w_avg = weighted_avg(v, 0)") + ); + assertEquals( + "1:27: SECOND argument of [weighted_avg(salary, 0.0)] cannot be null or 0, received [0.0]", + error("from test | stats w_avg = weighted_avg(salary, 0.0)") + ); + } + private String error(String query) { return error(query, defaultAnalyzer); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java index 33f9cb3123b8d..5e50af6a0d212 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java @@ -17,6 +17,8 @@ import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.expression.function.ReferenceAttributeTests; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; +import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; @@ -76,8 +78,6 @@ protected boolean alwaysEmptySource() { return false; } - protected abstract List getNamedWriteables(); - public EsqlConfiguration configuration() { return config; } @@ -85,10 +85,15 @@ public EsqlConfiguration configuration() { @Override protected final NamedWriteableRegistry getNamedWriteableRegistry() { List entries = new ArrayList<>(NamedExpression.getNamedWriteables()); + entries.addAll(Expression.getNamedWriteables()); entries.addAll(Attribute.getNamedWriteables()); + entries.addAll(EsqlScalarFunction.getNamedWriteables()); + entries.addAll(AggregateFunction.getNamedWriteables()); entries.add(UnsupportedAttribute.ENTRY); + entries.add(UnsupportedAttribute.NAMED_EXPRESSION_ENTRY); + 
entries.add(UnsupportedAttribute.EXPRESSION_ENTRY); entries.addAll(EsField.getNamedWriteables()); - entries.addAll(getNamedWriteables()); + entries.add(org.elasticsearch.xpack.esql.expression.Order.ENTRY); return new NamedWriteableRegistry(entries); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractUnaryScalarSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractUnaryScalarSerializationTests.java index d8290966acbdd..8581699b83fbd 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractUnaryScalarSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractUnaryScalarSerializationTests.java @@ -7,13 +7,11 @@ package org.elasticsearch.xpack.esql.expression; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; import java.io.IOException; -import java.util.List; public abstract class AbstractUnaryScalarSerializationTests extends AbstractExpressionSerializationTests { protected abstract T create(Source source, Expression child); @@ -28,9 +26,4 @@ protected final T mutateInstance(T instance) throws IOException { Expression child = randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild); return create(instance.source(), child); } - - @Override - protected List getNamedWriteables() { - return UnaryScalarFunction.getNamedWriteables(); - } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AliasTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AliasTests.java index ce7aa789f89b1..a6808e835bc3c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AliasTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AliasTests.java @@ -81,6 +81,7 @@ protected final NamedWriteableRegistry getNamedWriteableRegistry() { entries.addAll(Attribute.getNamedWriteables()); entries.add(UnsupportedAttribute.ENTRY); entries.addAll(EsField.getNamedWriteables()); + entries.addAll(Expression.getNamedWriteables()); return new NamedWriteableRegistry(entries); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/LiteralSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/LiteralSerializationTests.java index 39e18bf9761ec..fa6041c6d2e58 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/LiteralSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/LiteralSerializationTests.java @@ -7,12 +7,10 @@ package org.elasticsearch.xpack.esql.expression; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.expression.LiteralTests; import java.io.IOException; -import java.util.List; public class LiteralSerializationTests extends AbstractExpressionSerializationTests { @Override @@ -25,11 +23,6 @@ protected Literal mutateInstance(Literal instance) throws IOException { return LiteralTests.mutateLiteral(instance); } - @Override - protected List getNamedWriteables() { - return List.of(Literal.ENTRY); - 
} - @Override protected boolean alwaysEmptySource() { return true; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/OrderSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/OrderSerializationTests.java index dd2671f4cf86d..3c5a77daf8832 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/OrderSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/OrderSerializationTests.java @@ -7,12 +7,10 @@ package org.elasticsearch.xpack.esql.expression; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import java.io.IOException; -import java.util.List; public class OrderSerializationTests extends AbstractExpressionSerializationTests { @Override @@ -42,11 +40,6 @@ protected Order mutateInstance(Order instance) throws IOException { return new Order(source, child, direction, nulls); } - @Override - protected List getNamedWriteables() { - return List.of(Order.ENTRY); - } - @Override protected boolean alwaysEmptySource() { return true; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java new file mode 100644 index 0000000000000..792c6b5139796 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java @@ -0,0 +1,341 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function; + +import org.elasticsearch.compute.aggregation.Aggregator; +import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.AggregatorMode; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; +import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.util.NumericUtils; +import org.elasticsearch.xpack.esql.expression.SurrogateExpression; +import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.esql.optimizer.FoldNull; +import org.elasticsearch.xpack.esql.planner.PlannerUtils; +import org.elasticsearch.xpack.esql.planner.ToAggregator; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.function.Consumer; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.elasticsearch.compute.data.BlockUtils.toJavaObject; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.oneOf; + +/** + * Base class for aggregation tests. + */ +public abstract class AbstractAggregationTestCase extends AbstractFunctionTestCase { + /** + * Converts a list of aggregation test cases into a list of parameter suppliers. + * Also, adds a default set of extra test cases. + *
<p> + * Use if possible, as this method may get updated with new checks in the future. + * </p>
      + */ + protected static Iterable parameterSuppliersFromTypedDataWithDefaultChecks(List suppliers) { + // TODO: Add case with no input expecting null + return parameterSuppliersFromTypedData(withNoRowsExpectingNull(randomizeBytesRefsOffset(suppliers))); + } + + /** + * Adds a test case with no rows, expecting null, to the list of suppliers. + */ + protected static List withNoRowsExpectingNull(List suppliers) { + List newSuppliers = new ArrayList<>(suppliers); + Set> uniqueSignatures = new HashSet<>(); + + for (TestCaseSupplier original : suppliers) { + if (uniqueSignatures.add(original.types())) { + newSuppliers.add(new TestCaseSupplier(original.name() + " with no rows", original.types(), () -> { + var testCase = original.get(); + + if (testCase.getData().stream().noneMatch(TestCaseSupplier.TypedData::isMultiRow)) { + // Fail if no multi-row data, at least until a real case is found + fail("No multi-row data found in test case: " + testCase); + } + + var newData = testCase.getData().stream().map(td -> td.isMultiRow() ? td.withData(List.of()) : td).toList(); + + return new TestCaseSupplier.TestCase( + newData, + testCase.evaluatorToString(), + testCase.expectedType(), + nullValue(), + null, + testCase.getExpectedTypeError(), + null, + null + ); + })); + } + } + + return newSuppliers; + } + + public void testAggregate() { + Expression expression = randomBoolean() ? buildDeepCopyOfFieldExpression(testCase) : buildFieldExpression(testCase); + + resolveExpression(expression, this::aggregateSingleMode, this::evaluate); + } + + public void testAggregateIntermediate() { + Expression expression = randomBoolean() ? buildDeepCopyOfFieldExpression(testCase) : buildFieldExpression(testCase); + + resolveExpression(expression, this::aggregateWithIntermediates, this::evaluate); + } + + public void testFold() { + Expression expression = buildLiteralExpression(testCase); + + resolveExpression(expression, aggregatorFunctionSupplier -> { + // An aggregation cannot be folded + }, evaluableExpression -> { + assertTrue(evaluableExpression.foldable()); + if (testCase.foldingExceptionClass() == null) { + Object result = evaluableExpression.fold(); + // Decode unsigned longs into BigIntegers + if (testCase.expectedType() == DataType.UNSIGNED_LONG && result != null) { + result = NumericUtils.unsignedLongAsBigInteger((Long) result); + } + assertThat(result, testCase.getMatcher()); + if (testCase.getExpectedWarnings() != null) { + assertWarnings(testCase.getExpectedWarnings()); + } + } else { + Throwable t = expectThrows(testCase.foldingExceptionClass(), evaluableExpression::fold); + assertThat(t.getMessage(), equalTo(testCase.foldingExceptionMessage())); + } + }); + } + + private void aggregateSingleMode(Expression expression) { + Object result; + try (var aggregator = aggregator(expression, initialInputChannels(), AggregatorMode.SINGLE)) { + for (Page inputPage : rows(testCase.getMultiRowFields())) { + try { + aggregator.processPage(inputPage); + } finally { + inputPage.releaseBlocks(); + } + } + + result = extractResultFromAggregator(aggregator, PlannerUtils.toElementType(testCase.expectedType())); + } + + assertThat(result, not(equalTo(Double.NaN))); + assert testCase.getMatcher().matches(Double.POSITIVE_INFINITY) == false; + assertThat(result, not(equalTo(Double.POSITIVE_INFINITY))); + assert testCase.getMatcher().matches(Double.NEGATIVE_INFINITY) == false; + assertThat(result, not(equalTo(Double.NEGATIVE_INFINITY))); + assertThat(result, testCase.getMatcher()); + if (testCase.getExpectedWarnings() != null) 
{ + assertWarnings(testCase.getExpectedWarnings()); + } + } + + private void aggregateWithIntermediates(Expression expression) { + int intermediateBlockOffset = randomIntBetween(0, 10); + Block[] intermediateBlocks; + int intermediateStates; + + // Input rows to intermediate states + try (var aggregator = aggregator(expression, initialInputChannels(), AggregatorMode.INITIAL)) { + intermediateStates = aggregator.evaluateBlockCount(); + + int intermediateBlockExtraSize = randomIntBetween(0, 10); + intermediateBlocks = new Block[intermediateBlockOffset + intermediateStates + intermediateBlockExtraSize]; + + for (Page inputPage : rows(testCase.getMultiRowFields())) { + try { + aggregator.processPage(inputPage); + } finally { + inputPage.releaseBlocks(); + } + } + + aggregator.evaluate(intermediateBlocks, intermediateBlockOffset, driverContext()); + + int positionCount = intermediateBlocks[intermediateBlockOffset].getPositionCount(); + + // Fill offset and extra blocks with nulls + for (int i = 0; i < intermediateBlockOffset; i++) { + intermediateBlocks[i] = driverContext().blockFactory().newConstantNullBlock(positionCount); + } + for (int i = intermediateBlockOffset + intermediateStates; i < intermediateBlocks.length; i++) { + intermediateBlocks[i] = driverContext().blockFactory().newConstantNullBlock(positionCount); + } + } + + Object result; + // Intermediate states to final result + try ( + var aggregator = aggregator( + expression, + intermediaryInputChannels(intermediateStates, intermediateBlockOffset), + AggregatorMode.FINAL + ) + ) { + Page inputPage = new Page(intermediateBlocks); + try { + if (inputPage.getPositionCount() > 0) { + aggregator.processPage(inputPage); + } + } finally { + inputPage.releaseBlocks(); + } + + result = extractResultFromAggregator(aggregator, PlannerUtils.toElementType(testCase.expectedType())); + } + + assertThat(result, not(equalTo(Double.NaN))); + assert testCase.getMatcher().matches(Double.POSITIVE_INFINITY) == false; + assertThat(result, not(equalTo(Double.POSITIVE_INFINITY))); + assert testCase.getMatcher().matches(Double.NEGATIVE_INFINITY) == false; + assertThat(result, not(equalTo(Double.NEGATIVE_INFINITY))); + assertThat(result, testCase.getMatcher()); + if (testCase.getExpectedWarnings() != null) { + assertWarnings(testCase.getExpectedWarnings()); + } + } + + private void evaluate(Expression evaluableExpression) { + Object result; + try (var evaluator = evaluator(evaluableExpression).get(driverContext())) { + try (Block block = evaluator.eval(row(testCase.getDataValues()))) { + result = toJavaObjectUnsignedLongAware(block, 0); + } + } + + assertThat(result, not(equalTo(Double.NaN))); + assert testCase.getMatcher().matches(Double.POSITIVE_INFINITY) == false; + assertThat(result, not(equalTo(Double.POSITIVE_INFINITY))); + assert testCase.getMatcher().matches(Double.NEGATIVE_INFINITY) == false; + assertThat(result, not(equalTo(Double.NEGATIVE_INFINITY))); + assertThat(result, testCase.getMatcher()); + if (testCase.getExpectedWarnings() != null) { + assertWarnings(testCase.getExpectedWarnings()); + } + } + + private void resolveExpression(Expression expression, Consumer onAggregator, Consumer onEvaluableExpression) { + logger.info( + "Test Values: " + testCase.getData().stream().map(TestCaseSupplier.TypedData::toString).collect(Collectors.joining(",")) + ); + if (testCase.getExpectedTypeError() != null) { + assertTypeResolutionFailure(expression); + return; + } + expression = resolveSurrogates(expression); + + Expression.TypeResolution resolution = 
expression.typeResolved(); + if (resolution.unresolved()) { + throw new AssertionError("expected resolved " + resolution.message()); + } + + expression = new FoldNull().rule(expression); + assertThat(expression.dataType(), equalTo(testCase.expectedType())); + + assumeTrue( + "Surrogate expression with non-trivial children cannot be evaluated", + expression.children() + .stream() + .allMatch(child -> child instanceof FieldAttribute || child instanceof DeepCopy || child instanceof Literal) + ); + + if (expression instanceof AggregateFunction == false) { + onEvaluableExpression.accept(expression); + return; + } + + assertThat(expression, instanceOf(ToAggregator.class)); + logger.info("Result type: " + expression.dataType()); + + onAggregator.accept(expression); + } + + private Object extractResultFromAggregator(Aggregator aggregator, ElementType expectedElementType) { + var blocksArraySize = randomIntBetween(1, 10); + var resultBlockIndex = randomIntBetween(0, blocksArraySize - 1); + var blocks = new Block[blocksArraySize]; + try { + aggregator.evaluate(blocks, resultBlockIndex, driverContext()); + + var block = blocks[resultBlockIndex]; + + // For null blocks, the element type is NULL, so if the provided matcher matches, the type works too + assertThat(block.elementType(), is(oneOf(expectedElementType, ElementType.NULL))); + + return toJavaObject(blocks[resultBlockIndex], 0); + } finally { + Releasables.close(blocks); + } + } + + private List initialInputChannels() { + // TODO: Randomize channels + // TODO: If surrogated, channels may change + return IntStream.range(0, testCase.getMultiRowFields().size()).boxed().toList(); + } + + private List intermediaryInputChannels(int intermediaryStates, int offset) { + return IntStream.range(offset, offset + intermediaryStates).boxed().toList(); + } + + /** + * Resolves surrogates of aggregations until a non-surrogate expression is found. + *
<p> + * No-op if expecting errors, as surrogates depend on correct types + * </p>
      + */ + private Expression resolveSurrogates(Expression expression) { + if (testCase.getExpectedTypeError() != null) { + return expression; + } + + for (int i = 0;; i++) { + assertThat("Potential infinite loop detected in surrogates", i, lessThan(10)); + + if (expression instanceof SurrogateExpression == false) { + break; + } + + var surrogate = ((SurrogateExpression) expression).surrogate(); + + if (surrogate == null) { + break; + } + + expression = surrogate; + } + + return expression; + } + + private Aggregator aggregator(Expression expression, List inputChannels, AggregatorMode mode) { + AggregatorFunctionSupplier aggregatorFunctionSupplier = ((ToAggregator) expression).supplier(inputChannels); + + return new Aggregator(aggregatorFunctionSupplier.aggregator(driverContext()), mode); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index 249d4f7349517..f2ca17e2743bb 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -15,7 +15,6 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreaker; -import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; @@ -24,14 +23,10 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockUtils; -import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.Page; -import org.elasticsearch.compute.data.Vector; import org.elasticsearch.compute.operator.DriverContext; -import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.core.PathUtils; -import org.elasticsearch.core.Releasables; import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.geo.ShapeTestUtils; import org.elasticsearch.indices.CrankyCircuitBreakerService; @@ -45,24 +40,17 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.Literal; -import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; -import org.elasticsearch.xpack.esql.core.expression.function.FunctionDefinition; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.core.util.NumericUtils; import org.elasticsearch.xpack.esql.core.util.StringUtils; import org.elasticsearch.xpack.esql.evaluator.EvalMapper; -import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Greatest; -import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.AbstractMultivalueFunctionTestCase; -import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; import org.elasticsearch.xpack.esql.optimizer.FoldNull; import org.elasticsearch.xpack.esql.parser.ExpressionBuilder; import 
org.elasticsearch.xpack.esql.planner.Layout; import org.elasticsearch.xpack.esql.planner.PlannerUtils; -import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.versionfield.Version; -import org.hamcrest.Matcher; import org.junit.After; import org.junit.AfterClass; @@ -85,31 +73,21 @@ import java.util.Map; import java.util.Set; import java.util.TreeSet; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; import java.util.function.Function; import java.util.stream.Collectors; -import java.util.stream.IntStream; -import java.util.stream.Stream; import static org.elasticsearch.compute.data.BlockUtils.toJavaObject; import static org.elasticsearch.xpack.esql.SerializationTestUtils.assertSerialization; import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.CARTESIAN; import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.GEO; -import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isSpatial; import static org.hamcrest.Matchers.either; +import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.nullValue; -import static org.hamcrest.Matchers.sameInstance; /** - * Base class for function tests. Tests based on this class will generally build out a single example evaluation, - * which can be automatically tested against several scenarios (null handling, concurrency, etc). + * Base class for function tests. */ public abstract class AbstractFunctionTestCase extends ESTestCase { /** @@ -152,6 +130,12 @@ public static Literal randomLiteral(DataType type) { protected TestCaseSupplier.TestCase testCase; + /** + * Converts typed test suppliers to parameterized test parameters. + *
<p> + * Use {@code parameterSuppliersFromTypedDataWithDefaultChecks()} instead if possible, as it automatically add default checks. + * </p>
      + */ protected static Iterable parameterSuppliersFromTypedData(List suppliers) { // TODO rename this method to something more descriptive. Javadoc. And make sure all parameters are "representable" types. List parameters = new ArrayList<>(suppliers.size()); @@ -161,24 +145,6 @@ protected static Iterable parameterSuppliersFromTypedData(List - * Use if possible, as this method may get updated with new checks in the future. - *
      - * - * @param entirelyNullPreservesType See {@link #anyNullIsNull(boolean, List)} - */ - protected static Iterable parameterSuppliersFromTypedDataWithDefaultChecks( - boolean entirelyNullPreservesType, - List suppliers - ) { - return parameterSuppliersFromTypedData( - errorsForCasesWithoutExamples(anyNullIsNull(entirelyNullPreservesType, randomizeBytesRefsOffset(suppliers))) - ); - } - /** * Build an {@link Attribute} that loads a field. */ @@ -224,6 +190,7 @@ protected final Expression buildDeepCopyOfFieldExpression(TestCaseSupplier.TestC } protected final Expression buildLiteralExpression(TestCaseSupplier.TestCase testCase) { + assumeTrue("Data can't be converted to literals", testCase.canGetDataAsLiterals()); return build(testCase.getSource(), testCase.getDataAsLiterals()); } @@ -250,240 +217,79 @@ protected final Page row(List values) { } /** - * Hack together a layout by scanning for Fields. - * Those will show up in the layout in whatever order a depth first traversal finds them. + * Creates a list of pages based on a list of multi-row fields. */ - protected static void buildLayout(Layout.Builder builder, Expression e) { - if (e instanceof FieldAttribute f) { - builder.append(f); - return; + protected final List rows(List multirowFields) { + if (multirowFields.isEmpty()) { + return List.of(); } - for (Expression c : e.children()) { - buildLayout(builder, c); - } - } - protected final void assertResolveTypeValid(Expression expression, DataType expectedType) { - assertTrue(expression.typeResolved().resolved()); - assertThat(expression.dataType(), equalTo(expectedType)); - } + var rowsCount = multirowFields.get(0).multiRowData().size(); - public final void testEvaluate() { - assumeTrue("Can't build evaluator", testCase.canBuildEvaluator()); - assumeTrue("Expected type must be representable to build an evaluator", EsqlDataTypes.isRepresentable(testCase.expectedType())); - logger.info( - "Test Values: " + testCase.getData().stream().map(TestCaseSupplier.TypedData::toString).collect(Collectors.joining(",")) - ); - boolean readFloating = randomBoolean(); - Expression expression = readFloating ? 
buildDeepCopyOfFieldExpression(testCase) : buildFieldExpression(testCase); - if (testCase.getExpectedTypeError() != null) { - assertTypeResolutionFailure(expression); - return; - } - Expression.TypeResolution resolution = expression.typeResolved(); - if (resolution.unresolved()) { - throw new AssertionError("expected resolved " + resolution.message()); - } - expression = new FoldNull().rule(expression); - assertThat(expression.dataType(), equalTo(testCase.expectedType())); - logger.info("Result type: " + expression.dataType()); + multirowFields.stream() + .skip(1) + .forEach( + field -> assertThat("All multi-row fields must have the same number of rows", field.multiRowData(), hasSize(rowsCount)) + ); - Object result; - try (ExpressionEvaluator evaluator = evaluator(expression).get(driverContext())) { - try (Block block = evaluator.eval(row(testCase.getDataValues()))) { - result = toJavaObjectUnsignedLongAware(block, 0); + List pages = new ArrayList<>(); + + int pageSize = randomIntBetween(1, 100); + for (int initialRow = 0; initialRow < rowsCount;) { + if (pageSize > rowsCount - initialRow) { + pageSize = rowsCount - initialRow; } - } - assertThat(result, not(equalTo(Double.NaN))); - assert testCase.getMatcher().matches(Double.POSITIVE_INFINITY) == false; - assertThat(result, not(equalTo(Double.POSITIVE_INFINITY))); - assert testCase.getMatcher().matches(Double.NEGATIVE_INFINITY) == false; - assertThat(result, not(equalTo(Double.NEGATIVE_INFINITY))); - assertThat(result, testCase.getMatcher()); - if (testCase.getExpectedWarnings() != null) { - assertWarnings(testCase.getExpectedWarnings()); - } - } - private Object toJavaObjectUnsignedLongAware(Block block, int position) { - Object result; - result = toJavaObject(block, position); - if (result != null && testCase.expectedType() == DataType.UNSIGNED_LONG) { - assertThat(result, instanceOf(Long.class)); - result = NumericUtils.unsignedLongAsBigInteger((Long) result); - } - return result; - } + var blocks = new Block[multirowFields.size()]; - /** - * Evaluates a {@link Block} of values, all copied from the input pattern.. - *
<p> - * Note that this'll sometimes be a {@link Vector} of values if the - * input pattern contained only a single value. - * </p>
      - */ - public final void testEvaluateBlockWithoutNulls() { - assumeTrue("no warning is expected", testCase.getExpectedWarnings() == null); - try { - testEvaluateBlock(driverContext().blockFactory(), driverContext(), false); - } catch (CircuitBreakingException ex) { - assertThat(ex.getMessage(), equalTo(MockBigArrays.ERROR_MESSAGE)); - assertFalse("Test data is too large to fit in the memory", true); - } - } + for (int i = 0; i < multirowFields.size(); i++) { + var field = multirowFields.get(i); + try ( + var wrapper = BlockUtils.wrapperFor( + TestBlockFactory.getNonBreakingInstance(), + PlannerUtils.toElementType(field.type()), + pageSize + ) + ) { + var multiRowData = field.multiRowData(); + for (int row = initialRow; row < initialRow + pageSize; row++) { + wrapper.accept(multiRowData.get(row)); + } - /** - * Evaluates a {@link Block} of values, all copied from the input pattern with - * some null values inserted between. - */ - public final void testEvaluateBlockWithNulls() { - assumeTrue("no warning is expected", testCase.getExpectedWarnings() == null); - try { - testEvaluateBlock(driverContext().blockFactory(), driverContext(), true); - } catch (CircuitBreakingException ex) { - assertThat(ex.getMessage(), equalTo(MockBigArrays.ERROR_MESSAGE)); - assertFalse("Test data is too large to fit in the memory", true); - } - } + blocks[i] = wrapper.builder().build(); + } + } - /** - * Evaluates a {@link Block} of values, all copied from the input pattern, - * using the {@link CrankyCircuitBreakerService} which fails randomly. - *

      - * Note that this'll sometimes be a {@link Vector} of values if the - * input pattern contained only a single value. - *
      - */ - public final void testCrankyEvaluateBlockWithoutNulls() { - assumeTrue("sometimes the cranky breaker silences warnings, just skip these cases", testCase.getExpectedWarnings() == null); - try { - testEvaluateBlock(driverContext().blockFactory(), crankyContext(), false); - } catch (CircuitBreakingException ex) { - assertThat(ex.getMessage(), equalTo(CrankyCircuitBreakerService.ERROR_MESSAGE)); + pages.add(new Page(pageSize, blocks)); + initialRow += pageSize; + pageSize = randomIntBetween(1, 100); } - } - /** - * Evaluates a {@link Block} of values, all copied from the input pattern with - * some null values inserted between, using the {@link CrankyCircuitBreakerService} which fails randomly. - */ - public final void testCrankyEvaluateBlockWithNulls() { - assumeTrue("sometimes the cranky breaker silences warnings, just skip these cases", testCase.getExpectedWarnings() == null); - try { - testEvaluateBlock(driverContext().blockFactory(), crankyContext(), true); - } catch (CircuitBreakingException ex) { - assertThat(ex.getMessage(), equalTo(CrankyCircuitBreakerService.ERROR_MESSAGE)); - } + return pages; } /** - * Does the function produce the same output regardless of input? + * Hack together a layout by scanning for Fields. + * Those will show up in the layout in whatever order a depth first traversal finds them. */ - protected Matcher allNullsMatcher() { - return nullValue(); - } - - private void testEvaluateBlock(BlockFactory inputBlockFactory, DriverContext context, boolean insertNulls) { - Expression expression = randomBoolean() ? buildDeepCopyOfFieldExpression(testCase) : buildFieldExpression(testCase); - if (testCase.getExpectedTypeError() != null) { - assertTypeResolutionFailure(expression); + protected static void buildLayout(Layout.Builder builder, Expression e) { + if (e instanceof FieldAttribute f) { + builder.append(f); return; } - assumeTrue("Can't build evaluator", testCase.canBuildEvaluator()); - assumeTrue("Expected type must be representable to build an evaluator", EsqlDataTypes.isRepresentable(testCase.expectedType())); - int positions = between(1, 1024); - List data = testCase.getData(); - Page onePositionPage = row(testCase.getDataValues()); - Block[] manyPositionsBlocks = new Block[Math.toIntExact(data.stream().filter(d -> d.isForceLiteral() == false).count())]; - Set nullPositions = insertNulls - ? 
IntStream.range(0, positions).filter(i -> randomBoolean()).mapToObj(Integer::valueOf).collect(Collectors.toSet()) - : Set.of(); - if (nullPositions.size() == positions) { - nullPositions = Set.of(); - } - try { - int b = 0; - for (TestCaseSupplier.TypedData d : data) { - if (d.isForceLiteral()) { - continue; - } - ElementType elementType = PlannerUtils.toElementType(d.type()); - try (Block.Builder builder = elementType.newBlockBuilder(positions, inputBlockFactory)) { - for (int p = 0; p < positions; p++) { - if (nullPositions.contains(p)) { - builder.appendNull(); - } else { - builder.copyFrom(onePositionPage.getBlock(b), 0, 1); - } - } - manyPositionsBlocks[b] = builder.build(); - } - b++; - } - try ( - ExpressionEvaluator eval = evaluator(expression).get(context); - Block block = eval.eval(new Page(positions, manyPositionsBlocks)) - ) { - for (int p = 0; p < positions; p++) { - if (nullPositions.contains(p)) { - assertThat(toJavaObject(block, p), allNullsMatcher()); - continue; - } - assertThat(toJavaObjectUnsignedLongAware(block, p), testCase.getMatcher()); - } - assertThat( - "evaluates to tracked block", - block.blockFactory(), - either(sameInstance(context.blockFactory())).or(sameInstance(inputBlockFactory)) - ); - } - } finally { - Releasables.close(onePositionPage::releaseBlocks, Releasables.wrap(manyPositionsBlocks)); - } - if (testCase.getExpectedWarnings() != null) { - assertWarnings(testCase.getExpectedWarnings()); + for (Expression c : e.children()) { + buildLayout(builder, c); } } - public void testSimpleWithNulls() { // TODO replace this with nulls inserted into the test case like anyNullIsNull - Expression expression = buildFieldExpression(testCase); - if (testCase.getExpectedTypeError() != null) { - assertTypeResolutionFailure(expression); - return; - } - assumeTrue("Can't build evaluator", testCase.canBuildEvaluator()); - List simpleData = testCase.getDataValues(); - try (EvalOperator.ExpressionEvaluator eval = evaluator(expression).get(driverContext())) { - BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); - Block[] orig = BlockUtils.fromListRow(blockFactory, simpleData); - for (int i = 0; i < orig.length; i++) { - List data = new ArrayList<>(); - Block[] blocks = new Block[orig.length]; - for (int b = 0; b < blocks.length; b++) { - if (b == i) { - blocks[b] = orig[b].elementType().newBlockBuilder(1, blockFactory).appendNull().build(); - data.add(null); - } else { - blocks[b] = orig[b]; - data.add(simpleData.get(b)); - } - } - try (Block block = eval.eval(new Page(blocks))) { - assertSimpleWithNulls(data, block, i); - } - } - - // Note: the null-in-fast-null-out handling prevents any exception from being thrown, so the warnings provided in some test - // cases won't actually be registered. This isn't an issue for unary functions, but could be an issue for n-ary ones, if - // function processing of the first parameter(s) could raise an exception/warning. (But hasn't been the case so far.) - // N-ary non-MV functions dealing with one multivalue (before hitting the null parameter injected above) will now trigger - // a warning ("SV-function encountered a MV") that thus needs to be checked. 
- if (this instanceof AbstractMultivalueFunctionTestCase == false - && simpleData.stream().anyMatch(List.class::isInstance) - && testCase.getExpectedWarnings() != null) { - assertWarnings(testCase.getExpectedWarnings()); - } + protected Object toJavaObjectUnsignedLongAware(Block block, int position) { + Object result; + result = toJavaObject(block, position); + if (result != null && testCase.expectedType() == DataType.UNSIGNED_LONG) { + assertThat(result, instanceOf(Long.class)); + result = NumericUtils.unsignedLongAsBigInteger((Long) result); } + return result; } protected void assertSimpleWithNulls(List data, Block value, int nullBlock) { @@ -491,90 +297,39 @@ protected void assertSimpleWithNulls(List data, Block value, int nullBlo assertTrue("argument " + nullBlock + " is null", value.isNull(0)); } - public final void testEvaluateInManyThreads() throws ExecutionException, InterruptedException { - Expression expression = buildFieldExpression(testCase); - if (testCase.getExpectedTypeError() != null) { - assertTypeResolutionFailure(expression); - return; - } - assumeTrue("Can't build evaluator", testCase.canBuildEvaluator()); - assumeTrue("Expected type must be representable to build an evaluator", EsqlDataTypes.isRepresentable(testCase.expectedType())); - int count = 10_000; - int threads = 5; - var evalSupplier = evaluator(expression); - ExecutorService exec = Executors.newFixedThreadPool(threads); - try { - List> futures = new ArrayList<>(); - for (int i = 0; i < threads; i++) { - List simpleData = testCase.getDataValues(); - Page page = row(simpleData); - - futures.add(exec.submit(() -> { - try (EvalOperator.ExpressionEvaluator eval = evalSupplier.get(driverContext())) { - for (int c = 0; c < count; c++) { - try (Block block = eval.eval(page)) { - assertThat(toJavaObjectUnsignedLongAware(block, 0), testCase.getMatcher()); - } - } - } - })); - } - for (Future f : futures) { - f.get(); - } - } finally { - exec.shutdown(); - } - } + /** + * Modifies suppliers to generate BytesRefs with random offsets. 
+ */ + protected static List randomizeBytesRefsOffset(List testCaseSuppliers) { + return testCaseSuppliers.stream().map(supplier -> new TestCaseSupplier(supplier.name(), supplier.types(), () -> { + var testCase = supplier.supplier().get(); - public final void testEvaluatorToString() { - Expression expression = buildFieldExpression(testCase); - if (testCase.getExpectedTypeError() != null) { - assertTypeResolutionFailure(expression); - return; - } - assumeTrue("Can't build evaluator", testCase.canBuildEvaluator()); - var factory = evaluator(expression); - try (ExpressionEvaluator ev = factory.get(driverContext())) { - assertThat(ev.toString(), testCase.evaluatorToString()); - } - } + var newData = testCase.getData().stream().map(typedData -> { + if (typedData.data() instanceof BytesRef bytesRef) { + var offset = randomIntBetween(0, 10); + var extraLength = randomIntBetween(0, 10); + var newBytesArray = randomByteArrayOfLength(bytesRef.length + offset + extraLength); - public final void testFactoryToString() { - Expression expression = buildFieldExpression(testCase); - if (testCase.getExpectedTypeError() != null) { - assertTypeResolutionFailure(expression); - return; - } - assumeTrue("Can't build evaluator", testCase.canBuildEvaluator()); - var factory = evaluator(buildFieldExpression(testCase)); - assertThat(factory.toString(), testCase.evaluatorToString()); - } + System.arraycopy(bytesRef.bytes, bytesRef.offset, newBytesArray, offset, bytesRef.length); - public final void testFold() { - Expression expression = buildLiteralExpression(testCase); - if (testCase.getExpectedTypeError() != null) { - assertTypeResolutionFailure(expression); - return; - } - assertFalse(expression.typeResolved().unresolved()); - Expression nullOptimized = new FoldNull().rule(expression); - assertThat(nullOptimized.dataType(), equalTo(testCase.expectedType())); - assertTrue(nullOptimized.foldable()); - if (testCase.foldingExceptionClass() == null) { - Object result = nullOptimized.fold(); - // Decode unsigned longs into BigIntegers - if (testCase.expectedType() == DataType.UNSIGNED_LONG && result != null) { - result = NumericUtils.unsignedLongAsBigInteger((Long) result); - } - assertThat(result, testCase.getMatcher()); - if (testCase.getExpectedWarnings() != null) { - assertWarnings(testCase.getExpectedWarnings()); - } - } else { - Throwable t = expectThrows(testCase.foldingExceptionClass(), nullOptimized::fold); - assertThat(t.getMessage(), equalTo(testCase.foldingExceptionMessage())); - } + var newBytesRef = new BytesRef(newBytesArray, offset, bytesRef.length); + + return typedData.withData(newBytesRef); + } + return typedData; + }).toList(); + + return new TestCaseSupplier.TestCase( + newData, + testCase.evaluatorToString(), + testCase.expectedType(), + testCase.getMatcher(), + testCase.getExpectedWarnings(), + testCase.getExpectedTypeError(), + testCase.foldingExceptionClass(), + testCase.foldingExceptionMessage() + ); + })).toList(); } public void testSerializationOfSimple() { @@ -596,6 +351,12 @@ public static void testFunctionInfo() { List args = description.args(); assertTrue("expect description to be defined", description.description() != null && false == description.description().isEmpty()); + assertThat( + "descriptions should be complete sentences", + description.description(), + either(endsWith(".")) // A full sentence + .or(endsWith("∅")) // Math + ); List> typesFromSignature = new ArrayList<>(); Set returnFromSignature = new HashSet<>(); @@ -625,558 +386,6 @@ public static void testFunctionInfo() { Set 
returnTypes = Arrays.stream(description.returnType()).collect(Collectors.toCollection(TreeSet::new)); assertEquals(returnFromSignature, returnTypes); - - } - - /** - * Adds cases with {@code null} and asserts that the result is {@code null}. - *
      - * Note: This won't add more than a single null to any existing test case, - * just to keep the number of test cases from exploding totally. - *
      - * - * @param entirelyNullPreservesType should a test case that only contains parameters - * with the {@code null} type keep it's expected type? - * This is mostly going to be {@code true} - * except for functions that base their type entirely - * on input types like {@link Greatest} or {@link Coalesce}. - */ - protected static List anyNullIsNull(boolean entirelyNullPreservesType, List testCaseSuppliers) { - return anyNullIsNull( - testCaseSuppliers, - (nullPosition, nullValueDataType, original) -> entirelyNullPreservesType == false - && nullValueDataType == DataType.NULL - && original.getData().size() == 1 ? DataType.NULL : original.expectedType(), - (nullPosition, nullData, original) -> original - ); - } - - public interface ExpectedType { - DataType expectedType(int nullPosition, DataType nullValueDataType, TestCaseSupplier.TestCase original); - } - - public interface ExpectedEvaluatorToString { - Matcher evaluatorToString(int nullPosition, TestCaseSupplier.TypedData nullData, Matcher original); - } - - /** - * Modifies suppliers to generate BytesRefs with random offsets. - */ - protected static List randomizeBytesRefsOffset(List testCaseSuppliers) { - return testCaseSuppliers.stream().map(supplier -> new TestCaseSupplier(supplier.name(), supplier.types(), () -> { - var testCase = supplier.supplier().get(); - - var newData = testCase.getData().stream().map(typedData -> { - if (typedData.data() instanceof BytesRef bytesRef) { - var offset = randomIntBetween(0, 10); - var extraLength = randomIntBetween(0, 10); - var newBytesArray = randomByteArrayOfLength(bytesRef.length + offset + extraLength); - - System.arraycopy(bytesRef.bytes, bytesRef.offset, newBytesArray, offset, bytesRef.length); - - var newBytesRef = new BytesRef(newBytesArray, offset, bytesRef.length); - var newTypedData = new TestCaseSupplier.TypedData(newBytesRef, typedData.type(), typedData.name()); - - if (typedData.isForceLiteral()) { - newTypedData.forceLiteral(); - } - - return newTypedData; - } - return typedData; - }).toList(); - - return new TestCaseSupplier.TestCase( - newData, - testCase.evaluatorToString(), - testCase.expectedType(), - testCase.getMatcher(), - testCase.getExpectedWarnings(), - testCase.getExpectedTypeError(), - testCase.foldingExceptionClass(), - testCase.foldingExceptionMessage() - ); - })).toList(); - } - - protected static List anyNullIsNull( - List testCaseSuppliers, - ExpectedType expectedType, - ExpectedEvaluatorToString evaluatorToString - ) { - typesRequired(testCaseSuppliers); - List suppliers = new ArrayList<>(testCaseSuppliers.size()); - suppliers.addAll(testCaseSuppliers); - - /* - * For each original test case, add as many copies as there were - * arguments, replacing one of the arguments with null and keeping - * the others. - * - * Also, if this was the first time we saw the signature we copy it - * *again*, replacing the argument with null, but annotating the - * argument's type as `null` explicitly. 
- */ - Set> uniqueSignatures = new HashSet<>(); - for (TestCaseSupplier original : testCaseSuppliers) { - boolean firstTimeSeenSignature = uniqueSignatures.add(original.types()); - for (int nullPosition = 0; nullPosition < original.types().size(); nullPosition++) { - int finalNullPosition = nullPosition; - suppliers.add(new TestCaseSupplier(original.name() + " null in " + nullPosition, original.types(), () -> { - TestCaseSupplier.TestCase oc = original.get(); - List data = IntStream.range(0, oc.getData().size()).mapToObj(i -> { - TestCaseSupplier.TypedData od = oc.getData().get(i); - return i == finalNullPosition ? od.forceValueToNull() : od; - }).toList(); - TestCaseSupplier.TypedData nulledData = oc.getData().get(finalNullPosition); - return new TestCaseSupplier.TestCase( - data, - evaluatorToString.evaluatorToString(finalNullPosition, nulledData, oc.evaluatorToString()), - expectedType.expectedType(finalNullPosition, nulledData.type(), oc), - nullValue(), - null, - oc.getExpectedTypeError(), - null, - null - ); - })); - - if (firstTimeSeenSignature) { - List typesWithNull = IntStream.range(0, original.types().size()) - .mapToObj(i -> i == finalNullPosition ? DataType.NULL : original.types().get(i)) - .toList(); - boolean newSignature = uniqueSignatures.add(typesWithNull); - if (newSignature) { - suppliers.add(new TestCaseSupplier(typesWithNull, () -> { - TestCaseSupplier.TestCase oc = original.get(); - List data = IntStream.range(0, oc.getData().size()) - .mapToObj(i -> i == finalNullPosition ? TestCaseSupplier.TypedData.NULL : oc.getData().get(i)) - .toList(); - return new TestCaseSupplier.TestCase( - data, - equalTo("LiteralsEvaluator[lit=null]"), - expectedType.expectedType(finalNullPosition, DataType.NULL, oc), - nullValue(), - null, - oc.getExpectedTypeError(), - null, - null - ); - })); - } - } - } - } - - return suppliers; - - } - - /** - * Adds test cases containing unsupported parameter types that assert - * that they throw type errors. - */ - protected static List errorsForCasesWithoutExamples(List testCaseSuppliers) { - return errorsForCasesWithoutExamples(testCaseSuppliers, AbstractFunctionTestCase::typeErrorMessage); - } - - protected static List errorsForCasesWithoutExamples( - List testCaseSuppliers, - TypeErrorMessageSupplier typeErrorMessageSupplier - ) { - typesRequired(testCaseSuppliers); - List suppliers = new ArrayList<>(testCaseSuppliers.size()); - suppliers.addAll(testCaseSuppliers); - - Set> valid = testCaseSuppliers.stream().map(TestCaseSupplier::types).collect(Collectors.toSet()); - List> validPerPosition = validPerPosition(valid); - - testCaseSuppliers.stream() - .map(s -> s.types().size()) - .collect(Collectors.toSet()) - .stream() - .flatMap(count -> allPermutations(count)) - .filter(types -> valid.contains(types) == false) - /* - * Skip any cases with more than one null. Our tests don't generate - * the full combinatorial explosions of all nulls - just a single null. - * Hopefully , cases will function the same as , - * cases. 
- */.filter(types -> types.stream().filter(t -> t == DataType.NULL).count() <= 1) - .map(types -> typeErrorSupplier(validPerPosition.size() != 1, validPerPosition, types, typeErrorMessageSupplier)) - .forEach(suppliers::add); - return suppliers; - } - - public static String errorMessageStringForBinaryOperators( - boolean includeOrdinal, - List> validPerPosition, - List types - ) { - try { - return typeErrorMessage(includeOrdinal, validPerPosition, types); - } catch (IllegalStateException e) { - // This means all the positional args were okay, so the expected error is from the combination - if (types.get(0).equals(DataType.UNSIGNED_LONG)) { - return "first argument of [] is [unsigned_long] and second is [" - + types.get(1).typeName() - + "]. [unsigned_long] can only be operated on together with another [unsigned_long]"; - - } - if (types.get(1).equals(DataType.UNSIGNED_LONG)) { - return "first argument of [] is [" - + types.get(0).typeName() - + "] and second is [unsigned_long]. [unsigned_long] can only be operated on together with another [unsigned_long]"; - } - return "first argument of [] is [" - + (types.get(0).isNumeric() ? "numeric" : types.get(0).typeName()) - + "] so second argument must also be [" - + (types.get(0).isNumeric() ? "numeric" : types.get(0).typeName()) - + "] but was [" - + types.get(1).typeName() - + "]"; - - } - } - - /** - * Adds test cases containing unsupported parameter types that immediately fail. - */ - protected static List failureForCasesWithoutExamples(List testCaseSuppliers) { - typesRequired(testCaseSuppliers); - List suppliers = new ArrayList<>(testCaseSuppliers.size()); - suppliers.addAll(testCaseSuppliers); - - Set> valid = testCaseSuppliers.stream().map(TestCaseSupplier::types).collect(Collectors.toSet()); - List> validPerPosition = validPerPosition(valid); - - testCaseSuppliers.stream() - .map(s -> s.types().size()) - .collect(Collectors.toSet()) - .stream() - .flatMap(count -> allPermutations(count)) - .filter(types -> valid.contains(types) == false) - .map(types -> new TestCaseSupplier("type error for " + TestCaseSupplier.nameFromTypes(types), types, () -> { - throw new IllegalStateException("must implement a case for " + types); - })) - .forEach(suppliers::add); - return suppliers; - } - - /** - * Validate that we know the types for all the test cases already created - * @param suppliers - list of suppliers before adding in the illegal type combinations - */ - private static void typesRequired(List suppliers) { - String bad = suppliers.stream().filter(s -> s.types() == null).map(s -> s.name()).collect(Collectors.joining("\n")); - if (bad.equals("") == false) { - throw new IllegalArgumentException("types required but not found for these tests:\n" + bad); - } - } - - private static List> validPerPosition(Set> valid) { - int max = valid.stream().mapToInt(List::size).max().getAsInt(); - List> result = new ArrayList<>(max); - for (int i = 0; i < max; i++) { - result.add(new HashSet<>()); - } - for (List signature : valid) { - for (int i = 0; i < signature.size(); i++) { - result.get(i).add(signature.get(i)); - } - } - return result; - } - - private static Stream> allPermutations(int argumentCount) { - if (argumentCount == 0) { - return Stream.of(List.of()); - } - if (argumentCount > 3) { - throw new IllegalArgumentException("would generate too many combinations"); - } - Stream> stream = representable().map(t -> List.of(t)); - for (int i = 1; i < argumentCount; i++) { - stream = stream.flatMap(types -> representable().map(t -> append(types, t))); - } - 
return stream; - } - - private static List append(List orig, DataType extra) { - List longer = new ArrayList<>(orig.size() + 1); - longer.addAll(orig); - longer.add(extra); - return longer; - } - - @FunctionalInterface - protected interface TypeErrorMessageSupplier { - String apply(boolean includeOrdinal, List> validPerPosition, List types); - } - - protected static TestCaseSupplier typeErrorSupplier( - boolean includeOrdinal, - List> validPerPosition, - List types - ) { - return typeErrorSupplier(includeOrdinal, validPerPosition, types, AbstractFunctionTestCase::typeErrorMessage); - } - - /** - * Build a test case that asserts that the combination of parameter types is an error. - */ - protected static TestCaseSupplier typeErrorSupplier( - boolean includeOrdinal, - List> validPerPosition, - List types, - TypeErrorMessageSupplier errorMessageSupplier - ) { - return new TestCaseSupplier( - "type error for " + TestCaseSupplier.nameFromTypes(types), - types, - () -> TestCaseSupplier.TestCase.typeError( - types.stream().map(type -> new TestCaseSupplier.TypedData(randomLiteral(type).value(), type, type.typeName())).toList(), - errorMessageSupplier.apply(includeOrdinal, validPerPosition, types) - ) - ); - } - - /** - * Build the expected error message for an invalid type signature. - */ - protected static String typeErrorMessage(boolean includeOrdinal, List> validPerPosition, List types) { - int badArgPosition = -1; - for (int i = 0; i < types.size(); i++) { - if (validPerPosition.get(i).contains(types.get(i)) == false) { - badArgPosition = i; - break; - } - } - if (badArgPosition == -1) { - throw new IllegalStateException( - "Can't generate error message for these types, you probably need a custom error message function" - ); - } - String ordinal = includeOrdinal ? 
TypeResolutions.ParamOrdinal.fromIndex(badArgPosition).name().toLowerCase(Locale.ROOT) + " " : ""; - String expectedType = expectedType(validPerPosition.get(badArgPosition)); - String name = types.get(badArgPosition).typeName(); - return ordinal + "argument of [] must be [" + expectedType + "], found value [" + name + "] type [" + name + "]"; - } - - private static final Map, String> NAMED_EXPECTED_TYPES = Map.ofEntries( - Map.entry( - Set.of(DataType.DATE_PERIOD, DataType.DOUBLE, DataType.INTEGER, DataType.LONG, DataType.TIME_DURATION, DataType.NULL), - "numeric, date_period or time_duration" - ), - Map.entry(Set.of(DataType.DATETIME, DataType.NULL), "datetime"), - Map.entry(Set.of(DataType.DOUBLE, DataType.NULL), "double"), - Map.entry(Set.of(DataType.INTEGER, DataType.NULL), "integer"), - Map.entry(Set.of(DataType.IP, DataType.NULL), "ip"), - Map.entry(Set.of(DataType.LONG, DataType.INTEGER, DataType.UNSIGNED_LONG, DataType.DOUBLE, DataType.NULL), "numeric"), - Map.entry(Set.of(DataType.LONG, DataType.INTEGER, DataType.UNSIGNED_LONG, DataType.DOUBLE), "numeric"), - Map.entry(Set.of(DataType.KEYWORD, DataType.TEXT, DataType.VERSION, DataType.NULL), "string or version"), - Map.entry(Set.of(DataType.KEYWORD, DataType.TEXT, DataType.NULL), "string"), - Map.entry(Set.of(DataType.IP, DataType.KEYWORD, DataType.TEXT, DataType.NULL), "ip or string"), - Map.entry(Set.copyOf(Arrays.asList(representableTypes())), "representable"), - Map.entry(Set.copyOf(Arrays.asList(representableNonSpatialTypes())), "representableNonSpatial"), - Map.entry( - Set.of( - DataType.BOOLEAN, - DataType.DOUBLE, - DataType.INTEGER, - DataType.KEYWORD, - DataType.LONG, - DataType.TEXT, - DataType.UNSIGNED_LONG, - DataType.NULL - ), - "boolean or numeric or string" - ), - Map.entry( - Set.of( - DataType.DATETIME, - DataType.DOUBLE, - DataType.INTEGER, - DataType.KEYWORD, - DataType.LONG, - DataType.TEXT, - DataType.UNSIGNED_LONG, - DataType.NULL - ), - "datetime or numeric or string" - ), - // What Add accepts - Map.entry( - Set.of( - DataType.DATE_PERIOD, - DataType.DATETIME, - DataType.DOUBLE, - DataType.INTEGER, - DataType.LONG, - DataType.NULL, - DataType.TIME_DURATION, - DataType.UNSIGNED_LONG - ), - "datetime or numeric" - ), - Map.entry( - Set.of( - DataType.BOOLEAN, - DataType.DATETIME, - DataType.DOUBLE, - DataType.INTEGER, - DataType.KEYWORD, - DataType.LONG, - DataType.TEXT, - DataType.UNSIGNED_LONG, - DataType.NULL - ), - "boolean or datetime or numeric or string" - ), - // to_int - Map.entry( - Set.of( - DataType.BOOLEAN, - DataType.COUNTER_INTEGER, - DataType.DATETIME, - DataType.DOUBLE, - DataType.INTEGER, - DataType.KEYWORD, - DataType.LONG, - DataType.TEXT, - DataType.UNSIGNED_LONG, - DataType.NULL - ), - "boolean or counter_integer or datetime or numeric or string" - ), - // to_long - Map.entry( - Set.of( - DataType.BOOLEAN, - DataType.COUNTER_INTEGER, - DataType.COUNTER_LONG, - DataType.DATETIME, - DataType.DOUBLE, - DataType.INTEGER, - DataType.KEYWORD, - DataType.LONG, - DataType.TEXT, - DataType.UNSIGNED_LONG, - DataType.NULL - ), - "boolean or counter_integer or counter_long or datetime or numeric or string" - ), - // to_double - Map.entry( - Set.of( - DataType.BOOLEAN, - DataType.COUNTER_DOUBLE, - DataType.COUNTER_INTEGER, - DataType.COUNTER_LONG, - DataType.DATETIME, - DataType.DOUBLE, - DataType.INTEGER, - DataType.KEYWORD, - DataType.LONG, - DataType.TEXT, - DataType.UNSIGNED_LONG, - DataType.NULL - ), - "boolean or counter_double or counter_integer or counter_long or datetime or numeric or 
string" - ), - Map.entry( - Set.of( - DataType.BOOLEAN, - DataType.CARTESIAN_POINT, - DataType.DATETIME, - DataType.DOUBLE, - DataType.GEO_POINT, - DataType.INTEGER, - DataType.KEYWORD, - DataType.LONG, - DataType.TEXT, - DataType.UNSIGNED_LONG, - DataType.NULL - ), - "boolean or cartesian_point or datetime or geo_point or numeric or string" - ), - Map.entry( - Set.of( - DataType.DATETIME, - DataType.DOUBLE, - DataType.INTEGER, - DataType.IP, - DataType.KEYWORD, - DataType.LONG, - DataType.TEXT, - DataType.UNSIGNED_LONG, - DataType.VERSION, - DataType.NULL - ), - "datetime, double, integer, ip, keyword, long, text, unsigned_long or version" - ), - Map.entry( - Set.of( - DataType.BOOLEAN, - DataType.DATETIME, - DataType.DOUBLE, - DataType.GEO_POINT, - DataType.GEO_SHAPE, - DataType.INTEGER, - DataType.IP, - DataType.KEYWORD, - DataType.LONG, - DataType.TEXT, - DataType.UNSIGNED_LONG, - DataType.VERSION, - DataType.NULL - ), - "cartesian_point or datetime or geo_point or numeric or string" - ), - Map.entry(Set.of(DataType.GEO_POINT, DataType.KEYWORD, DataType.TEXT, DataType.NULL), "geo_point or string"), - Map.entry(Set.of(DataType.CARTESIAN_POINT, DataType.KEYWORD, DataType.TEXT, DataType.NULL), "cartesian_point or string"), - Map.entry( - Set.of(DataType.GEO_POINT, DataType.GEO_SHAPE, DataType.KEYWORD, DataType.TEXT, DataType.NULL), - "geo_point or geo_shape or string" - ), - Map.entry( - Set.of(DataType.CARTESIAN_POINT, DataType.CARTESIAN_SHAPE, DataType.KEYWORD, DataType.TEXT, DataType.NULL), - "cartesian_point or cartesian_shape or string" - ), - Map.entry(Set.of(DataType.GEO_POINT, DataType.CARTESIAN_POINT, DataType.NULL), "geo_point or cartesian_point"), - Map.entry(Set.of(DataType.DATE_PERIOD, DataType.TIME_DURATION, DataType.NULL), "dateperiod or timeduration") - ); - - // TODO: generate this message dynamically, a la AbstractConvertFunction#supportedTypesNames()? - private static String expectedType(Set validTypes) { - String named = NAMED_EXPECTED_TYPES.get(validTypes); - if (named == null) { - /* - * Note for anyone who's test lands here - it's likely that you - * don't have a test case covering explicit `null` arguments in - * this position. Generally you can get that with anyNullIsNull. - */ - throw new UnsupportedOperationException( - "can't guess expected types for " + validTypes.stream().sorted(Comparator.comparing(t -> t.typeName())).toList() - ); - } - return named; - } - - protected static Stream representable() { - return DataType.types().stream().filter(EsqlDataTypes::isRepresentable); - } - - protected static DataType[] representableTypes() { - return representable().toArray(DataType[]::new); - } - - protected static Stream representableNonSpatial() { - return representable().filter(t -> isSpatial(t) == false); - } - - protected static DataType[] representableNonSpatialTypes() { - return representableNonSpatial().toArray(DataType[]::new); } protected final void assertTypeResolutionFailure(Expression expression) { @@ -1468,7 +677,7 @@ private static void renderKibanaFunctionDefinition( "comment", "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it." ); - builder.field("type", "eval"); // TODO aggs in here too + builder.field("type", isAggregation() ? 
"agg" : "eval"); builder.field("name", name); builder.field("description", removeAsciidocLinks(info.description())); if (Strings.isNullOrEmpty(info.note()) == false) { @@ -1661,4 +870,14 @@ static Version randomVersion() { protected static DataType[] strings() { return DataType.types().stream().filter(DataType::isString).toArray(DataType[]::new); } + + /** + * Returns true if the current test case is for an aggregation function. + *
      + * This method requires reflection, as it's called from a static context (@AfterClass documentation rendering). + *
      + */ + private static boolean isAggregation() { + return AbstractAggregationTestCase.class.isAssignableFrom(getTestClass()); + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java new file mode 100644 index 0000000000000..1aa90d367099a --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java @@ -0,0 +1,884 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function; + +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.indices.CrankyCircuitBreakerService; +import org.elasticsearch.xpack.esql.TestBlockFactory; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.util.NumericUtils; +import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Greatest; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.AbstractMultivalueFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; +import org.elasticsearch.xpack.esql.optimizer.FoldNull; +import org.elasticsearch.xpack.esql.planner.PlannerUtils; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import org.hamcrest.Matcher; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static org.elasticsearch.compute.data.BlockUtils.toJavaObject; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isSpatial; +import static org.hamcrest.Matchers.either; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; + +/** + * Base class for scalar function tests. Tests based on this class will generally build out a single example evaluation, + * which can be automatically tested against several scenarios (null handling, concurrency, etc). 
+ */ +public abstract class AbstractScalarFunctionTestCase extends AbstractFunctionTestCase { + + /** + * Converts a list of test cases into a list of parameter suppliers. + * Also, adds a default set of extra test cases. + *
      + * Use if possible, as this method may get updated with new checks in the future. + *
      + * + * @param entirelyNullPreservesType See {@link #anyNullIsNull(boolean, List)} + */ + protected static Iterable parameterSuppliersFromTypedDataWithDefaultChecks( + boolean entirelyNullPreservesType, + List suppliers + ) { + return parameterSuppliersFromTypedData( + errorsForCasesWithoutExamples(anyNullIsNull(entirelyNullPreservesType, randomizeBytesRefsOffset(suppliers))) + ); + } + + public final void testEvaluate() { + assumeTrue("Can't build evaluator", testCase.canBuildEvaluator()); + boolean readFloating = randomBoolean(); + Expression expression = readFloating ? buildDeepCopyOfFieldExpression(testCase) : buildFieldExpression(testCase); + if (testCase.getExpectedTypeError() != null) { + assertTypeResolutionFailure(expression); + return; + } + assumeTrue("Expected type must be representable to build an evaluator", EsqlDataTypes.isRepresentable(testCase.expectedType())); + logger.info( + "Test Values: " + testCase.getData().stream().map(TestCaseSupplier.TypedData::toString).collect(Collectors.joining(",")) + ); + Expression.TypeResolution resolution = expression.typeResolved(); + if (resolution.unresolved()) { + throw new AssertionError("expected resolved " + resolution.message()); + } + expression = new FoldNull().rule(expression); + assertThat(expression.dataType(), equalTo(testCase.expectedType())); + logger.info("Result type: " + expression.dataType()); + + Object result; + try (ExpressionEvaluator evaluator = evaluator(expression).get(driverContext())) { + try (Block block = evaluator.eval(row(testCase.getDataValues()))) { + result = toJavaObjectUnsignedLongAware(block, 0); + } + } + assertThat(result, not(equalTo(Double.NaN))); + assert testCase.getMatcher().matches(Double.POSITIVE_INFINITY) == false; + assertThat(result, not(equalTo(Double.POSITIVE_INFINITY))); + assert testCase.getMatcher().matches(Double.NEGATIVE_INFINITY) == false; + assertThat(result, not(equalTo(Double.NEGATIVE_INFINITY))); + assertThat(result, testCase.getMatcher()); + if (testCase.getExpectedWarnings() != null) { + assertWarnings(testCase.getExpectedWarnings()); + } + } + + /** + * Evaluates a {@link Block} of values, all copied from the input pattern.. + *
      + * Note that this'll sometimes be a {@link Vector} of values if the + * input pattern contained only a single value. + *
      + */ + public final void testEvaluateBlockWithoutNulls() { + assumeTrue("no warning is expected", testCase.getExpectedWarnings() == null); + try { + testEvaluateBlock(driverContext().blockFactory(), driverContext(), false); + } catch (CircuitBreakingException ex) { + assertThat(ex.getMessage(), equalTo(MockBigArrays.ERROR_MESSAGE)); + fail("Test data is too large to fit in the memory"); + } + } + + /** + * Evaluates a {@link Block} of values, all copied from the input pattern with + * some null values inserted between. + */ + public final void testEvaluateBlockWithNulls() { + assumeTrue("no warning is expected", testCase.getExpectedWarnings() == null); + try { + testEvaluateBlock(driverContext().blockFactory(), driverContext(), true); + } catch (CircuitBreakingException ex) { + assertThat(ex.getMessage(), equalTo(MockBigArrays.ERROR_MESSAGE)); + fail("Test data is too large to fit in the memory"); + } + } + + /** + * Evaluates a {@link Block} of values, all copied from the input pattern, + * using the {@link CrankyCircuitBreakerService} which fails randomly. + *
      + * Note that this'll sometimes be a {@link Vector} of values if the + * input pattern contained only a single value. + *
      + */ + public final void testCrankyEvaluateBlockWithoutNulls() { + assumeTrue("sometimes the cranky breaker silences warnings, just skip these cases", testCase.getExpectedWarnings() == null); + try { + testEvaluateBlock(driverContext().blockFactory(), crankyContext(), false); + } catch (CircuitBreakingException ex) { + assertThat(ex.getMessage(), equalTo(CrankyCircuitBreakerService.ERROR_MESSAGE)); + } + } + + /** + * Evaluates a {@link Block} of values, all copied from the input pattern with + * some null values inserted between, using the {@link CrankyCircuitBreakerService} which fails randomly. + */ + public final void testCrankyEvaluateBlockWithNulls() { + assumeTrue("sometimes the cranky breaker silences warnings, just skip these cases", testCase.getExpectedWarnings() == null); + try { + testEvaluateBlock(driverContext().blockFactory(), crankyContext(), true); + } catch (CircuitBreakingException ex) { + assertThat(ex.getMessage(), equalTo(CrankyCircuitBreakerService.ERROR_MESSAGE)); + } + } + + protected Matcher allNullsMatcher() { + return nullValue(); + } + + private void testEvaluateBlock(BlockFactory inputBlockFactory, DriverContext context, boolean insertNulls) { + Expression expression = randomBoolean() ? buildDeepCopyOfFieldExpression(testCase) : buildFieldExpression(testCase); + if (testCase.getExpectedTypeError() != null) { + assertTypeResolutionFailure(expression); + return; + } + assumeTrue("Can't build evaluator", testCase.canBuildEvaluator()); + assumeTrue("Expected type must be representable to build an evaluator", EsqlDataTypes.isRepresentable(testCase.expectedType())); + int positions = between(1, 1024); + List data = testCase.getData(); + Page onePositionPage = row(testCase.getDataValues()); + Block[] manyPositionsBlocks = new Block[Math.toIntExact(data.stream().filter(d -> d.isForceLiteral() == false).count())]; + Set nullPositions = insertNulls + ? 
IntStream.range(0, positions).filter(i -> randomBoolean()).mapToObj(Integer::valueOf).collect(Collectors.toSet()) + : Set.of(); + if (nullPositions.size() == positions) { + nullPositions = Set.of(); + } + try { + int b = 0; + for (TestCaseSupplier.TypedData d : data) { + if (d.isForceLiteral()) { + continue; + } + ElementType elementType = PlannerUtils.toElementType(d.type()); + try (Block.Builder builder = elementType.newBlockBuilder(positions, inputBlockFactory)) { + for (int p = 0; p < positions; p++) { + if (nullPositions.contains(p)) { + builder.appendNull(); + } else { + builder.copyFrom(onePositionPage.getBlock(b), 0, 1); + } + } + manyPositionsBlocks[b] = builder.build(); + } + b++; + } + try ( + ExpressionEvaluator eval = evaluator(expression).get(context); + Block block = eval.eval(new Page(positions, manyPositionsBlocks)) + ) { + for (int p = 0; p < positions; p++) { + if (nullPositions.contains(p)) { + assertThat(toJavaObject(block, p), allNullsMatcher()); + continue; + } + assertThat(toJavaObjectUnsignedLongAware(block, p), testCase.getMatcher()); + } + assertThat( + "evaluates to tracked block", + block.blockFactory(), + either(sameInstance(context.blockFactory())).or(sameInstance(inputBlockFactory)) + ); + } + } finally { + Releasables.close(onePositionPage::releaseBlocks, Releasables.wrap(manyPositionsBlocks)); + } + if (testCase.getExpectedWarnings() != null) { + assertWarnings(testCase.getExpectedWarnings()); + } + } + + public void testSimpleWithNulls() { // TODO replace this with nulls inserted into the test case like anyNullIsNull + Expression expression = buildFieldExpression(testCase); + if (testCase.getExpectedTypeError() != null) { + assertTypeResolutionFailure(expression); + return; + } + assumeTrue("Can't build evaluator", testCase.canBuildEvaluator()); + List simpleData = testCase.getDataValues(); + try (EvalOperator.ExpressionEvaluator eval = evaluator(expression).get(driverContext())) { + BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); + Block[] orig = BlockUtils.fromListRow(blockFactory, simpleData); + for (int i = 0; i < orig.length; i++) { + List data = new ArrayList<>(); + Block[] blocks = new Block[orig.length]; + for (int b = 0; b < blocks.length; b++) { + if (b == i) { + blocks[b] = orig[b].elementType().newBlockBuilder(1, blockFactory).appendNull().build(); + data.add(null); + } else { + blocks[b] = orig[b]; + data.add(simpleData.get(b)); + } + } + try (Block block = eval.eval(new Page(blocks))) { + assertSimpleWithNulls(data, block, i); + } + } + + // Note: the null-in-fast-null-out handling prevents any exception from being thrown, so the warnings provided in some test + // cases won't actually be registered. This isn't an issue for unary functions, but could be an issue for n-ary ones, if + // function processing of the first parameter(s) could raise an exception/warning. (But hasn't been the case so far.) + // N-ary non-MV functions dealing with one multivalue (before hitting the null parameter injected above) will now trigger + // a warning ("SV-function encountered a MV") that thus needs to be checked. 
+ if (this instanceof AbstractMultivalueFunctionTestCase == false + && simpleData.stream().anyMatch(List.class::isInstance) + && testCase.getExpectedWarnings() != null) { + assertWarnings(testCase.getExpectedWarnings()); + } + } + } + + public final void testEvaluateInManyThreads() throws ExecutionException, InterruptedException { + Expression expression = buildFieldExpression(testCase); + if (testCase.getExpectedTypeError() != null) { + assertTypeResolutionFailure(expression); + return; + } + assumeTrue("Can't build evaluator", testCase.canBuildEvaluator()); + assumeTrue("Expected type must be representable to build an evaluator", EsqlDataTypes.isRepresentable(testCase.expectedType())); + int count = 10_000; + int threads = 5; + var evalSupplier = evaluator(expression); + ExecutorService exec = Executors.newFixedThreadPool(threads); + try { + List> futures = new ArrayList<>(); + for (int i = 0; i < threads; i++) { + List simpleData = testCase.getDataValues(); + Page page = row(simpleData); + + futures.add(exec.submit(() -> { + try (EvalOperator.ExpressionEvaluator eval = evalSupplier.get(driverContext())) { + for (int c = 0; c < count; c++) { + try (Block block = eval.eval(page)) { + assertThat(toJavaObjectUnsignedLongAware(block, 0), testCase.getMatcher()); + } + } + } + })); + } + for (Future f : futures) { + f.get(); + } + } finally { + exec.shutdown(); + } + } + + public final void testEvaluatorToString() { + Expression expression = buildFieldExpression(testCase); + if (testCase.getExpectedTypeError() != null) { + assertTypeResolutionFailure(expression); + return; + } + assumeTrue("Can't build evaluator", testCase.canBuildEvaluator()); + var factory = evaluator(expression); + try (ExpressionEvaluator ev = factory.get(driverContext())) { + assertThat(ev.toString(), testCase.evaluatorToString()); + } + } + + public final void testFactoryToString() { + Expression expression = buildFieldExpression(testCase); + if (testCase.getExpectedTypeError() != null) { + assertTypeResolutionFailure(expression); + return; + } + assumeTrue("Can't build evaluator", testCase.canBuildEvaluator()); + var factory = evaluator(buildFieldExpression(testCase)); + assertThat(factory.toString(), testCase.evaluatorToString()); + } + + public final void testFold() { + Expression expression = buildLiteralExpression(testCase); + if (testCase.getExpectedTypeError() != null) { + assertTypeResolutionFailure(expression); + return; + } + assertFalse(expression.typeResolved().unresolved()); + Expression nullOptimized = new FoldNull().rule(expression); + assertThat(nullOptimized.dataType(), equalTo(testCase.expectedType())); + assertTrue(nullOptimized.foldable()); + if (testCase.foldingExceptionClass() == null) { + Object result = nullOptimized.fold(); + // Decode unsigned longs into BigIntegers + if (testCase.expectedType() == DataType.UNSIGNED_LONG && result != null) { + result = NumericUtils.unsignedLongAsBigInteger((Long) result); + } + assertThat(result, testCase.getMatcher()); + if (testCase.getExpectedWarnings() != null) { + assertWarnings(testCase.getExpectedWarnings()); + } + } else { + Throwable t = expectThrows(testCase.foldingExceptionClass(), nullOptimized::fold); + assertThat(t.getMessage(), equalTo(testCase.foldingExceptionMessage())); + } + } + + /** + * Adds cases with {@code null} and asserts that the result is {@code null}. + *
      + * Note: This won't add more than a single null to any existing test case, + * just to keep the number of test cases from exploding totally. + *
      + * + * @param entirelyNullPreservesType should a test case that only contains parameters + * with the {@code null} type keep it's expected type? + * This is mostly going to be {@code true} + * except for functions that base their type entirely + * on input types like {@link Greatest} or {@link Coalesce}. + */ + protected static List anyNullIsNull(boolean entirelyNullPreservesType, List testCaseSuppliers) { + return anyNullIsNull( + testCaseSuppliers, + (nullPosition, nullValueDataType, original) -> entirelyNullPreservesType == false + && nullValueDataType == DataType.NULL + && original.getData().size() == 1 ? DataType.NULL : original.expectedType(), + (nullPosition, nullData, original) -> original + ); + } + + public interface ExpectedType { + DataType expectedType(int nullPosition, DataType nullValueDataType, TestCaseSupplier.TestCase original); + } + + public interface ExpectedEvaluatorToString { + Matcher evaluatorToString(int nullPosition, TestCaseSupplier.TypedData nullData, Matcher original); + } + + protected static List anyNullIsNull( + List testCaseSuppliers, + ExpectedType expectedType, + ExpectedEvaluatorToString evaluatorToString + ) { + typesRequired(testCaseSuppliers); + List suppliers = new ArrayList<>(testCaseSuppliers.size()); + suppliers.addAll(testCaseSuppliers); + + /* + * For each original test case, add as many copies as there were + * arguments, replacing one of the arguments with null and keeping + * the others. + * + * Also, if this was the first time we saw the signature we copy it + * *again*, replacing the argument with null, but annotating the + * argument's type as `null` explicitly. + */ + Set> uniqueSignatures = new HashSet<>(); + for (TestCaseSupplier original : testCaseSuppliers) { + boolean firstTimeSeenSignature = uniqueSignatures.add(original.types()); + for (int nullPosition = 0; nullPosition < original.types().size(); nullPosition++) { + int finalNullPosition = nullPosition; + suppliers.add(new TestCaseSupplier(original.name() + " null in " + nullPosition, original.types(), () -> { + TestCaseSupplier.TestCase oc = original.get(); + List data = IntStream.range(0, oc.getData().size()).mapToObj(i -> { + TestCaseSupplier.TypedData od = oc.getData().get(i); + return i == finalNullPosition ? od.withData(null) : od; + }).toList(); + TestCaseSupplier.TypedData nulledData = oc.getData().get(finalNullPosition); + return new TestCaseSupplier.TestCase( + data, + evaluatorToString.evaluatorToString(finalNullPosition, nulledData, oc.evaluatorToString()), + expectedType.expectedType(finalNullPosition, nulledData.type(), oc), + nullValue(), + null, + oc.getExpectedTypeError(), + null, + null + ); + })); + + if (firstTimeSeenSignature) { + List typesWithNull = IntStream.range(0, original.types().size()) + .mapToObj(i -> i == finalNullPosition ? DataType.NULL : original.types().get(i)) + .toList(); + boolean newSignature = uniqueSignatures.add(typesWithNull); + if (newSignature) { + suppliers.add(new TestCaseSupplier(typesWithNull, () -> { + TestCaseSupplier.TestCase oc = original.get(); + List data = IntStream.range(0, oc.getData().size()) + .mapToObj(i -> i == finalNullPosition ? 
TestCaseSupplier.TypedData.NULL : oc.getData().get(i)) + .toList(); + return new TestCaseSupplier.TestCase( + data, + equalTo("LiteralsEvaluator[lit=null]"), + expectedType.expectedType(finalNullPosition, DataType.NULL, oc), + nullValue(), + null, + oc.getExpectedTypeError(), + null, + null + ); + })); + } + } + } + } + + return suppliers; + + } + + /** + * Adds test cases containing unsupported parameter types that assert + * that they throw type errors. + */ + protected static List errorsForCasesWithoutExamples(List testCaseSuppliers) { + return errorsForCasesWithoutExamples(testCaseSuppliers, AbstractScalarFunctionTestCase::typeErrorMessage); + } + + protected static List errorsForCasesWithoutExamples( + List testCaseSuppliers, + TypeErrorMessageSupplier typeErrorMessageSupplier + ) { + typesRequired(testCaseSuppliers); + List suppliers = new ArrayList<>(testCaseSuppliers.size()); + suppliers.addAll(testCaseSuppliers); + + Set> valid = testCaseSuppliers.stream().map(TestCaseSupplier::types).collect(Collectors.toSet()); + List> validPerPosition = validPerPosition(valid); + + testCaseSuppliers.stream() + .map(s -> s.types().size()) + .collect(Collectors.toSet()) + .stream() + .flatMap(count -> allPermutations(count)) + .filter(types -> valid.contains(types) == false) + /* + * Skip any cases with more than one null. Our tests don't generate + * the full combinatorial explosions of all nulls - just a single null. + * Hopefully , cases will function the same as , + * cases. + */.filter(types -> types.stream().filter(t -> t == DataType.NULL).count() <= 1) + .map(types -> typeErrorSupplier(validPerPosition.size() != 1, validPerPosition, types, typeErrorMessageSupplier)) + .forEach(suppliers::add); + return suppliers; + } + + public static String errorMessageStringForBinaryOperators( + boolean includeOrdinal, + List> validPerPosition, + List types + ) { + try { + return typeErrorMessage(includeOrdinal, validPerPosition, types); + } catch (IllegalStateException e) { + // This means all the positional args were okay, so the expected error is from the combination + if (types.get(0).equals(DataType.UNSIGNED_LONG)) { + return "first argument of [] is [unsigned_long] and second is [" + + types.get(1).typeName() + + "]. [unsigned_long] can only be operated on together with another [unsigned_long]"; + + } + if (types.get(1).equals(DataType.UNSIGNED_LONG)) { + return "first argument of [] is [" + + types.get(0).typeName() + + "] and second is [unsigned_long]. [unsigned_long] can only be operated on together with another [unsigned_long]"; + } + return "first argument of [] is [" + + (types.get(0).isNumeric() ? "numeric" : types.get(0).typeName()) + + "] so second argument must also be [" + + (types.get(0).isNumeric() ? "numeric" : types.get(0).typeName()) + + "] but was [" + + types.get(1).typeName() + + "]"; + + } + } + + /** + * Adds test cases containing unsupported parameter types that immediately fail. 
+ */ + protected static List failureForCasesWithoutExamples(List testCaseSuppliers) { + typesRequired(testCaseSuppliers); + List suppliers = new ArrayList<>(testCaseSuppliers.size()); + suppliers.addAll(testCaseSuppliers); + + Set> valid = testCaseSuppliers.stream().map(TestCaseSupplier::types).collect(Collectors.toSet()); + + testCaseSuppliers.stream() + .map(s -> s.types().size()) + .collect(Collectors.toSet()) + .stream() + .flatMap(count -> allPermutations(count)) + .filter(types -> valid.contains(types) == false) + .map(types -> new TestCaseSupplier("type error for " + TestCaseSupplier.nameFromTypes(types), types, () -> { + throw new IllegalStateException("must implement a case for " + types); + })) + .forEach(suppliers::add); + return suppliers; + } + + /** + * Validate that we know the types for all the test cases already created + * @param suppliers - list of suppliers before adding in the illegal type combinations + */ + private static void typesRequired(List suppliers) { + String bad = suppliers.stream().filter(s -> s.types() == null).map(s -> s.name()).collect(Collectors.joining("\n")); + if (bad.equals("") == false) { + throw new IllegalArgumentException("types required but not found for these tests:\n" + bad); + } + } + + private static List> validPerPosition(Set> valid) { + int max = valid.stream().mapToInt(List::size).max().getAsInt(); + List> result = new ArrayList<>(max); + for (int i = 0; i < max; i++) { + result.add(new HashSet<>()); + } + for (List signature : valid) { + for (int i = 0; i < signature.size(); i++) { + result.get(i).add(signature.get(i)); + } + } + return result; + } + + private static Stream> allPermutations(int argumentCount) { + if (argumentCount == 0) { + return Stream.of(List.of()); + } + if (argumentCount > 3) { + throw new IllegalArgumentException("would generate too many combinations"); + } + Stream> stream = representable().map(t -> List.of(t)); + for (int i = 1; i < argumentCount; i++) { + stream = stream.flatMap(types -> representable().map(t -> append(types, t))); + } + return stream; + } + + private static List append(List orig, DataType extra) { + List longer = new ArrayList<>(orig.size() + 1); + longer.addAll(orig); + longer.add(extra); + return longer; + } + + @FunctionalInterface + protected interface TypeErrorMessageSupplier { + String apply(boolean includeOrdinal, List> validPerPosition, List types); + } + + protected static TestCaseSupplier typeErrorSupplier( + boolean includeOrdinal, + List> validPerPosition, + List types + ) { + return typeErrorSupplier(includeOrdinal, validPerPosition, types, AbstractScalarFunctionTestCase::typeErrorMessage); + } + + /** + * Build a test case that asserts that the combination of parameter types is an error. + */ + protected static TestCaseSupplier typeErrorSupplier( + boolean includeOrdinal, + List> validPerPosition, + List types, + TypeErrorMessageSupplier errorMessageSupplier + ) { + return new TestCaseSupplier( + "type error for " + TestCaseSupplier.nameFromTypes(types), + types, + () -> TestCaseSupplier.TestCase.typeError( + types.stream().map(type -> new TestCaseSupplier.TypedData(randomLiteral(type).value(), type, type.typeName())).toList(), + errorMessageSupplier.apply(includeOrdinal, validPerPosition, types) + ) + ); + } + + /** + * Build the expected error message for an invalid type signature. 
+ */ + protected static String typeErrorMessage(boolean includeOrdinal, List> validPerPosition, List types) { + int badArgPosition = -1; + for (int i = 0; i < types.size(); i++) { + if (validPerPosition.get(i).contains(types.get(i)) == false) { + badArgPosition = i; + break; + } + } + if (badArgPosition == -1) { + throw new IllegalStateException( + "Can't generate error message for these types, you probably need a custom error message function" + ); + } + String ordinal = includeOrdinal ? TypeResolutions.ParamOrdinal.fromIndex(badArgPosition).name().toLowerCase(Locale.ROOT) + " " : ""; + String expectedType = expectedType(validPerPosition.get(badArgPosition)); + String name = types.get(badArgPosition).typeName(); + return ordinal + "argument of [] must be [" + expectedType + "], found value [" + name + "] type [" + name + "]"; + } + + private static final Map, String> NAMED_EXPECTED_TYPES = Map.ofEntries( + Map.entry( + Set.of(DataType.DATE_PERIOD, DataType.DOUBLE, DataType.INTEGER, DataType.LONG, DataType.TIME_DURATION, DataType.NULL), + "numeric, date_period or time_duration" + ), + Map.entry(Set.of(DataType.DATETIME, DataType.NULL), "datetime"), + Map.entry(Set.of(DataType.DOUBLE, DataType.NULL), "double"), + Map.entry(Set.of(DataType.INTEGER, DataType.NULL), "integer"), + Map.entry(Set.of(DataType.IP, DataType.NULL), "ip"), + Map.entry(Set.of(DataType.LONG, DataType.INTEGER, DataType.UNSIGNED_LONG, DataType.DOUBLE, DataType.NULL), "numeric"), + Map.entry(Set.of(DataType.LONG, DataType.INTEGER, DataType.UNSIGNED_LONG, DataType.DOUBLE), "numeric"), + Map.entry(Set.of(DataType.KEYWORD, DataType.TEXT, DataType.VERSION, DataType.NULL), "string or version"), + Map.entry(Set.of(DataType.KEYWORD, DataType.TEXT, DataType.NULL), "string"), + Map.entry(Set.of(DataType.IP, DataType.KEYWORD, DataType.TEXT, DataType.NULL), "ip or string"), + Map.entry(Set.copyOf(Arrays.asList(representableTypes())), "representable"), + Map.entry(Set.copyOf(Arrays.asList(representableNonSpatialTypes())), "representableNonSpatial"), + Map.entry( + Set.of( + DataType.BOOLEAN, + DataType.DOUBLE, + DataType.INTEGER, + DataType.KEYWORD, + DataType.LONG, + DataType.TEXT, + DataType.UNSIGNED_LONG, + DataType.NULL + ), + "boolean or numeric or string" + ), + Map.entry( + Set.of( + DataType.DATETIME, + DataType.DOUBLE, + DataType.INTEGER, + DataType.KEYWORD, + DataType.LONG, + DataType.TEXT, + DataType.UNSIGNED_LONG, + DataType.NULL + ), + "datetime or numeric or string" + ), + // What Add accepts + Map.entry( + Set.of( + DataType.DATE_PERIOD, + DataType.DATETIME, + DataType.DOUBLE, + DataType.INTEGER, + DataType.LONG, + DataType.NULL, + DataType.TIME_DURATION, + DataType.UNSIGNED_LONG + ), + "datetime or numeric" + ), + Map.entry( + Set.of( + DataType.BOOLEAN, + DataType.DATETIME, + DataType.DOUBLE, + DataType.INTEGER, + DataType.KEYWORD, + DataType.LONG, + DataType.TEXT, + DataType.UNSIGNED_LONG, + DataType.NULL + ), + "boolean or datetime or numeric or string" + ), + // to_int + Map.entry( + Set.of( + DataType.BOOLEAN, + DataType.COUNTER_INTEGER, + DataType.DATETIME, + DataType.DOUBLE, + DataType.INTEGER, + DataType.KEYWORD, + DataType.LONG, + DataType.TEXT, + DataType.UNSIGNED_LONG, + DataType.NULL + ), + "boolean or counter_integer or datetime or numeric or string" + ), + // to_long + Map.entry( + Set.of( + DataType.BOOLEAN, + DataType.COUNTER_INTEGER, + DataType.COUNTER_LONG, + DataType.DATETIME, + DataType.DOUBLE, + DataType.INTEGER, + DataType.KEYWORD, + DataType.LONG, + DataType.TEXT, + DataType.UNSIGNED_LONG, + 
DataType.NULL + ), + "boolean or counter_integer or counter_long or datetime or numeric or string" + ), + // to_double + Map.entry( + Set.of( + DataType.BOOLEAN, + DataType.COUNTER_DOUBLE, + DataType.COUNTER_INTEGER, + DataType.COUNTER_LONG, + DataType.DATETIME, + DataType.DOUBLE, + DataType.INTEGER, + DataType.KEYWORD, + DataType.LONG, + DataType.TEXT, + DataType.UNSIGNED_LONG, + DataType.NULL + ), + "boolean or counter_double or counter_integer or counter_long or datetime or numeric or string" + ), + Map.entry( + Set.of( + DataType.BOOLEAN, + DataType.CARTESIAN_POINT, + DataType.DATETIME, + DataType.DOUBLE, + DataType.GEO_POINT, + DataType.INTEGER, + DataType.KEYWORD, + DataType.LONG, + DataType.TEXT, + DataType.UNSIGNED_LONG, + DataType.NULL + ), + "boolean or cartesian_point or datetime or geo_point or numeric or string" + ), + Map.entry( + Set.of( + DataType.DATETIME, + DataType.DOUBLE, + DataType.INTEGER, + DataType.IP, + DataType.KEYWORD, + DataType.LONG, + DataType.TEXT, + DataType.UNSIGNED_LONG, + DataType.VERSION, + DataType.NULL + ), + "datetime, double, integer, ip, keyword, long, text, unsigned_long or version" + ), + Map.entry( + Set.of( + DataType.BOOLEAN, + DataType.DATETIME, + DataType.DOUBLE, + DataType.GEO_POINT, + DataType.GEO_SHAPE, + DataType.INTEGER, + DataType.IP, + DataType.KEYWORD, + DataType.LONG, + DataType.TEXT, + DataType.UNSIGNED_LONG, + DataType.VERSION, + DataType.NULL + ), + "cartesian_point or datetime or geo_point or numeric or string" + ), + Map.entry(Set.of(DataType.GEO_POINT, DataType.KEYWORD, DataType.TEXT, DataType.NULL), "geo_point or string"), + Map.entry(Set.of(DataType.CARTESIAN_POINT, DataType.KEYWORD, DataType.TEXT, DataType.NULL), "cartesian_point or string"), + Map.entry( + Set.of(DataType.GEO_POINT, DataType.GEO_SHAPE, DataType.KEYWORD, DataType.TEXT, DataType.NULL), + "geo_point or geo_shape or string" + ), + Map.entry( + Set.of(DataType.CARTESIAN_POINT, DataType.CARTESIAN_SHAPE, DataType.KEYWORD, DataType.TEXT, DataType.NULL), + "cartesian_point or cartesian_shape or string" + ), + Map.entry(Set.of(DataType.GEO_POINT, DataType.CARTESIAN_POINT, DataType.NULL), "geo_point or cartesian_point"), + Map.entry(Set.of(DataType.DATE_PERIOD, DataType.TIME_DURATION, DataType.NULL), "dateperiod or timeduration") + ); + + // TODO: generate this message dynamically, a la AbstractConvertFunction#supportedTypesNames()? + private static String expectedType(Set validTypes) { + String named = NAMED_EXPECTED_TYPES.get(validTypes); + if (named == null) { + /* + * Note for anyone who's test lands here - it's likely that you + * don't have a test case covering explicit `null` arguments in + * this position. Generally you can get that with anyNullIsNull. 
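+     * If the combination is genuinely new, {@code expectedType} will keep throwing until a matching entry is added to
+     * {@code NAMED_EXPECTED_TYPES}, e.g. (purely illustrative): {@code Map.entry(Set.of(DataType.IP, DataType.VERSION, DataType.NULL), "ip or version")}.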
+ */ + throw new UnsupportedOperationException( + "can't guess expected types for " + validTypes.stream().sorted(Comparator.comparing(t -> t.typeName())).toList() + ); + } + return named; + } + + protected static Stream representable() { + return DataType.types().stream().filter(EsqlDataTypes::isRepresentable); + } + + protected static DataType[] representableTypes() { + return representable().toArray(DataType[]::new); + } + + protected static Stream representableNonSpatial() { + return representable().filter(t -> isSpatial(t) == false); + } + + protected static DataType[] representableNonSpatialTypes() { + return representableNonSpatial().toArray(DataType[]::new); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/DeepCopy.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/DeepCopy.java index 954d26b6de137..d25305a9ea190 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/DeepCopy.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/DeepCopy.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.expression.function; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockUtils; import org.elasticsearch.compute.data.Page; @@ -18,6 +19,7 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; +import java.io.IOException; import java.util.function.Function; /** @@ -28,6 +30,16 @@ public DeepCopy(Source source, Expression child) { super(source, child); } + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + @Override public EvalOperator.ExpressionEvaluator.Factory toEvaluator( Function toEvaluator diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistryTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistryTests.java index 4d50069d2f830..94549f6dfbdec 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistryTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistryTests.java @@ -7,32 +7,107 @@ package org.elasticsearch.xpack.esql.expression.function; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.ParsingException; import org.elasticsearch.xpack.esql.core.QlIllegalArgumentException; +import org.elasticsearch.xpack.esql.core.TestUtils; import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.function.FunctionDefinition; -import org.elasticsearch.xpack.esql.core.expression.function.FunctionRegistry; -import org.elasticsearch.xpack.esql.core.expression.function.FunctionRegistryTests; -import org.elasticsearch.xpack.esql.core.expression.function.FunctionResolutionStrategy; -import org.elasticsearch.xpack.esql.core.expression.function.UnresolvedFunction; +import org.elasticsearch.xpack.esql.core.expression.function.scalar.ScalarFunction; +import 
org.elasticsearch.xpack.esql.core.session.Configuration; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.tree.SourceTests; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlConfigurationFunction; +import java.io.IOException; import java.util.Arrays; +import java.util.List; +import java.util.function.Function; +import static java.util.Collections.emptyList; import static org.elasticsearch.xpack.esql.EsqlTestUtils.randomConfiguration; -import static org.elasticsearch.xpack.esql.core.expression.function.FunctionRegistry.def; -import static org.elasticsearch.xpack.esql.core.expression.function.FunctionResolutionStrategy.DEFAULT; +import static org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry.def; +import static org.elasticsearch.xpack.esql.expression.function.FunctionResolutionStrategy.DEFAULT; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.mock; public class EsqlFunctionRegistryTests extends ESTestCase { + public void testNoArgFunction() { + UnresolvedFunction ur = uf(DEFAULT); + EsqlFunctionRegistry r = new EsqlFunctionRegistry(def(DummyFunction.class, DummyFunction::new, "dummyFunction")); + FunctionDefinition def = r.resolveFunction(ur.name()); + assertEquals(ur.source(), ur.buildResolved(TestUtils.randomConfiguration(), def).source()); + } + + public void testBinaryFunction() { + UnresolvedFunction ur = uf(DEFAULT, mock(Expression.class), mock(Expression.class)); + EsqlFunctionRegistry r = new EsqlFunctionRegistry(def(DummyFunction.class, (Source l, Expression lhs, Expression rhs) -> { + assertSame(lhs, ur.children().get(0)); + assertSame(rhs, ur.children().get(1)); + return new DummyFunction(l); + }, "dummyFunction")); + FunctionDefinition def = r.resolveFunction(ur.name()); + assertEquals(ur.source(), ur.buildResolved(TestUtils.randomConfiguration(), def).source()); + + // No children aren't supported + ParsingException e = expectThrows(ParsingException.class, () -> uf(DEFAULT).buildResolved(TestUtils.randomConfiguration(), def)); + assertThat(e.getMessage(), endsWith("expects exactly two arguments")); + + // One child isn't supported + e = expectThrows( + ParsingException.class, + () -> uf(DEFAULT, mock(Expression.class)).buildResolved(TestUtils.randomConfiguration(), def) + ); + assertThat(e.getMessage(), endsWith("expects exactly two arguments")); + + // Many children aren't supported + e = expectThrows( + ParsingException.class, + () -> uf(DEFAULT, mock(Expression.class), mock(Expression.class), mock(Expression.class)).buildResolved( + TestUtils.randomConfiguration(), + def + ) + ); + assertThat(e.getMessage(), endsWith("expects exactly two arguments")); + } + + public void testAliasNameIsTheSameAsAFunctionName() { + EsqlFunctionRegistry r = new EsqlFunctionRegistry(def(DummyFunction.class, DummyFunction::new, "DUMMY_FUNCTION", "ALIAS")); + QlIllegalArgumentException iae = expectThrows( + QlIllegalArgumentException.class, + () -> r.register(def(DummyFunction2.class, DummyFunction2::new, "DUMMY_FUNCTION2", "DUMMY_FUNCTION")) + ); + assertEquals("alias [DUMMY_FUNCTION] is used by [DUMMY_FUNCTION] and [DUMMY_FUNCTION2]", iae.getMessage()); + } + + public void testDuplicateAliasInTwoDifferentFunctionsFromTheSameBatch() { + QlIllegalArgumentException iae = expectThrows( + QlIllegalArgumentException.class, + () -> new 
EsqlFunctionRegistry( + def(DummyFunction.class, DummyFunction::new, "DUMMY_FUNCTION", "ALIAS"), + def(DummyFunction2.class, DummyFunction2::new, "DUMMY_FUNCTION2", "ALIAS") + ) + ); + assertEquals("alias [ALIAS] is used by [DUMMY_FUNCTION(ALIAS)] and [DUMMY_FUNCTION2]", iae.getMessage()); + } + + public void testDuplicateAliasInTwoDifferentFunctionsFromTwoDifferentBatches() { + EsqlFunctionRegistry r = new EsqlFunctionRegistry(def(DummyFunction.class, DummyFunction::new, "DUMMY_FUNCTION", "ALIAS")); + QlIllegalArgumentException iae = expectThrows( + QlIllegalArgumentException.class, + () -> r.register(def(DummyFunction2.class, DummyFunction2::new, "DUMMY_FUNCTION2", "ALIAS")) + ); + assertEquals("alias [ALIAS] is used by [DUMMY_FUNCTION] and [DUMMY_FUNCTION2]", iae.getMessage()); + } + public void testFunctionResolving() { UnresolvedFunction ur = uf(DEFAULT, mock(Expression.class)); - FunctionRegistry r = new EsqlFunctionRegistry(defineDummyFunction(ur, "dummyfunction", "dummyfunc")); + EsqlFunctionRegistry r = new EsqlFunctionRegistry(defineDummyFunction(ur, "dummyfunction", "dummyfunc")); // Resolve by primary name FunctionDefinition def; @@ -63,7 +138,7 @@ public void testFunctionResolving() { public void testUnaryFunction() { UnresolvedFunction ur = uf(DEFAULT, mock(Expression.class)); - FunctionRegistry r = new EsqlFunctionRegistry(defineDummyUnaryFunction(ur)); + EsqlFunctionRegistry r = new EsqlFunctionRegistry(defineDummyUnaryFunction(ur)); FunctionDefinition def = r.resolveFunction(ur.name()); // No children aren't supported @@ -78,14 +153,27 @@ public void testUnaryFunction() { assertThat(e.getMessage(), endsWith("expects exactly one argument")); } + public void testConfigurationOptionalFunction() { + UnresolvedFunction ur = uf(DEFAULT, mock(Expression.class)); + FunctionDefinition def; + EsqlFunctionRegistry r = new EsqlFunctionRegistry( + def(DummyConfigurationOptionalArgumentFunction.class, (Source l, Expression e, Configuration c) -> { + assertSame(e, ur.children().get(0)); + return new DummyConfigurationOptionalArgumentFunction(l, List.of(ur), c); + }, "dummy") + ); + def = r.resolveFunction(r.resolveAlias("DUMMY")); + assertEquals(ur.source(), ur.buildResolved(randomConfiguration(), def).source()); + } + private static UnresolvedFunction uf(FunctionResolutionStrategy resolutionStrategy, Expression... children) { return new UnresolvedFunction(SourceTests.randomSource(), "dummyFunction", resolutionStrategy, Arrays.asList(children)); } private static FunctionDefinition defineDummyFunction(UnresolvedFunction ur, String... 
names) { - return def(FunctionRegistryTests.DummyFunction.class, (Source l, Expression e) -> { + return def(DummyFunction.class, (Source l, Expression e) -> { assertSame(e, ur.children().get(0)); - return new FunctionRegistryTests.DummyFunction(l); + return new DummyFunction(l); }, names); } @@ -104,4 +192,80 @@ private String randomCapitalizedString(String input) { } return output.toString(); } + + public static class DummyFunction extends ScalarFunction { + public DummyFunction(Source source) { + super(source, emptyList()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this); + } + + @Override + public Expression replaceChildren(List newChildren) { + throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); + } + + @Override + public DataType dataType() { + return null; + } + } + + public static class DummyFunction2 extends DummyFunction { + public DummyFunction2(Source source) { + super(source); + } + } + + public static class DummyConfigurationOptionalArgumentFunction extends EsqlConfigurationFunction implements OptionalArgument { + + public DummyConfigurationOptionalArgumentFunction(Source source, List fields, Configuration configuration) { + super(source, fields, configuration); + } + + @Override + public void writeTo(StreamOutput out) { + throw new UnsupportedOperationException(); + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + + @Override + public DataType dataType() { + return null; + } + + @Override + public Expression replaceChildren(List newChildren) { + return new DummyConfigurationOptionalArgumentFunction(source(), newChildren, configuration()); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, DummyConfigurationOptionalArgumentFunction::new, children(), configuration()); + } + + @Override + public EvalOperator.ExpressionEvaluator.Factory toEvaluator( + Function toEvaluator + ) { + return null; + } + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FunctionName.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FunctionName.java index b4a5d3bdc2b92..9807cb5365e54 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FunctionName.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FunctionName.java @@ -13,7 +13,7 @@ import java.lang.annotation.Target; /** - * Tests that extend AbstractFunctionTestCase can use this annotation to specify the name of the function + * Tests that extend {@link AbstractScalarFunctionTestCase} can use this annotation to specify the name of the function * to use when generating documentation files while running tests. * If this is not used, the name will be deduced from the test class name, by removing the "Test" suffix, and converting * the class name to snake case. 
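 * For example (illustrative, following the rule above): a test class named {@code FooBarTest} would be deduced as {@code foo_bar}.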
This annotation can be used to override that behavior, for cases where the deduced name diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultiRowTestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultiRowTestCaseSupplier.java new file mode 100644 index 0000000000000..68f5414302c9d --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultiRowTestCaseSupplier.java @@ -0,0 +1,303 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.core.type.DataType; + +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.test.ESTestCase.randomBoolean; +import static org.elasticsearch.test.ESTestCase.randomList; +import static org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier.TypedDataSupplier; + +/** + * Extension of {@link TestCaseSupplier} that provided multi-row test cases. + */ +public final class MultiRowTestCaseSupplier { + + private MultiRowTestCaseSupplier() {} + + public static List intCases(int minRows, int maxRows, int min, int max, boolean includeZero) { + List cases = new ArrayList<>(); + + if (0 <= max && 0 >= min && includeZero) { + cases.add(new TypedDataSupplier("<0 ints>", () -> randomList(minRows, maxRows, () -> 0), DataType.INTEGER, false, true)); + } + + if (max != 0) { + cases.add( + new TypedDataSupplier("<" + max + " ints>", () -> randomList(minRows, maxRows, () -> max), DataType.INTEGER, false, true) + ); + } + + if (min != 0 && min != max) { + cases.add( + new TypedDataSupplier("<" + min + " ints>", () -> randomList(minRows, maxRows, () -> min), DataType.INTEGER, false, true) + ); + } + + int lower = Math.max(min, 1); + int upper = Math.min(max, Integer.MAX_VALUE); + if (lower < upper) { + cases.add( + new TypedDataSupplier( + "", + () -> randomList(minRows, maxRows, () -> ESTestCase.randomIntBetween(lower, upper)), + DataType.INTEGER, + false, + true + ) + ); + } + + int lower1 = Math.max(min, Integer.MIN_VALUE); + int upper1 = Math.min(max, -1); + if (lower1 < upper1) { + cases.add( + new TypedDataSupplier( + "", + () -> randomList(minRows, maxRows, () -> ESTestCase.randomIntBetween(lower1, upper1)), + DataType.INTEGER, + false, + true + ) + ); + } + + if (min < 0 && max > 0) { + cases.add(new TypedDataSupplier("", () -> randomList(minRows, maxRows, () -> { + if (includeZero) { + return ESTestCase.randomIntBetween(min, max); + } + return randomBoolean() ? 
ESTestCase.randomIntBetween(min, -1) : ESTestCase.randomIntBetween(1, max); + }), DataType.INTEGER, false, true)); + } + + return cases; + } + + public static List longCases(int minRows, int maxRows, long min, long max, boolean includeZero) { + List cases = new ArrayList<>(); + + if (0 <= max && 0 >= min && includeZero) { + cases.add(new TypedDataSupplier("<0 longs>", () -> randomList(minRows, maxRows, () -> 0L), DataType.LONG, false, true)); + } + + if (max != 0) { + cases.add( + new TypedDataSupplier("<" + max + " longs>", () -> randomList(minRows, maxRows, () -> max), DataType.LONG, false, true) + ); + } + + if (min != 0 && min != max) { + cases.add( + new TypedDataSupplier("<" + min + " longs>", () -> randomList(minRows, maxRows, () -> min), DataType.LONG, false, true) + ); + } + + long lower = Math.max(min, 1); + long upper = Math.min(max, Long.MAX_VALUE); + if (lower < upper) { + cases.add( + new TypedDataSupplier( + "", + () -> randomList(minRows, maxRows, () -> ESTestCase.randomLongBetween(lower, upper)), + DataType.LONG, + false, + true + ) + ); + } + + long lower1 = Math.max(min, Long.MIN_VALUE); + long upper1 = Math.min(max, -1); + if (lower1 < upper1) { + cases.add( + new TypedDataSupplier( + "", + () -> randomList(minRows, maxRows, () -> ESTestCase.randomLongBetween(lower1, upper1)), + DataType.LONG, + false, + true + ) + ); + } + + if (min < 0 && max > 0) { + cases.add(new TypedDataSupplier("", () -> randomList(minRows, maxRows, () -> { + if (includeZero) { + return ESTestCase.randomLongBetween(min, max); + } + return randomBoolean() ? ESTestCase.randomLongBetween(min, -1) : ESTestCase.randomLongBetween(1, max); + }), DataType.LONG, false, true)); + } + + return cases; + } + + public static List doubleCases(int minRows, int maxRows, double min, double max, boolean includeZero) { + List cases = new ArrayList<>(); + + if (0d <= max && 0d >= min && includeZero) { + cases.add(new TypedDataSupplier("<0 doubles>", () -> randomList(minRows, maxRows, () -> 0d), DataType.DOUBLE, false, true)); + cases.add(new TypedDataSupplier("<-0 doubles>", () -> randomList(minRows, maxRows, () -> -0d), DataType.DOUBLE, false, true)); + } + + if (max != 0d) { + cases.add( + new TypedDataSupplier("<" + max + " doubles>", () -> randomList(minRows, maxRows, () -> max), DataType.DOUBLE, false, true) + ); + } + + if (min != 0d && min != max) { + cases.add( + new TypedDataSupplier("<" + min + " doubles>", () -> randomList(minRows, maxRows, () -> min), DataType.DOUBLE, false, true) + ); + } + + double lower1 = Math.max(min, 0d); + double upper1 = Math.min(max, 1d); + if (lower1 < upper1) { + cases.add( + new TypedDataSupplier( + "", + () -> randomList(minRows, maxRows, () -> ESTestCase.randomDoubleBetween(lower1, upper1, true)), + DataType.DOUBLE, + false, + true + ) + ); + } + + double lower2 = Math.max(min, -1d); + double upper2 = Math.min(max, 0d); + if (lower2 < upper2) { + cases.add( + new TypedDataSupplier( + "", + () -> randomList(minRows, maxRows, () -> ESTestCase.randomDoubleBetween(lower2, upper2, true)), + DataType.DOUBLE, + false, + true + ) + ); + } + + double lower3 = Math.max(min, 1d); + double upper3 = Math.min(max, Double.MAX_VALUE); + if (lower3 < upper3) { + cases.add( + new TypedDataSupplier( + "", + () -> randomList(minRows, maxRows, () -> ESTestCase.randomDoubleBetween(lower3, upper3, true)), + DataType.DOUBLE, + false, + true + ) + ); + } + + double lower4 = Math.max(min, -Double.MAX_VALUE); + double upper4 = Math.min(max, -1d); + if (lower4 < upper4) { + cases.add( + new 
TypedDataSupplier( + "", + () -> randomList(minRows, maxRows, () -> ESTestCase.randomDoubleBetween(lower4, upper4, true)), + DataType.DOUBLE, + false, + true + ) + ); + } + + if (min < 0 && max > 0) { + cases.add(new TypedDataSupplier("", () -> randomList(minRows, maxRows, () -> { + if (includeZero) { + return ESTestCase.randomDoubleBetween(min, max, true); + } + return randomBoolean() ? ESTestCase.randomDoubleBetween(min, -1, true) : ESTestCase.randomDoubleBetween(1, max, true); + }), DataType.DOUBLE, false, true)); + } + + return cases; + } + + public static List dateCases(int minRows, int maxRows) { + List cases = new ArrayList<>(); + + cases.add( + new TypedDataSupplier( + "<1970-01-01T00:00:00Z dates>", + () -> randomList(minRows, maxRows, () -> 0L), + DataType.DATETIME, + false, + true + ) + ); + + cases.add( + new TypedDataSupplier( + "", + // 1970-01-01T00:00:00Z - 2286-11-20T17:46:40Z + () -> randomList(minRows, maxRows, () -> ESTestCase.randomLongBetween(0, 10 * (long) 10e11)), + DataType.DATETIME, + false, + true + ) + ); + + cases.add( + new TypedDataSupplier( + "", + // 2286-11-20T17:46:40Z - +292278994-08-17T07:12:55.807Z + () -> randomList(minRows, maxRows, () -> ESTestCase.randomLongBetween(10 * (long) 10e11, Long.MAX_VALUE)), + DataType.DATETIME, + false, + true + ) + ); + + cases.add( + new TypedDataSupplier( + "", + // very close to +292278994-08-17T07:12:55.807Z, the maximum supported millis since epoch + () -> randomList(minRows, maxRows, () -> ESTestCase.randomLongBetween(Long.MAX_VALUE / 100 * 99, Long.MAX_VALUE)), + DataType.DATETIME, + false, + true + ) + ); + + return cases; + } + + public static List booleanCases(int minRows, int maxRows) { + List cases = new ArrayList<>(); + + cases.add(new TypedDataSupplier("", () -> randomList(minRows, maxRows, () -> true), DataType.BOOLEAN, false, true)); + + cases.add( + new TypedDataSupplier("", () -> randomList(minRows, maxRows, () -> false), DataType.BOOLEAN, false, true) + ); + + cases.add( + new TypedDataSupplier( + "", + () -> randomList(minRows, maxRows, ESTestCase::randomBoolean), + DataType.BOOLEAN, + false, + true + ) + ); + + return cases; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java index 6ef370fd2da35..4e00fa9f41fbd 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java @@ -20,7 +20,6 @@ import net.nextencia.rrdiagram.grammar.rrdiagram.RRText; import org.elasticsearch.common.util.LazyInitializable; -import org.elasticsearch.xpack.esql.core.expression.function.FunctionDefinition; import java.awt.Font; import java.awt.FontFormatException; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java index 7eadad58ec09b..6ece7151ccd7a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java @@ -820,6 +820,12 @@ public static void unary( unary(suppliers, expectedEvaluatorToString, valueSuppliers, expectedOutputType, expected, unused -> 
warnings); } + /** + * Generate cases for {@link DataType#INTEGER}. + * <p> + * For multi-row parameters, see {@link MultiRowTestCaseSupplier#intCases}. + * </p>
+ */ public static List intCases(int min, int max, boolean includeZero) { List cases = new ArrayList<>(); if (0 <= max && 0 >= min && includeZero) { @@ -844,6 +850,12 @@ public static List intCases(int min, int max, boolean include return cases; } + /** + * Generate cases for {@link DataType#LONG}. + * <p> + * For multi-row parameters, see {@link MultiRowTestCaseSupplier#longCases}. + * </p>
+ */ public static List longCases(long min, long max, boolean includeZero) { List cases = new ArrayList<>(); if (0L <= max && 0L >= min && includeZero) { @@ -909,6 +921,12 @@ public static List ulongCases(BigInteger min, BigInteger max, return cases; } + /** + * Generate cases for {@link DataType#DOUBLE}. + * <p> + * For multi-row parameters, see {@link MultiRowTestCaseSupplier#doubleCases}. + * </p>
+ */ public static List doubleCases(double min, double max, boolean includeZero) { List cases = new ArrayList<>(); @@ -980,6 +998,12 @@ public static List booleanCases() { ); } + /** + * Generate cases for {@link DataType#DATETIME}. + * <p> + * For multi-row parameters, see {@link MultiRowTestCaseSupplier#dateCases}. + * </p>
      + */ public static List dateCases() { return List.of( new TypedDataSupplier("<1970-01-01T00:00:00Z>", () -> 0L, DataType.DATETIME), @@ -1301,6 +1325,14 @@ public List getDataValues() { return data.stream().filter(d -> d.forceLiteral == false).map(TypedData::data).collect(Collectors.toList()); } + public List getMultiRowFields() { + return data.stream().filter(TypedData::isMultiRow).collect(Collectors.toList()); + } + + public boolean canGetDataAsLiterals() { + return data.stream().noneMatch(d -> d.isMultiRow() && d.multiRowData().size() != 1); + } + public boolean canBuildEvaluator() { return canBuildEvaluator; } @@ -1363,14 +1395,18 @@ public Matcher evaluatorToString() { * exists because we can't generate random values from the test parameter generation functions, and instead need to return * suppliers which generate the random values at test execution time. */ - public record TypedDataSupplier(String name, Supplier supplier, DataType type, boolean forceLiteral) { + public record TypedDataSupplier(String name, Supplier supplier, DataType type, boolean forceLiteral, boolean multiRow) { + + public TypedDataSupplier(String name, Supplier supplier, DataType type, boolean forceLiteral) { + this(name, supplier, type, forceLiteral, false); + } public TypedDataSupplier(String name, Supplier supplier, DataType type) { - this(name, supplier, type, false); + this(name, supplier, type, false, false); } public TypedData get() { - return new TypedData(supplier.get(), type, name, forceLiteral); + return new TypedData(supplier.get(), type, name, forceLiteral, multiRow); } } @@ -1384,14 +1420,19 @@ public static class TypedData { private final DataType type; private final String name; private final boolean forceLiteral; + private final boolean multiRow; /** * @param data value to test against * @param type type of the value, for building expressions * @param name a name for the value, used for generating test case names * @param forceLiteral should this data always be converted to a literal and never to a field reference? + * @param multiRow if true, data is expected to be a List of values, one per row */ - private TypedData(Object data, DataType type, String name, boolean forceLiteral) { + private TypedData(Object data, DataType type, String name, boolean forceLiteral, boolean multiRow) { + assert multiRow == false || data instanceof List : "multiRow data must be a List"; + assert multiRow == false || forceLiteral == false : "multiRow data can't be converted to a literal"; + if (type == DataType.UNSIGNED_LONG && data instanceof BigInteger b) { this.data = NumericUtils.asLongUnsigned(b); } else { @@ -1400,6 +1441,7 @@ private TypedData(Object data, DataType type, String name, boolean forceLiteral) this.type = type; this.name = name; this.forceLiteral = forceLiteral; + this.multiRow = multiRow; } /** @@ -1408,7 +1450,7 @@ private TypedData(Object data, DataType type, String name, boolean forceLiteral) * @param name a name for the value, used for generating test case names */ public TypedData(Object data, DataType type, String name) { - this(data, type, name, false); + this(data, type, name, false, false); } /** @@ -1420,13 +1462,23 @@ public TypedData(Object data, String name) { this(data, DataType.fromJava(data), name); } + /** + * Create a TypedData object for field to be aggregated. 
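+     * A minimal illustrative call (the value list, type and name here are arbitrary):
+     * {@code TypedData.multiRow(List.of(1, 2, 3), DataType.INTEGER, "field")}.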
+ * @param data values to test against, one per row + * @param type type of the value, for building expressions + * @param name a name for the value, used for generating test case names + */ + public static TypedData multiRow(List data, DataType type, String name) { + return new TypedData(data, type, name, false, true); + } + /** * Return a {@link TypedData} that always returns a {@link Literal} from * {@link #asField} and {@link #asDeepCopyOfField}. Use this for things that * must be constants. */ public TypedData forceLiteral() { - return new TypedData(data, type, name, true); + return new TypedData(data, type, name, true, multiRow); } /** @@ -1437,11 +1489,19 @@ public boolean isForceLiteral() { } /** - * Return a {@link TypedData} that always returns {@code null} for it's - * value without modifying anything else in the supplier. + * If true, the data is expected to be a List of values, one per row. + */ + public boolean isMultiRow() { + return multiRow; + } + + /** + * Return a {@link TypedData} with the new data. + * + * @param data The new data for the {@link TypedData}. */ - public TypedData forceValueToNull() { - return new TypedData(null, type, name, forceLiteral); + public TypedData withData(Object data) { + return new TypedData(data, type, name, forceLiteral, multiRow); } @Override @@ -1476,6 +1536,15 @@ public Expression asDeepCopyOfField() { * Convert this into a {@link Literal}. */ public Literal asLiteral() { + if (multiRow) { + var values = multiRowData(); + + if (values.size() != 1) { + throw new IllegalStateException("Multirow values require exactly 1 element to be a literal, got " + values.size()); + } + + return new Literal(Source.synthetic(name), values, type); + } return new Literal(Source.synthetic(name), data, type); } @@ -1486,6 +1555,14 @@ public Object data() { return data; } + /** + * Values to test against. + */ + @SuppressWarnings("unchecked") + public List multiRowData() { + return (List) data; + } + /** * @return the data value being supplied, casting unsigned longs into BigIntegers correctly */ diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/function/UnresolvedFunctionTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/UnresolvedFunctionTests.java similarity index 99% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/function/UnresolvedFunctionTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/UnresolvedFunctionTests.java index 9d29aaf63139f..7cb547876e532 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/expression/function/UnresolvedFunctionTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/UnresolvedFunctionTests.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.xpack.esql.core.expression.function; +package org.elasticsearch.xpack.esql.expression.function; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.AbstractNodeTestCase; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AvgSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AvgSerializationTests.java new file mode 100644 index 0000000000000..52d3128af5c1c --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AvgSerializationTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class AvgSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected Avg createTestInstance() { + return new Avg(randomSource(), randomChild()); + } + + @Override + protected Avg mutateInstance(Avg instance) throws IOException { + return new Avg(instance.source(), randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild)); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AvgTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AvgTests.java new file mode 100644 index 0000000000000..f456bd409059a --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AvgTests.java @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.AbstractAggregationTestCase; +import org.elasticsearch.xpack.esql.expression.function.MultiRowTestCaseSupplier; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.equalTo; + +public class AvgTests extends AbstractAggregationTestCase { + public AvgTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + var suppliers = new ArrayList(); + + Stream.of( + MultiRowTestCaseSupplier.intCases(1, 1000, Integer.MIN_VALUE, Integer.MAX_VALUE, true), + MultiRowTestCaseSupplier.longCases(1, 1000, Long.MIN_VALUE, Long.MAX_VALUE, true), + MultiRowTestCaseSupplier.doubleCases(1, 1000, -Double.MAX_VALUE, Double.MAX_VALUE, true) + ).flatMap(List::stream).map(AvgTests::makeSupplier).collect(Collectors.toCollection(() -> suppliers)); + + suppliers.add( + // Folding + new TestCaseSupplier( + List.of(DataType.INTEGER), + () -> new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(200), DataType.INTEGER, "field")), + "Avg[field=Attribute[channel=0]]", + DataType.DOUBLE, + equalTo(200.) + ) + ) + ); + + return parameterSuppliersFromTypedDataWithDefaultChecks(suppliers); + } + + @Override + protected Expression build(Source source, List args) { + return new Avg(source, args.get(0)); + } + + private static TestCaseSupplier makeSupplier(TestCaseSupplier.TypedDataSupplier fieldSupplier) { + return new TestCaseSupplier(List.of(fieldSupplier.type()), () -> { + var fieldTypedData = fieldSupplier.get(); + + Object expected = switch (fieldTypedData.type().widenSmallNumeric()) { + case INTEGER -> fieldTypedData.multiRowData() + .stream() + .map(v -> (Integer) v) + .collect(Collectors.summarizingInt(Integer::intValue)) + .getAverage(); + case LONG -> fieldTypedData.multiRowData() + .stream() + .map(v -> (Long) v) + .collect(Collectors.summarizingLong(Long::longValue)) + .getAverage(); + case DOUBLE -> fieldTypedData.multiRowData() + .stream() + .map(v -> (Double) v) + .collect(Collectors.summarizingDouble(Double::doubleValue)) + .getAverage(); + default -> throw new IllegalStateException("Unexpected value: " + fieldTypedData.type()); + }; + + return new TestCaseSupplier.TestCase( + List.of(fieldTypedData), + "Avg[field=Attribute[channel=0]]", + DataType.DOUBLE, + equalTo(expected) + ); + }); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinctSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinctSerializationTests.java new file mode 100644 index 0000000000000..ab06b0b58f7f0 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinctSerializationTests.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class CountDistinctSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected CountDistinct createTestInstance() { + Source source = randomSource(); + Expression field = randomChild(); + Expression precision = randomBoolean() ? null : randomChild(); + return new CountDistinct(source, field, precision); + } + + @Override + protected CountDistinct mutateInstance(CountDistinct instance) throws IOException { + Source source = randomSource(); + Expression field = instance.field(); + Expression precision = instance.precision(); + if (randomBoolean()) { + field = randomValueOtherThan(field, AbstractExpressionSerializationTests::randomChild); + } else { + precision = randomValueOtherThan(precision, () -> randomBoolean() ? null : randomChild()); + } + return new CountDistinct(source, field, precision); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountSerializationTests.java new file mode 100644 index 0000000000000..133979f66860c --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountSerializationTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class CountSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected Count createTestInstance() { + return new Count(randomSource(), randomChild()); + } + + @Override + protected Count mutateInstance(Count instance) throws IOException { + return new Count(instance.source(), randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild)); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxSerializationTests.java new file mode 100644 index 0000000000000..7a732883a99a5 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxSerializationTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class MaxSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected Max createTestInstance() { + return new Max(randomSource(), randomChild()); + } + + @Override + protected Max mutateInstance(Max instance) throws IOException { + return new Max(instance.source(), randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild)); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java new file mode 100644 index 0000000000000..3fddaff226f3e --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java @@ -0,0 +1,170 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.AbstractAggregationTestCase; +import org.elasticsearch.xpack.esql.expression.function.MultiRowTestCaseSupplier; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.equalTo; + +public class MaxTests extends AbstractAggregationTestCase { + public MaxTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + var suppliers = new ArrayList(); + + Stream.of( + MultiRowTestCaseSupplier.intCases(1, 1000, Integer.MIN_VALUE, Integer.MAX_VALUE, true), + MultiRowTestCaseSupplier.longCases(1, 1000, Long.MIN_VALUE, Long.MAX_VALUE, true), + MultiRowTestCaseSupplier.doubleCases(1, 1000, -Double.MAX_VALUE, Double.MAX_VALUE, true), + MultiRowTestCaseSupplier.dateCases(1, 1000), + MultiRowTestCaseSupplier.booleanCases(1, 1000) + ).flatMap(List::stream).map(MaxTests::makeSupplier).collect(Collectors.toCollection(() -> suppliers)); + + suppliers.addAll( + List.of( + // Surrogates + new TestCaseSupplier( + List.of(DataType.INTEGER), + () -> new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(5, 8, -2, 0, 200), DataType.INTEGER, "field")), + "Max[field=Attribute[channel=0]]", + DataType.INTEGER, + equalTo(200) + ) + ), + new TestCaseSupplier( + List.of(DataType.LONG), + () -> new TestCaseSupplier.TestCase( + 
List.of(TestCaseSupplier.TypedData.multiRow(List.of(5L, 8L, -2L, 0L, 200L), DataType.LONG, "field")), + "Max[field=Attribute[channel=0]]", + DataType.LONG, + equalTo(200L) + ) + ), + new TestCaseSupplier( + List.of(DataType.DOUBLE), + () -> new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(5., 8., -2., 0., 200.), DataType.DOUBLE, "field")), + "Max[field=Attribute[channel=0]]", + DataType.DOUBLE, + equalTo(200.) + ) + ), + new TestCaseSupplier( + List.of(DataType.DATETIME), + () -> new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(5L, 8L, 2L, 0L, 200L), DataType.DATETIME, "field")), + "Max[field=Attribute[channel=0]]", + DataType.DATETIME, + equalTo(200L) + ) + ), + new TestCaseSupplier( + List.of(DataType.BOOLEAN), + () -> new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(true, false, false, true), DataType.BOOLEAN, "field")), + "Max[field=Attribute[channel=0]]", + DataType.BOOLEAN, + equalTo(true) + ) + ), + + // Folding + new TestCaseSupplier( + List.of(DataType.INTEGER), + () -> new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(200), DataType.INTEGER, "field")), + "Max[field=Attribute[channel=0]]", + DataType.INTEGER, + equalTo(200) + ) + ), + new TestCaseSupplier( + List.of(DataType.LONG), + () -> new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(200L), DataType.LONG, "field")), + "Max[field=Attribute[channel=0]]", + DataType.LONG, + equalTo(200L) + ) + ), + new TestCaseSupplier( + List.of(DataType.DOUBLE), + () -> new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(200.), DataType.DOUBLE, "field")), + "Max[field=Attribute[channel=0]]", + DataType.DOUBLE, + equalTo(200.) 
+ ) + ), + new TestCaseSupplier( + List.of(DataType.DATETIME), + () -> new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(200L), DataType.DATETIME, "field")), + "Max[field=Attribute[channel=0]]", + DataType.DATETIME, + equalTo(200L) + ) + ), + new TestCaseSupplier( + List.of(DataType.BOOLEAN), + () -> new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(true), DataType.BOOLEAN, "field")), + "Max[field=Attribute[channel=0]]", + DataType.BOOLEAN, + equalTo(true) + ) + ) + ) + ); + + return parameterSuppliersFromTypedDataWithDefaultChecks(suppliers); + } + + @Override + protected Expression build(Source source, List args) { + return new Max(source, args.get(0)); + } + + @SuppressWarnings("unchecked") + private static TestCaseSupplier makeSupplier(TestCaseSupplier.TypedDataSupplier fieldSupplier) { + return new TestCaseSupplier(fieldSupplier.name(), List.of(fieldSupplier.type()), () -> { + var fieldTypedData = fieldSupplier.get(); + var expected = fieldTypedData.multiRowData() + .stream() + .map(v -> (Comparable>) v) + .max(Comparator.naturalOrder()) + .orElse(null); + + return new TestCaseSupplier.TestCase( + List.of(fieldTypedData), + "Max[field=Attribute[channel=0]]", + fieldSupplier.type(), + equalTo(expected) + ); + }); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MedianAbsoluteDeviationSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MedianAbsoluteDeviationSerializationTests.java new file mode 100644 index 0000000000000..bdbe839c46a75 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MedianAbsoluteDeviationSerializationTests.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class MedianAbsoluteDeviationSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected MedianAbsoluteDeviation createTestInstance() { + return new MedianAbsoluteDeviation(randomSource(), randomChild()); + } + + @Override + protected MedianAbsoluteDeviation mutateInstance(MedianAbsoluteDeviation instance) throws IOException { + return new MedianAbsoluteDeviation( + instance.source(), + randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild) + ); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MedianSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MedianSerializationTests.java new file mode 100644 index 0000000000000..75161977319ea --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MedianSerializationTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class MedianSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected Median createTestInstance() { + return new Median(randomSource(), randomChild()); + } + + @Override + protected Median mutateInstance(Median instance) throws IOException { + return new Median(instance.source(), randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild)); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinSerializationTests.java new file mode 100644 index 0000000000000..1ff434d8d2a76 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinSerializationTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class MinSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected Min createTestInstance() { + return new Min(randomSource(), randomChild()); + } + + @Override + protected Min mutateInstance(Min instance) throws IOException { + return new Min(instance.source(), randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild)); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java new file mode 100644 index 0000000000000..6f59928059bec --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java @@ -0,0 +1,170 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.AbstractAggregationTestCase; +import org.elasticsearch.xpack.esql.expression.function.MultiRowTestCaseSupplier; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.equalTo; + +public class MinTests extends AbstractAggregationTestCase { + public MinTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + var suppliers = new ArrayList(); + + Stream.of( + MultiRowTestCaseSupplier.intCases(1, 1000, Integer.MIN_VALUE, Integer.MAX_VALUE, true), + MultiRowTestCaseSupplier.longCases(1, 1000, Long.MIN_VALUE, Long.MAX_VALUE, true), + MultiRowTestCaseSupplier.doubleCases(1, 1000, -Double.MAX_VALUE, Double.MAX_VALUE, true), + MultiRowTestCaseSupplier.dateCases(1, 1000), + MultiRowTestCaseSupplier.booleanCases(1, 1000) + ).flatMap(List::stream).map(MinTests::makeSupplier).collect(Collectors.toCollection(() -> suppliers)); + + suppliers.addAll( + List.of( + // Surrogates + new TestCaseSupplier( + List.of(DataType.INTEGER), + () -> new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(5, 8, -2, 0, 200), DataType.INTEGER, "field")), + "Min[field=Attribute[channel=0]]", + DataType.INTEGER, + equalTo(-2) + ) + ), + new TestCaseSupplier( + List.of(DataType.LONG), + () -> new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(5L, 8L, -2L, 0L, 200L), DataType.LONG, "field")), + "Min[field=Attribute[channel=0]]", + DataType.LONG, + equalTo(-2L) + ) + ), + new TestCaseSupplier( + List.of(DataType.DOUBLE), + () -> new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(5., 8., -2., 0., 200.), DataType.DOUBLE, "field")), + "Min[field=Attribute[channel=0]]", + DataType.DOUBLE, + equalTo(-2.) 
+ ) + ), + new TestCaseSupplier( + List.of(DataType.DATETIME), + () -> new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(5L, 8L, 2L, 0L, 200L), DataType.DATETIME, "field")), + "Min[field=Attribute[channel=0]]", + DataType.DATETIME, + equalTo(0L) + ) + ), + new TestCaseSupplier( + List.of(DataType.BOOLEAN), + () -> new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(true, false, false, true), DataType.BOOLEAN, "field")), + "Min[field=Attribute[channel=0]]", + DataType.BOOLEAN, + equalTo(false) + ) + ), + + // Folding + new TestCaseSupplier( + List.of(DataType.INTEGER), + () -> new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(200), DataType.INTEGER, "field")), + "Min[field=Attribute[channel=0]]", + DataType.INTEGER, + equalTo(200) + ) + ), + new TestCaseSupplier( + List.of(DataType.LONG), + () -> new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(200L), DataType.LONG, "field")), + "Min[field=Attribute[channel=0]]", + DataType.LONG, + equalTo(200L) + ) + ), + new TestCaseSupplier( + List.of(DataType.DOUBLE), + () -> new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(200.), DataType.DOUBLE, "field")), + "Min[field=Attribute[channel=0]]", + DataType.DOUBLE, + equalTo(200.) + ) + ), + new TestCaseSupplier( + List.of(DataType.DATETIME), + () -> new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(200L), DataType.DATETIME, "field")), + "Min[field=Attribute[channel=0]]", + DataType.DATETIME, + equalTo(200L) + ) + ), + new TestCaseSupplier( + List.of(DataType.BOOLEAN), + () -> new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(true), DataType.BOOLEAN, "field")), + "Min[field=Attribute[channel=0]]", + DataType.BOOLEAN, + equalTo(true) + ) + ) + ) + ); + + return parameterSuppliersFromTypedDataWithDefaultChecks(suppliers); + } + + @Override + protected Expression build(Source source, List args) { + return new Min(source, args.get(0)); + } + + @SuppressWarnings("unchecked") + private static TestCaseSupplier makeSupplier(TestCaseSupplier.TypedDataSupplier fieldSupplier) { + return new TestCaseSupplier(fieldSupplier.name(), List.of(fieldSupplier.type()), () -> { + var fieldTypedData = fieldSupplier.get(); + var expected = fieldTypedData.multiRowData() + .stream() + .map(v -> (Comparable>) v) + .min(Comparator.naturalOrder()) + .orElse(null); + + return new TestCaseSupplier.TestCase( + List.of(fieldTypedData), + "Min[field=Attribute[channel=0]]", + fieldSupplier.type(), + equalTo(expected) + ); + }); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/PercentileSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/PercentileSerializationTests.java new file mode 100644 index 0000000000000..a6349b9cb5c81 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/PercentileSerializationTests.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class PercentileSerializationTests extends AbstractExpressionSerializationTests<Percentile> { + @Override + protected Percentile createTestInstance() { + Source source = randomSource(); + Expression field = randomChild(); + Expression percentile = randomChild(); + return new Percentile(source, field, percentile); + } + + @Override + protected Percentile mutateInstance(Percentile instance) throws IOException { + Source source = instance.source(); + Expression field = instance.field(); + Expression percentile = instance.percentile(); + if (randomBoolean()) { + field = randomValueOtherThan(field, AbstractExpressionSerializationTests::randomChild); + } else { + percentile = randomValueOtherThan(percentile, AbstractExpressionSerializationTests::randomChild); + } + return new Percentile(source, field, percentile); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/RateSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/RateSerializationTests.java new file mode 100644 index 0000000000000..94b2a81b308d7 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/RateSerializationTests.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class RateSerializationTests extends AbstractExpressionSerializationTests<Rate> { + @Override + protected Rate createTestInstance() { + Source source = randomSource(); + Expression field = randomChild(); + Expression timestamp = randomChild(); + Expression unit = randomBoolean() ? null : randomChild(); + return new Rate(source, field, timestamp, unit); + } + + @Override + protected Rate mutateInstance(Rate instance) throws IOException { + Source source = randomSource(); + Expression field = instance.field(); + Expression timestamp = instance.timestamp(); + Expression unit = instance.unit(); + switch (between(0, 2)) { + case 0 -> field = randomValueOtherThan(field, AbstractExpressionSerializationTests::randomChild); + case 1 -> timestamp = randomValueOtherThan(timestamp, AbstractExpressionSerializationTests::randomChild); + case 2 -> unit = randomValueOtherThan(unit, () -> randomBoolean() ?
null : randomChild()); + } + return new Rate(source, field, timestamp, unit); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialCentroidSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialCentroidSerializationTests.java new file mode 100644 index 0000000000000..037b7dc229b03 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialCentroidSerializationTests.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class SpatialCentroidSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected SpatialCentroid createTestInstance() { + return new SpatialCentroid(randomSource(), randomChild()); + } + + @Override + protected SpatialCentroid mutateInstance(SpatialCentroid instance) throws IOException { + return new SpatialCentroid( + instance.source(), + randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild) + ); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SumSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SumSerializationTests.java new file mode 100644 index 0000000000000..863392f7eb451 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SumSerializationTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class SumSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected Sum createTestInstance() { + return new Sum(randomSource(), randomChild()); + } + + @Override + protected Sum mutateInstance(Sum instance) throws IOException { + return new Sum(instance.source(), randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild)); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/TopSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/TopSerializationTests.java new file mode 100644 index 0000000000000..82bf57d1a194e --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/TopSerializationTests.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class TopSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected Top createTestInstance() { + Source source = randomSource(); + Expression field = randomChild(); + Expression limit = randomChild(); + Expression order = randomChild(); + return new Top(source, field, limit, order); + } + + @Override + protected Top mutateInstance(Top instance) throws IOException { + Source source = instance.source(); + Expression field = instance.field(); + Expression limit = instance.limitField(); + Expression order = instance.orderField(); + switch (between(0, 2)) { + case 0 -> field = randomValueOtherThan(field, AbstractExpressionSerializationTests::randomChild); + case 1 -> limit = randomValueOtherThan(limit, AbstractExpressionSerializationTests::randomChild); + case 2 -> order = randomValueOtherThan(order, AbstractExpressionSerializationTests::randomChild); + } + return new Top(source, field, limit, order); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/TopTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/TopTests.java new file mode 100644 index 0000000000000..c0c23ce29301e --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/TopTests.java @@ -0,0 +1,248 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.AbstractAggregationTestCase; +import org.elasticsearch.xpack.esql.expression.function.MultiRowTestCaseSupplier; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.equalTo; + +public class TopTests extends AbstractAggregationTestCase { + public TopTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + var suppliers = new ArrayList(); + + for (var limitCaseSupplier : TestCaseSupplier.intCases(1, 1000, false)) { + for (String order : List.of("asc", "desc")) { + Stream.of( + MultiRowTestCaseSupplier.intCases(1, 1000, Integer.MIN_VALUE, Integer.MAX_VALUE, true), + MultiRowTestCaseSupplier.longCases(1, 1000, Long.MIN_VALUE, Long.MAX_VALUE, true), + MultiRowTestCaseSupplier.doubleCases(1, 1000, -Double.MAX_VALUE, Double.MAX_VALUE, true), + MultiRowTestCaseSupplier.dateCases(1, 1000) + ) + .flatMap(List::stream) + .map(fieldCaseSupplier -> TopTests.makeSupplier(fieldCaseSupplier, limitCaseSupplier, order)) + .collect(Collectors.toCollection(() -> suppliers)); + } + } + + suppliers.addAll( + List.of( + // Surrogates + new TestCaseSupplier( + List.of(DataType.INTEGER, DataType.INTEGER, DataType.KEYWORD), + () -> new TestCaseSupplier.TestCase( + List.of( + TestCaseSupplier.TypedData.multiRow(List.of(5, 8, -2, 0, 200), DataType.INTEGER, "field"), + new TestCaseSupplier.TypedData(1, DataType.INTEGER, "limit").forceLiteral(), + new TestCaseSupplier.TypedData(new BytesRef("desc"), DataType.KEYWORD, "order").forceLiteral() + ), + "Top[field=Attribute[channel=0], limit=Attribute[channel=1], order=Attribute[channel=2]]", + DataType.INTEGER, + equalTo(200) + ) + ), + new TestCaseSupplier( + List.of(DataType.LONG, DataType.INTEGER, DataType.KEYWORD), + () -> new TestCaseSupplier.TestCase( + List.of( + TestCaseSupplier.TypedData.multiRow(List.of(5L, 8L, -2L, 0L, 200L), DataType.LONG, "field"), + new TestCaseSupplier.TypedData(1, DataType.INTEGER, "limit").forceLiteral(), + new TestCaseSupplier.TypedData(new BytesRef("desc"), DataType.KEYWORD, "order").forceLiteral() + ), + "Top[field=Attribute[channel=0], limit=Attribute[channel=1], order=Attribute[channel=2]]", + DataType.LONG, + equalTo(200L) + ) + ), + new TestCaseSupplier( + List.of(DataType.DOUBLE, DataType.INTEGER, DataType.KEYWORD), + () -> new TestCaseSupplier.TestCase( + List.of( + TestCaseSupplier.TypedData.multiRow(List.of(5., 8., -2., 0., 200.), DataType.DOUBLE, "field"), + new TestCaseSupplier.TypedData(1, DataType.INTEGER, "limit").forceLiteral(), + new TestCaseSupplier.TypedData(new BytesRef("desc"), DataType.KEYWORD, "order").forceLiteral() + ), + "Top[field=Attribute[channel=0], limit=Attribute[channel=1], order=Attribute[channel=2]]", + DataType.DOUBLE, + equalTo(200.) 
+ ) + ), + new TestCaseSupplier( + List.of(DataType.DATETIME, DataType.INTEGER, DataType.KEYWORD), + () -> new TestCaseSupplier.TestCase( + List.of( + TestCaseSupplier.TypedData.multiRow(List.of(5L, 8L, 2L, 0L, 200L), DataType.DATETIME, "field"), + new TestCaseSupplier.TypedData(1, DataType.INTEGER, "limit").forceLiteral(), + new TestCaseSupplier.TypedData(new BytesRef("desc"), DataType.KEYWORD, "order").forceLiteral() + ), + "Top[field=Attribute[channel=0], limit=Attribute[channel=1], order=Attribute[channel=2]]", + DataType.DATETIME, + equalTo(200L) + ) + ), + + // Folding + new TestCaseSupplier( + List.of(DataType.INTEGER, DataType.INTEGER, DataType.KEYWORD), + () -> new TestCaseSupplier.TestCase( + List.of( + TestCaseSupplier.TypedData.multiRow(List.of(200), DataType.INTEGER, "field"), + new TestCaseSupplier.TypedData(1, DataType.INTEGER, "limit").forceLiteral(), + new TestCaseSupplier.TypedData(new BytesRef("desc"), DataType.KEYWORD, "order").forceLiteral() + ), + "Top[field=Attribute[channel=0], limit=Attribute[channel=1], order=Attribute[channel=2]]", + DataType.INTEGER, + equalTo(200) + ) + ), + new TestCaseSupplier( + List.of(DataType.LONG, DataType.INTEGER, DataType.KEYWORD), + () -> new TestCaseSupplier.TestCase( + List.of( + TestCaseSupplier.TypedData.multiRow(List.of(200L), DataType.LONG, "field"), + new TestCaseSupplier.TypedData(1, DataType.INTEGER, "limit").forceLiteral(), + new TestCaseSupplier.TypedData(new BytesRef("desc"), DataType.KEYWORD, "order").forceLiteral() + ), + "Top[field=Attribute[channel=0], limit=Attribute[channel=1], order=Attribute[channel=2]]", + DataType.LONG, + equalTo(200L) + ) + ), + new TestCaseSupplier( + List.of(DataType.DOUBLE, DataType.INTEGER, DataType.KEYWORD), + () -> new TestCaseSupplier.TestCase( + List.of( + TestCaseSupplier.TypedData.multiRow(List.of(200.), DataType.DOUBLE, "field"), + new TestCaseSupplier.TypedData(1, DataType.INTEGER, "limit").forceLiteral(), + new TestCaseSupplier.TypedData(new BytesRef("desc"), DataType.KEYWORD, "order").forceLiteral() + ), + "Top[field=Attribute[channel=0], limit=Attribute[channel=1], order=Attribute[channel=2]]", + DataType.DOUBLE, + equalTo(200.) 
+ ) + ), + new TestCaseSupplier( + List.of(DataType.DATETIME, DataType.INTEGER, DataType.KEYWORD), + () -> new TestCaseSupplier.TestCase( + List.of( + TestCaseSupplier.TypedData.multiRow(List.of(200L), DataType.DATETIME, "field"), + new TestCaseSupplier.TypedData(1, DataType.INTEGER, "limit").forceLiteral(), + new TestCaseSupplier.TypedData(new BytesRef("desc"), DataType.KEYWORD, "order").forceLiteral() + ), + "Top[field=Attribute[channel=0], limit=Attribute[channel=1], order=Attribute[channel=2]]", + DataType.DATETIME, + equalTo(200L) + ) + ), + + // Resolution errors + new TestCaseSupplier( + List.of(DataType.LONG, DataType.INTEGER, DataType.KEYWORD), + () -> TestCaseSupplier.TestCase.typeError( + List.of( + TestCaseSupplier.TypedData.multiRow(List.of(5L, 8L, 2L, 0L, 200L), DataType.LONG, "field"), + new TestCaseSupplier.TypedData(0, DataType.INTEGER, "limit").forceLiteral(), + new TestCaseSupplier.TypedData(new BytesRef("desc"), DataType.KEYWORD, "order").forceLiteral() + ), + "Limit must be greater than 0 in [], found [0]" + ) + ), + new TestCaseSupplier( + List.of(DataType.LONG, DataType.INTEGER, DataType.KEYWORD), + () -> TestCaseSupplier.TestCase.typeError( + List.of( + TestCaseSupplier.TypedData.multiRow(List.of(5L, 8L, 2L, 0L, 200L), DataType.LONG, "field"), + new TestCaseSupplier.TypedData(2, DataType.INTEGER, "limit").forceLiteral(), + new TestCaseSupplier.TypedData(new BytesRef("wrong-order"), DataType.KEYWORD, "order").forceLiteral() + ), + "Invalid order value in [], expected [ASC, DESC] but got [wrong-order]" + ) + ), + new TestCaseSupplier( + List.of(DataType.LONG, DataType.INTEGER, DataType.KEYWORD), + () -> TestCaseSupplier.TestCase.typeError( + List.of( + TestCaseSupplier.TypedData.multiRow(List.of(5L, 8L, 2L, 0L, 200L), DataType.LONG, "field"), + new TestCaseSupplier.TypedData(null, DataType.INTEGER, "limit").forceLiteral(), + new TestCaseSupplier.TypedData(new BytesRef("desc"), DataType.KEYWORD, "order").forceLiteral() + ), + "second argument of [] cannot be null, received [limit]" + ) + ), + new TestCaseSupplier( + List.of(DataType.LONG, DataType.INTEGER, DataType.KEYWORD), + () -> TestCaseSupplier.TestCase.typeError( + List.of( + TestCaseSupplier.TypedData.multiRow(List.of(5L, 8L, 2L, 0L, 200L), DataType.LONG, "field"), + new TestCaseSupplier.TypedData(1, DataType.INTEGER, "limit").forceLiteral(), + new TestCaseSupplier.TypedData(null, DataType.KEYWORD, "order").forceLiteral() + ), + "third argument of [] cannot be null, received [order]" + ) + ) + ) + ); + + return parameterSuppliersFromTypedDataWithDefaultChecks(suppliers); + } + + @Override + protected Expression build(Source source, List<Expression> args) { + return new Top(source, args.get(0), args.get(1), args.get(2)); + } + + @SuppressWarnings("unchecked") + private static TestCaseSupplier makeSupplier( + TestCaseSupplier.TypedDataSupplier fieldSupplier, + TestCaseSupplier.TypedDataSupplier limitCaseSupplier, + String order + ) { + return new TestCaseSupplier(List.of(fieldSupplier.type(), DataType.INTEGER, DataType.KEYWORD), () -> { + var fieldTypedData = fieldSupplier.get(); + var limitTypedData = limitCaseSupplier.get().forceLiteral(); + var limit = (int) limitTypedData.getValue(); + var expected = fieldTypedData.multiRowData() + .stream() + .map(v -> (Comparable<? super Comparable<?>>) v) + .sorted(order.equals("asc") ?
Comparator.naturalOrder() : Comparator.reverseOrder()) + .limit(limit) + .toList(); + + return new TestCaseSupplier.TestCase( + List.of( + fieldTypedData, + limitTypedData, + new TestCaseSupplier.TypedData(new BytesRef(order), DataType.KEYWORD, order + " order").forceLiteral() + ), + "Top[field=Attribute[channel=0], limit=Attribute[channel=1], order=Attribute[channel=2]]", + fieldSupplier.type(), + equalTo(expected.size() == 1 ? expected.get(0) : expected) + ); + }); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/ValuesSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/ValuesSerializationTests.java new file mode 100644 index 0000000000000..6787e8d1ad09a --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/ValuesSerializationTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class ValuesSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected Values createTestInstance() { + return new Values(randomSource(), randomChild()); + } + + @Override + protected Values mutateInstance(Values instance) throws IOException { + return new Values(instance.source(), randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild)); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/grouping/BucketSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/grouping/BucketSerializationTests.java new file mode 100644 index 0000000000000..3b38c31e760be --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/grouping/BucketSerializationTests.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.grouping; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class BucketSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected Bucket createTestInstance() { + Source source = randomSource(); + Expression field = randomChild(); + Expression buckets = randomChild(); + Expression from = randomChild(); + Expression to = randomChild(); + return new Bucket(source, field, buckets, from, to); + } + + @Override + protected Bucket mutateInstance(Bucket instance) throws IOException { + Source source = instance.source(); + Expression field = instance.field(); + Expression buckets = instance.buckets(); + Expression from = instance.from(); + Expression to = instance.to(); + switch (between(0, 3)) { + case 0 -> field = randomValueOtherThan(field, AbstractExpressionSerializationTests::randomChild); + case 1 -> buckets = randomValueOtherThan(buckets, AbstractExpressionSerializationTests::randomChild); + case 2 -> from = randomValueOtherThan(from, AbstractExpressionSerializationTests::randomChild); + case 3 -> to = randomValueOtherThan(to, AbstractExpressionSerializationTests::randomChild); + } + return new Bucket(source, field, buckets, from, to); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/BucketTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/grouping/BucketTests.java similarity index 97% rename from x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/BucketTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/grouping/BucketTests.java index c4e614be94438..9100e71de76df 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/BucketTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/grouping/BucketTests.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.expression.function.scalar.math; +package org.elasticsearch.xpack.esql.expression.function.grouping; import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; @@ -16,9 +16,8 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; -import org.elasticsearch.xpack.esql.expression.function.grouping.Bucket; import org.hamcrest.Matcher; import java.time.Duration; @@ -30,7 +29,7 @@ import static org.hamcrest.Matchers.equalTo; -public class BucketTests extends AbstractFunctionTestCase { +public class BucketTests extends AbstractScalarFunctionTestCase { public BucketTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractConfigurationFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractConfigurationFunctionTestCase.java index 074fe9e159023..760c57f6570bb 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractConfigurationFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractConfigurationFunctionTestCase.java @@ -12,7 +12,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.util.StringUtils; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; @@ -22,7 +22,7 @@ import static org.elasticsearch.xpack.esql.SerializationTestUtils.assertSerialization; -public abstract class AbstractConfigurationFunctionTestCase extends AbstractFunctionTestCase { +public abstract class AbstractConfigurationFunctionTestCase extends AbstractScalarFunctionTestCase { protected abstract Expression buildWithConfiguration(Source source, List args, EsqlConfiguration configuration); @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AndSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AndSerializationTests.java new file mode 100644 index 0000000000000..40788eb7a2b2d --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AndSerializationTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class AndSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected And createTestInstance() { + Source source = randomSource(); + Expression left = randomChild(); + Expression right = randomChild(); + return new And(source, left, right); + } + + @Override + protected And mutateInstance(And instance) throws IOException { + Source source = instance.source(); + Expression left = instance.left(); + Expression right = instance.right(); + if (randomBoolean()) { + left = randomValueOtherThan(left, AbstractExpressionSerializationTests::randomChild); + } else { + right = randomValueOtherThan(right, AbstractExpressionSerializationTests::randomChild); + } + return new And(source, left, right); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/NotSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/NotSerializationTests.java index 61e3690f1633f..aa8bad907eeb3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/NotSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/NotSerializationTests.java @@ -7,21 +7,14 @@ package org.elasticsearch.xpack.esql.expression.function.scalar; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Not; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; import java.io.IOException; -import java.util.List; public class NotSerializationTests extends AbstractExpressionSerializationTests { - @Override - protected List getNamedWriteables() { - return UnaryScalarFunction.getNamedWriteables(); - } - @Override protected Not createTestInstance() { return new Not(randomSource(), randomChild()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/OrSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/OrSerializationTests.java new file mode 100644 index 0000000000000..5e68a4af80623 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/OrSerializationTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class OrSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected Or createTestInstance() { + Source source = randomSource(); + Expression left = randomChild(); + Expression right = randomChild(); + return new Or(source, left, right); + } + + @Override + protected Or mutateInstance(Or instance) throws IOException { + Source source = instance.source(); + Expression left = instance.left(); + Expression right = instance.right(); + if (randomBoolean()) { + left = randomValueOtherThan(left, AbstractExpressionSerializationTests::randomChild); + } else { + right = randomValueOtherThan(right, AbstractExpressionSerializationTests::randomChild); + } + return new Or(source, left, right); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseSerializationTests.java index 69bbf2f76937f..06df15dd68827 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseSerializationTests.java @@ -7,20 +7,13 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.conditional; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.AbstractVarargsSerializationTests; -import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import java.util.List; public class CaseSerializationTests extends AbstractVarargsSerializationTests { - @Override - protected List getNamedWriteables() { - return EsqlScalarFunction.getNamedWriteables(); - } - @Override protected Case create(Source source, Expression first, List rest) { return new Case(source, first, rest); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java index 02da8ea22a6a0..0a03af206b846 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java @@ -20,7 +20,7 @@ import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; @@ 
-33,7 +33,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; -public class CaseTests extends AbstractFunctionTestCase { +public class CaseTests extends AbstractScalarFunctionTestCase { public CaseTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestSerializationTests.java index 43e1fe405911a..42b0203bc79ca 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestSerializationTests.java @@ -7,20 +7,13 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.conditional; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.AbstractVarargsSerializationTests; -import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import java.util.List; public class GreatestSerializationTests extends AbstractVarargsSerializationTests { - @Override - protected List getNamedWriteables() { - return EsqlScalarFunction.getNamedWriteables(); - } - @Override protected Greatest create(Source source, Expression first, List rest) { return new Greatest(source, first, rest); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestTests.java index 9376849d8136c..7cc03be7d6273 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.VaragsTestCaseBuilder; @@ -26,7 +26,7 @@ import static org.hamcrest.Matchers.equalTo; -public class GreatestTests extends AbstractFunctionTestCase { +public class GreatestTests extends AbstractScalarFunctionTestCase { public GreatestTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastSerializationTests.java index f552713af4dbe..fc9e8e5522a19 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastSerializationTests.java 
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastSerializationTests.java @@ -7,20 +7,13 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.conditional; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.AbstractVarargsSerializationTests; -import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import java.util.List; public class LeastSerializationTests extends AbstractVarargsSerializationTests { - @Override - protected List getNamedWriteables() { - return EsqlScalarFunction.getNamedWriteables(); - } - @Override protected Least create(Source source, Expression first, List rest) { return new Least(source, first, rest); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastTests.java index 0881b871c30f6..aa475f05ebe69 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.VaragsTestCaseBuilder; @@ -25,7 +25,7 @@ import static org.hamcrest.Matchers.equalTo; -public class LeastTests extends AbstractFunctionTestCase { +public class LeastTests extends AbstractScalarFunctionTestCase { public LeastTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64Tests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64Tests.java index d97f070275617..e08da9850b555 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64Tests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64Tests.java @@ -14,7 +14,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.FunctionName; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; @@ -27,7 +27,7 @@ import static org.hamcrest.Matchers.equalTo; @FunctionName("from_base64") -public class FromBase64Tests extends AbstractFunctionTestCase { +public class FromBase64Tests extends AbstractScalarFunctionTestCase { public 
FromBase64Tests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64Tests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64Tests.java index 4c9175e4906bf..88ca7d0452b3e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64Tests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64Tests.java @@ -14,7 +14,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.FunctionName; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; @@ -27,7 +27,7 @@ import static org.hamcrest.Matchers.equalTo; @FunctionName("to_base64") -public class ToBase64Tests extends AbstractFunctionTestCase { +public class ToBase64Tests extends AbstractScalarFunctionTestCase { public ToBase64Tests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanTests.java index c4e53d922ac60..c5b9b2501aeae 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; @@ -24,7 +24,7 @@ import static java.util.Collections.emptyList; -public class ToBooleanTests extends AbstractFunctionTestCase { +public class ToBooleanTests extends AbstractScalarFunctionTestCase { public ToBooleanTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java index 1c1431fe3b7ea..a59e7b0085e4c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; 
-import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.FunctionName; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; @@ -27,7 +27,7 @@ import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.CARTESIAN; @FunctionName("to_cartesianpoint") -public class ToCartesianPointTests extends AbstractFunctionTestCase { +public class ToCartesianPointTests extends AbstractScalarFunctionTestCase { public ToCartesianPointTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java index 48a610804845d..973431d676b82 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.FunctionName; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; @@ -27,7 +27,7 @@ import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.CARTESIAN; @FunctionName("to_cartesianshape") -public class ToCartesianShapeTests extends AbstractFunctionTestCase { +public class ToCartesianShapeTests extends AbstractScalarFunctionTestCase { public ToCartesianShapeTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java index 6aef91be43088..e512334391bed 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; @@ -26,7 +26,7 @@ import static java.util.Collections.emptyList; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.DEFAULT_DATE_TIME_FORMATTER; -public class ToDatetimeTests extends AbstractFunctionTestCase { +public class ToDatetimeTests extends AbstractScalarFunctionTestCase { public ToDatetimeTests(@Name("TestCase") 
Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDegreesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDegreesTests.java index fc45c8b26a869..bd07141009d3e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDegreesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDegreesTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; @@ -22,7 +22,7 @@ import java.util.function.Function; import java.util.function.Supplier; -public class ToDegreesTests extends AbstractFunctionTestCase { +public class ToDegreesTests extends AbstractScalarFunctionTestCase { public ToDegreesTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java index 5f45cc11d9c5a..d4d20629da09e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java @@ -16,7 +16,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter; @@ -26,7 +26,7 @@ import java.util.function.Function; import java.util.function.Supplier; -public class ToDoubleTests extends AbstractFunctionTestCase { +public class ToDoubleTests extends AbstractScalarFunctionTestCase { public ToDoubleTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointTests.java index 2b5dc453acc23..7a3b83f3ab113 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import 
org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.FunctionName; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; @@ -27,7 +27,7 @@ import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.GEO; @FunctionName("to_geopoint") -public class ToGeoPointTests extends AbstractFunctionTestCase { +public class ToGeoPointTests extends AbstractScalarFunctionTestCase { public ToGeoPointTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java index bca8dc822052f..831539852846c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.FunctionName; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; @@ -27,7 +27,7 @@ import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.GEO; @FunctionName("to_geoshape") -public class ToGeoShapeTests extends AbstractFunctionTestCase { +public class ToGeoShapeTests extends AbstractScalarFunctionTestCase { public ToGeoShapeTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIPTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIPTests.java index 20b48d24f8211..ffa94548f0a23 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIPTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIPTests.java @@ -16,7 +16,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.ArrayList; @@ -26,7 +26,7 @@ import static java.util.Collections.emptyList; import static org.elasticsearch.xpack.esql.core.util.StringUtils.parseIP; -public class ToIPTests extends AbstractFunctionTestCase { +public class ToIPTests extends AbstractScalarFunctionTestCase { public ToIPTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java index 45837c2110ff3..7984c1e04effc 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; @@ -26,7 +26,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataTypeConverter.safeToInt; -public class ToIntegerTests extends AbstractFunctionTestCase { +public class ToIntegerTests extends AbstractScalarFunctionTestCase { public ToIntegerTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java index 565562b8574d2..27c69ae977f6b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; @@ -25,7 +25,7 @@ import java.util.function.Function; import java.util.function.Supplier; -public class ToLongTests extends AbstractFunctionTestCase { +public class ToLongTests extends AbstractScalarFunctionTestCase { public ToLongTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToRadiansTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToRadiansTests.java index 3f6e28c65142f..33e8eee7a8de4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToRadiansTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToRadiansTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import 
org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; @@ -22,7 +22,7 @@ import java.util.function.Function; import java.util.function.Supplier; -public class ToRadiansTests extends AbstractFunctionTestCase { +public class ToRadiansTests extends AbstractScalarFunctionTestCase { public ToRadiansTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java index 0556742b55b3c..809b4ddaa78a4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java @@ -16,7 +16,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; @@ -27,7 +27,7 @@ import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.CARTESIAN; import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.GEO; -public class ToStringTests extends AbstractFunctionTestCase { +public class ToStringTests extends AbstractScalarFunctionTestCase { public ToStringTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongTests.java index 44092db499d2d..a1fccac8edfd1 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigDecimal; @@ -27,7 +27,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataTypeConverter.safeToUnsignedLong; import static org.elasticsearch.xpack.esql.core.util.NumericUtils.UNSIGNED_LONG_MAX_AS_DOUBLE; -public class ToUnsignedLongTests extends AbstractFunctionTestCase { +public class ToUnsignedLongTests extends AbstractScalarFunctionTestCase { public ToUnsignedLongTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersionTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersionTests.java index 34281442872a5..1c37afc1c0722 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersionTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersionTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.versionfield.Version; @@ -22,7 +22,7 @@ import java.util.List; import java.util.function.Supplier; -public class ToVersionTests extends AbstractFunctionTestCase { +public class ToVersionTests extends AbstractScalarFunctionTestCase { public ToVersionTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiffSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiffSerializationTests.java index b1dc1b064ae5a..77158b6f1866f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiffSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiffSerializationTests.java @@ -7,21 +7,13 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.date; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; -import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import java.io.IOException; -import java.util.List; public class DateDiffSerializationTests extends AbstractExpressionSerializationTests { - @Override - protected List getNamedWriteables() { - return EsqlScalarFunction.getNamedWriteables(); - } - @Override protected DateDiff createTestInstance() { Source source = randomSource(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiffTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiffTests.java index 89cfda5c4bce5..4af2ce1b7cb00 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiffTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiffTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import 
org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.time.ZonedDateTime; @@ -25,7 +25,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -public class DateDiffTests extends AbstractFunctionTestCase { +public class DateDiffTests extends AbstractScalarFunctionTestCase { public DateDiffTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractSerializationTests.java index 6e1c061c84f2e..f4e6d2672a40f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractSerializationTests.java @@ -7,21 +7,13 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.date; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; -import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import java.io.IOException; -import java.util.List; public class DateExtractSerializationTests extends AbstractExpressionSerializationTests { - @Override - protected List getNamedWriteables() { - return EsqlScalarFunction.getNamedWriteables(); - } - @Override protected DateExtract createTestInstance() { Source source = randomSource(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatSerializationTests.java index 4dff735318558..ece145e95aabb 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatSerializationTests.java @@ -7,21 +7,13 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.date; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; -import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import java.io.IOException; -import java.util.List; public class DateFormatSerializationTests extends AbstractExpressionSerializationTests { - @Override - protected List getNamedWriteables() { - return EsqlScalarFunction.getNamedWriteables(); - } - @Override protected DateFormat createTestInstance() { Source source = randomSource(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseSerializationTests.java index 
e816f2c4a20fb..79a650c8dd963 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseSerializationTests.java @@ -7,21 +7,13 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.date; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; -import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import java.io.IOException; -import java.util.List; public class DateParseSerializationTests extends AbstractExpressionSerializationTests { - @Override - protected List getNamedWriteables() { - return EsqlScalarFunction.getNamedWriteables(); - } - @Override protected DateParse createTestInstance() { Source source = randomSource(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java index 161b338cc85b2..f0aa766fb1bf9 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java @@ -17,7 +17,7 @@ import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.List; @@ -28,7 +28,7 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; -public class DateParseTests extends AbstractFunctionTestCase { +public class DateParseTests extends AbstractScalarFunctionTestCase { public DateParseTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncSerializationTests.java index 09d2e06003128..3d1616ce29adf 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncSerializationTests.java @@ -7,21 +7,13 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.date; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; -import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import java.io.IOException; -import java.util.List; public class DateTruncSerializationTests extends 
AbstractExpressionSerializationTests { - @Override - protected List getNamedWriteables() { - return EsqlScalarFunction.getNamedWriteables(); - } - @Override protected DateTrunc createTestInstance() { Source source = randomSource(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncTests.java index 4c5a7d3734ce3..17d8cd6a57223 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.time.Duration; @@ -28,7 +28,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -public class DateTruncTests extends AbstractFunctionTestCase { +public class DateTruncTests extends AbstractScalarFunctionTestCase { public DateTruncTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/NowSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/NowSerializationTests.java index 3bb8c2f260561..b816e3a8da858 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/NowSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/NowSerializationTests.java @@ -7,19 +7,11 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.date; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; -import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import java.io.IOException; -import java.util.List; public class NowSerializationTests extends AbstractExpressionSerializationTests { - @Override - protected List getNamedWriteables() { - return EsqlScalarFunction.getNamedWriteables(); - } - @Override protected Now createTestInstance() { return new Now(randomSource(), configuration()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatchSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatchSerializationTests.java new file mode 100644 index 0000000000000..3c833c4b0d7ac --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatchSerializationTests.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.ip;
+
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests;
+
+import java.io.IOException;
+import java.util.List;
+
+public class CIDRMatchSerializationTests extends AbstractExpressionSerializationTests<CIDRMatch> {
+    @Override
+    protected CIDRMatch createTestInstance() {
+        Source source = randomSource();
+        Expression ipField = randomChild();
+        List<Expression> matches = randomList(1, 10, AbstractExpressionSerializationTests::randomChild);
+        return new CIDRMatch(source, ipField, matches);
+    }
+
+    @Override
+    protected CIDRMatch mutateInstance(CIDRMatch instance) throws IOException {
+        Source source = instance.source();
+        Expression ipField = instance.ipField();
+        List<Expression> matches = instance.matches();
+        if (randomBoolean()) {
+            ipField = randomValueOtherThan(ipField, AbstractExpressionSerializationTests::randomChild);
+        } else {
+            matches = randomValueOtherThan(matches, () -> randomList(1, 10, AbstractExpressionSerializationTests::randomChild));
+        }
+        return new CIDRMatch(source, ipField, matches);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatchTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatchTests.java
index 0d8f4bc7ea115..3cdc54f240a96 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatchTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatchTests.java
@@ -14,7 +14,7 @@
 import org.elasticsearch.xpack.esql.core.expression.Expression;
 import org.elasticsearch.xpack.esql.core.tree.Source;
 import org.elasticsearch.xpack.esql.core.type.DataType;
-import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
+import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase;
 import org.elasticsearch.xpack.esql.expression.function.FunctionName;
 import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter;
@@ -25,7 +25,7 @@
 import static org.hamcrest.Matchers.equalTo;
 
 @FunctionName("cidr_match")
-public class CIDRMatchTests extends AbstractFunctionTestCase {
+public class CIDRMatchTests extends AbstractScalarFunctionTestCase {
     public CIDRMatchTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefixSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefixSerializationTests.java
new file mode 100644
index 0000000000000..d7fc05d9d0f64
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefixSerializationTests.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.ip;
+
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests;
+
+import java.io.IOException;
+
+public class IpPrefixSerializationTests extends AbstractExpressionSerializationTests<IpPrefix> {
+    @Override
+    protected IpPrefix createTestInstance() {
+        Source source = randomSource();
+        Expression ipField = randomChild();
+        Expression prefixLengthV4Field = randomChild();
+        Expression prefixLengthV6Field = randomChild();
+        return new IpPrefix(source, ipField, prefixLengthV4Field, prefixLengthV6Field);
+    }
+
+    @Override
+    protected IpPrefix mutateInstance(IpPrefix instance) throws IOException {
+        Source source = instance.source();
+        Expression ipField = instance.ipField();
+        Expression prefixLengthV4Field = instance.prefixLengthV4Field();
+        Expression prefixLengthV6Field = instance.prefixLengthV6Field();
+        switch (between(0, 2)) {
+            case 0 -> ipField = randomValueOtherThan(ipField, AbstractExpressionSerializationTests::randomChild);
+            case 1 -> prefixLengthV4Field = randomValueOtherThan(prefixLengthV4Field, AbstractExpressionSerializationTests::randomChild);
+            case 2 -> prefixLengthV6Field = randomValueOtherThan(prefixLengthV6Field, AbstractExpressionSerializationTests::randomChild);
+        }
+        return new IpPrefix(source, ipField, prefixLengthV4Field, prefixLengthV6Field);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefixTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefixTests.java
index a575eb48c4bd7..298bcb3f49466 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefixTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefixTests.java
@@ -16,7 +16,7 @@
 import org.elasticsearch.xpack.esql.core.expression.Expression;
 import org.elasticsearch.xpack.esql.core.tree.Source;
 import org.elasticsearch.xpack.esql.core.type.DataType;
-import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
+import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase;
 import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter;
@@ -25,7 +25,7 @@
 import static org.hamcrest.Matchers.equalTo;
 
-public class IpPrefixTests extends AbstractFunctionTestCase {
+public class IpPrefixTests extends AbstractScalarFunctionTestCase {
     public IpPrefixTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsTests.java
index 7bd195ab86389..b5923c7a5b214 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsTests.java
@@ -13,7 +13,7 @@
 import org.elasticsearch.xpack.esql.core.expression.Expression;
 import org.elasticsearch.xpack.esql.core.tree.Source;
 import org.elasticsearch.xpack.esql.core.type.DataType;
-import
org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; @@ -23,7 +23,7 @@ import static org.hamcrest.Matchers.equalTo; -public class AbsTests extends AbstractFunctionTestCase { +public class AbsTests extends AbstractScalarFunctionTestCase { @ParametersFactory public static Iterable parameters() { List suppliers = new ArrayList<>(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AcosTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AcosTests.java index 02974c10480d2..7c5cd87dfee39 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AcosTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AcosTests.java @@ -12,13 +12,13 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.List; import java.util.function.Supplier; -public class AcosTests extends AbstractFunctionTestCase { +public class AcosTests extends AbstractScalarFunctionTestCase { public AcosTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AsinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AsinTests.java index d4d13c2054fcd..38e210d81e5fd 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AsinTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AsinTests.java @@ -12,13 +12,13 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.List; import java.util.function.Supplier; -public class AsinTests extends AbstractFunctionTestCase { +public class AsinTests extends AbstractScalarFunctionTestCase { public AsinTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2SerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2SerializationTests.java new file mode 100644 index 0000000000000..2ae88bbf24549 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2SerializationTests.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.math; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; +import org.elasticsearch.xpack.esql.expression.AbstractUnaryScalarSerializationTests; + +import java.io.IOException; + +public class Atan2SerializationTests extends AbstractExpressionSerializationTests { + @Override + protected Atan2 createTestInstance() { + Source source = randomSource(); + Expression y = randomChild(); + Expression x = randomChild(); + return new Atan2(source, y, x); + } + + @Override + protected Atan2 mutateInstance(Atan2 instance) throws IOException { + Source source = instance.source(); + Expression y = instance.y(); + Expression x = instance.x(); + if (randomBoolean()) { + y = randomValueOtherThan(y, AbstractUnaryScalarSerializationTests::randomChild); + } else { + x = randomValueOtherThan(x, AbstractUnaryScalarSerializationTests::randomChild); + } + return new Atan2(source, y, x); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2Tests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2Tests.java index 3b81316da5676..1144919094812 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2Tests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2Tests.java @@ -12,13 +12,13 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.List; import java.util.function.Supplier; -public class Atan2Tests extends AbstractFunctionTestCase { +public class Atan2Tests extends AbstractScalarFunctionTestCase { public Atan2Tests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AtanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AtanTests.java index c92c626a5601b..c9f7a1baeadbe 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AtanTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AtanTests.java @@ -12,13 +12,13 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.List; import java.util.function.Supplier; -public class AtanTests extends AbstractFunctionTestCase { +public class AtanTests extends AbstractScalarFunctionTestCase { public AtanTests(@Name("TestCase") Supplier testCaseSupplier) 
{ this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CbrtTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CbrtTests.java index 14d6075f5cbe3..f644d8bc72dce 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CbrtTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CbrtTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.NumericUtils; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; @@ -24,7 +24,7 @@ import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.unsignedLongToDouble; -public class CbrtTests extends AbstractFunctionTestCase { +public class CbrtTests extends AbstractScalarFunctionTestCase { public CbrtTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilTests.java index ff61ecfa39687..1572b928a0d75 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; @@ -23,7 +23,7 @@ import static org.hamcrest.Matchers.equalTo; -public class CeilTests extends AbstractFunctionTestCase { +public class CeilTests extends AbstractScalarFunctionTestCase { public CeilTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CosTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CosTests.java index 61e7a1f051905..dc5eec4f90d32 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CosTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CosTests.java @@ -12,13 +12,13 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.List; import 
java.util.function.Supplier; -public class CosTests extends AbstractFunctionTestCase { +public class CosTests extends AbstractScalarFunctionTestCase { public CosTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshTests.java index 1ea63cc006e9c..79557b15be08a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshTests.java @@ -12,13 +12,13 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.List; import java.util.function.Supplier; -public class CoshTests extends AbstractFunctionTestCase { +public class CoshTests extends AbstractScalarFunctionTestCase { public CoshTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMvSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/ESerializationTests.java similarity index 50% rename from x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMvSerializationTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/ESerializationTests.java index fba33c9ea1c03..971295aa02a9b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMvSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/ESerializationTests.java @@ -5,17 +5,25 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; +package org.elasticsearch.xpack.esql.expression.function.scalar.math; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; -import java.util.List; +import java.io.IOException; -public abstract class AbstractMvSerializationTests extends AbstractExpressionSerializationTests { +public class ESerializationTests extends AbstractExpressionSerializationTests { @Override - protected List getNamedWriteables() { - return AbstractMultivalueFunction.getNamedWriteables(); + protected E createTestInstance() { + return new E(randomSource()); + } + + @Override + protected E mutateInstance(E instance) throws IOException { + return null; + } + + @Override + protected boolean alwaysEmptySource() { + return true; } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/ETests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/ETests.java index 8eb0b80fc21d7..763ad3a2b49c9 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/ETests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/ETests.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.hamcrest.Matcher; @@ -24,7 +24,7 @@ import static org.hamcrest.Matchers.equalTo; -public class ETests extends AbstractFunctionTestCase { +public class ETests extends AbstractScalarFunctionTestCase { public ETests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java index f0c990ec64af1..269dabcc6b6b8 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; @@ -21,7 +21,7 @@ import java.util.List; import java.util.function.Supplier; -public class FloorTests extends AbstractFunctionTestCase { +public class FloorTests extends AbstractScalarFunctionTestCase { public FloorTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10Tests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10Tests.java index 64329d7824b74..ca0c8718f5ac0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10Tests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10Tests.java @@ -13,7 +13,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; @@ -24,7 +24,7 @@ import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.bigIntegerToUnsignedLong; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.unsignedLongToDouble; -public class Log10Tests extends AbstractFunctionTestCase { +public class Log10Tests extends AbstractScalarFunctionTestCase { public Log10Tests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/LogSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/LogSerializationTests.java new file mode 100644 index 0000000000000..8b65a40d9e831 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/LogSerializationTests.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.math; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class LogSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected Log createTestInstance() { + Source source = randomSource(); + Expression value = randomChild(); + Expression base = randomBoolean() ? null : randomChild(); + return new Log(source, value, base); + } + + @Override + protected Log mutateInstance(Log instance) throws IOException { + Source source = instance.source(); + Expression value = instance.value(); + Expression base = instance.base(); + if (randomBoolean()) { + value = randomValueOtherThan(value, AbstractExpressionSerializationTests::randomChild); + } else { + base = randomValueOtherThan(base, () -> randomBoolean() ? 
null : randomChild()); + } + return new Log(source, value, base); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/LogTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/LogTests.java index ce53fdbfc1851..1c002e111e575 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/LogTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/LogTests.java @@ -13,13 +13,13 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.List; import java.util.function.Supplier; -public class LogTests extends AbstractFunctionTestCase { +public class LogTests extends AbstractScalarFunctionTestCase { public LogTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PiSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PiSerializationTests.java new file mode 100644 index 0000000000000..597d1cbc8533c --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PiSerializationTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.math; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class PiSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected Pi createTestInstance() { + return new Pi(randomSource()); + } + + @Override + protected Pi mutateInstance(Pi instance) throws IOException { + return null; + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PiTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PiTests.java index c21082b905962..8e427fcbae2b8 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PiTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PiTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.hamcrest.Matcher; @@ -24,7 +24,7 @@ import static org.hamcrest.Matchers.equalTo; -public class PiTests extends AbstractFunctionTestCase { +public class PiTests extends AbstractScalarFunctionTestCase { public PiTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowSerializationTests.java new file mode 100644 index 0000000000000..b811d719ca923 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowSerializationTests.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.math;
+
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests;
+
+import java.io.IOException;
+
+public class PowSerializationTests extends AbstractExpressionSerializationTests<Pow> {
+    @Override
+    protected Pow createTestInstance() {
+        Source source = randomSource();
+        Expression base = randomChild();
+        Expression exponent = randomChild();
+        return new Pow(source, base, exponent);
+    }
+
+    @Override
+    protected Pow mutateInstance(Pow instance) throws IOException {
+        Source source = instance.source();
+        Expression base = instance.base();
+        Expression exponent = instance.exponent();
+        if (randomBoolean()) {
+            base = randomValueOtherThan(base, AbstractExpressionSerializationTests::randomChild);
+        } else {
+            exponent = randomValueOtherThan(exponent, AbstractExpressionSerializationTests::randomChild);
+        }
+        return new Pow(source, base, exponent);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowTests.java
index 545e7c14ff2b2..bea0f399233fd 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowTests.java
@@ -13,13 +13,13 @@
 import org.elasticsearch.xpack.esql.core.expression.Expression;
 import org.elasticsearch.xpack.esql.core.tree.Source;
 import org.elasticsearch.xpack.esql.core.type.DataType;
-import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
+import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase;
 import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 
 import java.util.List;
 import java.util.function.Supplier;
 
-public class PowTests extends AbstractFunctionTestCase {
+public class PowTests extends AbstractScalarFunctionTestCase {
     public PowTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundSerializationTests.java
new file mode 100644
index 0000000000000..91e97a6d07b14
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundSerializationTests.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.math; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class RoundSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected Round createTestInstance() { + Source source = randomSource(); + Expression field = randomChild(); + Expression decimals = randomBoolean() ? null : randomChild(); + return new Round(source, field, decimals); + } + + @Override + protected Round mutateInstance(Round instance) throws IOException { + Source source = instance.source(); + Expression field = instance.field(); + Expression decimals = instance.decimals(); + if (randomBoolean()) { + field = randomValueOtherThan(field, AbstractExpressionSerializationTests::randomChild); + } else { + decimals = randomValueOtherThan(decimals, () -> randomBoolean() ? null : randomChild()); + } + return new Round(source, field, decimals); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundTests.java index 5e19d5f606034..c05388a9708da 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundTests.java @@ -16,7 +16,7 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.NumericUtils; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.ArrayList; @@ -29,7 +29,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; -public class RoundTests extends AbstractFunctionTestCase { +public class RoundTests extends AbstractScalarFunctionTestCase { public RoundTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SignumSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SignumSerializationTests.java new file mode 100644 index 0000000000000..98738aa8c64f6 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SignumSerializationTests.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.math; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractUnaryScalarSerializationTests; + +public class SignumSerializationTests extends AbstractUnaryScalarSerializationTests { + @Override + protected Signum create(Source source, Expression child) { + return new Signum(source, child); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SignumTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SignumTests.java index 89c2d07c4470a..21b44134458b7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SignumTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SignumTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.NumericUtils; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; @@ -22,7 +22,7 @@ import java.util.List; import java.util.function.Supplier; -public class SignumTests extends AbstractFunctionTestCase { +public class SignumTests extends AbstractScalarFunctionTestCase { public SignumTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinTests.java index 0d9bd6bcae64a..7a1190d86c2bf 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinTests.java @@ -12,13 +12,13 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.List; import java.util.function.Supplier; -public class SinTests extends AbstractFunctionTestCase { +public class SinTests extends AbstractScalarFunctionTestCase { public SinTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhTests.java index 8f78e8ee67106..b83519c6d1299 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhTests.java @@ -12,13 +12,13 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import 
org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.List; import java.util.function.Supplier; -public class SinhTests extends AbstractFunctionTestCase { +public class SinhTests extends AbstractScalarFunctionTestCase { public SinhTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtTests.java index a1d5b8523175c..9c81bbdc3cd49 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.NumericUtils; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; @@ -24,7 +24,7 @@ import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.unsignedLongToDouble; -public class SqrtTests extends AbstractFunctionTestCase { +public class SqrtTests extends AbstractScalarFunctionTestCase { public SqrtTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanTests.java index 86c59a7a06cf4..369c33a1291f1 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanTests.java @@ -12,13 +12,13 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.List; import java.util.function.Supplier; -public class TanTests extends AbstractFunctionTestCase { +public class TanTests extends AbstractScalarFunctionTestCase { public TanTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanhTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanhTests.java index 1f4fef4ab15c8..14fdcdca2fe96 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanhTests.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanhTests.java @@ -12,13 +12,13 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.List; import java.util.function.Supplier; -public class TanhTests extends AbstractFunctionTestCase { +public class TanhTests extends AbstractScalarFunctionTestCase { public TanhTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TauSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TauSerializationTests.java new file mode 100644 index 0000000000000..fb259f0e43150 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TauSerializationTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.math; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class TauSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected Tau createTestInstance() { + return new Tau(randomSource()); + } + + @Override + protected Tau mutateInstance(Tau instance) throws IOException { + return null; + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TauTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TauTests.java index aa64dfc6af90d..959db368ce348 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TauTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TauTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.hamcrest.Matcher; @@ -24,7 +24,7 @@ import static org.hamcrest.Matchers.equalTo; -public class TauTests extends AbstractFunctionTestCase { +public class TauTests extends AbstractScalarFunctionTestCase { public TauTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java index 2ea79d8a165c6..212b66027d455 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java @@ -18,7 +18,7 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.NumericUtils; import org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.hamcrest.Matcher; @@ -39,7 +39,7 @@ import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.CARTESIAN; import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.GEO; -public abstract class AbstractMultivalueFunctionTestCase extends AbstractFunctionTestCase { +public abstract class AbstractMultivalueFunctionTestCase extends AbstractScalarFunctionTestCase { /** * Build many test cases with {@code boolean} values. */ diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppendSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppendSerializationTests.java index 8afd1b44dc3f3..9bbb4856b5e0f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppendSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppendSerializationTests.java @@ -13,7 +13,7 @@ import java.io.IOException; -public class MvAppendSerializationTests extends AbstractMvSerializationTests { +public class MvAppendSerializationTests extends AbstractExpressionSerializationTests { @Override protected MvAppend createTestInstance() { Source source = randomSource(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppendTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppendTests.java index bc1a64da1cc73..7039d9edf794b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppendTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppendTests.java @@ -13,22 +13,10 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.geo.ShapeTestUtils; -import org.elasticsearch.geometry.Circle; -import org.elasticsearch.geometry.Geometry; -import org.elasticsearch.geometry.GeometryCollection; -import org.elasticsearch.geometry.GeometryVisitor; -import org.elasticsearch.geometry.Line; -import org.elasticsearch.geometry.LinearRing; -import org.elasticsearch.geometry.MultiLine; -import org.elasticsearch.geometry.MultiPoint; -import org.elasticsearch.geometry.MultiPolygon; -import org.elasticsearch.geometry.Point; -import org.elasticsearch.geometry.Polygon; 
-import org.elasticsearch.geometry.Rectangle; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.ArrayList; @@ -39,7 +27,7 @@ import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.GEO; import static org.hamcrest.Matchers.equalTo; -public class MvAppendTests extends AbstractFunctionTestCase { +public class MvAppendTests extends AbstractScalarFunctionTestCase { public MvAppendTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -247,25 +235,8 @@ private static void bytesRefs(List suppliers) { })); suppliers.add(new TestCaseSupplier(List.of(DataType.GEO_SHAPE, DataType.GEO_SHAPE), () -> { - GeometryPointCountVisitor pointCounter = new GeometryPointCountVisitor(); - List field1 = randomList( - 1, - 3, - () -> new BytesRef( - GEO.asWkt( - randomValueOtherThanMany(g -> g.visit(pointCounter) > 500, () -> GeometryTestUtils.randomGeometry(randomBoolean())) - ) - ) - ); - List field2 = randomList( - 1, - 3, - () -> new BytesRef( - GEO.asWkt( - randomValueOtherThanMany(g -> g.visit(pointCounter) > 500, () -> GeometryTestUtils.randomGeometry(randomBoolean())) - ) - ) - ); + var field1 = randomList(1, 3, () -> new BytesRef(GEO.asWkt(GeometryTestUtils.randomGeometry(randomBoolean(), 500)))); + var field2 = randomList(1, 3, () -> new BytesRef(GEO.asWkt(GeometryTestUtils.randomGeometry(randomBoolean(), 500)))); var result = new ArrayList<>(field1); result.addAll(field2); return new TestCaseSupplier.TestCase( @@ -280,25 +251,8 @@ private static void bytesRefs(List suppliers) { })); suppliers.add(new TestCaseSupplier(List.of(DataType.CARTESIAN_SHAPE, DataType.CARTESIAN_SHAPE), () -> { - GeometryPointCountVisitor pointCounter = new GeometryPointCountVisitor(); - List field1 = randomList( - 1, - 3, - () -> new BytesRef( - GEO.asWkt( - randomValueOtherThanMany(g -> g.visit(pointCounter) > 500, () -> ShapeTestUtils.randomGeometry(randomBoolean())) - ) - ) - ); - List field2 = randomList( - 1, - 3, - () -> new BytesRef( - GEO.asWkt( - randomValueOtherThanMany(g -> g.visit(pointCounter) > 500, () -> ShapeTestUtils.randomGeometry(randomBoolean())) - ) - ) - ); + var field1 = randomList(1, 3, () -> new BytesRef(CARTESIAN.asWkt(ShapeTestUtils.randomGeometry(randomBoolean(), 500)))); + var field2 = randomList(1, 3, () -> new BytesRef(CARTESIAN.asWkt(ShapeTestUtils.randomGeometry(randomBoolean(), 500)))); var result = new ArrayList<>(field1); result.addAll(field2); return new TestCaseSupplier.TestCase( @@ -339,65 +293,4 @@ private static void nulls(List suppliers) { ); })); } - - public static class GeometryPointCountVisitor implements GeometryVisitor { - - @Override - public Integer visit(Circle circle) throws RuntimeException { - return 2; - } - - @Override - public Integer visit(GeometryCollection collection) throws RuntimeException { - int size = 0; - for (Geometry geometry : collection) { - size += geometry.visit(this); - } - return size; - } - - @Override - public Integer visit(Line line) throws RuntimeException { - return line.length(); - } - - @Override - public Integer visit(LinearRing ring) throws RuntimeException { - return ring.length(); - } - - @Override - 
public Integer visit(MultiLine multiLine) throws RuntimeException { - return visit((GeometryCollection) multiLine); - } - - @Override - public Integer visit(MultiPoint multiPoint) throws RuntimeException { - return multiPoint.size(); - } - - @Override - public Integer visit(MultiPolygon multiPolygon) throws RuntimeException { - return visit((GeometryCollection) multiPolygon); - } - - @Override - public Integer visit(Point point) throws RuntimeException { - return 1; - } - - @Override - public Integer visit(Polygon polygon) throws RuntimeException { - int size = polygon.getPolygon().length(); - for (int i = 0; i < polygon.getNumberOfHoles(); i++) { - size += polygon.getHole(i).length(); - } - return size; - } - - @Override - public Integer visit(Rectangle rectangle) throws RuntimeException { - return 4; - } - } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgSerializationTests.java index f70702b001492..271312622245d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgSerializationTests.java @@ -11,7 +11,7 @@ import java.io.IOException; -public class MvAvgSerializationTests extends AbstractMvSerializationTests { +public class MvAvgSerializationTests extends AbstractExpressionSerializationTests { @Override protected MvAvg createTestInstance() { return new MvAvg(randomSource(), randomChild()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatSerializationTests.java index 9f2aba8d9d9ca..ba4eda8590f70 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatSerializationTests.java @@ -13,7 +13,7 @@ import java.io.IOException; -public class MvConcatSerializationTests extends AbstractMvSerializationTests { +public class MvConcatSerializationTests extends AbstractExpressionSerializationTests { @Override protected MvConcat createTestInstance() { Source source = randomSource(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatTests.java index ba4ddb1be84cc..0277093152cba 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import 
org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; @@ -24,7 +24,7 @@ import static org.hamcrest.Matchers.equalTo; -public class MvConcatTests extends AbstractFunctionTestCase { +public class MvConcatTests extends AbstractScalarFunctionTestCase { public MvConcatTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountSerializationTests.java index a0d28a6cf925b..0ec51d73982ec 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountSerializationTests.java @@ -11,7 +11,7 @@ import java.io.IOException; -public class MvCountSerializationTests extends AbstractMvSerializationTests { +public class MvCountSerializationTests extends AbstractExpressionSerializationTests { @Override protected MvCount createTestInstance() { return new MvCount(randomSource(), randomChild()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupeSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupeSerializationTests.java index afb2ec90e1e3e..410167addf163 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupeSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupeSerializationTests.java @@ -11,7 +11,7 @@ import java.io.IOException; -public class MvDedupeSerializationTests extends AbstractMvSerializationTests { +public class MvDedupeSerializationTests extends AbstractExpressionSerializationTests { @Override protected MvDedupe createTestInstance() { return new MvDedupe(randomSource(), randomChild()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstSerializationTests.java index dbb49bb96a663..8934dde1717c6 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstSerializationTests.java @@ -11,7 +11,7 @@ import java.io.IOException; -public class MvFirstSerializationTests extends AbstractMvSerializationTests { +public class MvFirstSerializationTests extends AbstractExpressionSerializationTests { @Override protected MvFirst createTestInstance() { return new MvFirst(randomSource(), randomChild()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastSerializationTests.java index 190eb0263c162..9c4ad7ab059ef 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastSerializationTests.java @@ -11,7 +11,7 @@ import java.io.IOException; -public class MvLastSerializationTests extends AbstractMvSerializationTests { +public class MvLastSerializationTests extends AbstractExpressionSerializationTests { @Override protected MvLast createTestInstance() { return new MvLast(randomSource(), randomChild()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxSerializationTests.java index ffc51af5f103d..4ce5112c4e8e7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxSerializationTests.java @@ -11,7 +11,7 @@ import java.io.IOException; -public class MvMaxSerializationTests extends AbstractMvSerializationTests { +public class MvMaxSerializationTests extends AbstractExpressionSerializationTests { @Override protected MvMax createTestInstance() { return new MvMax(randomSource(), randomChild()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianSerializationTests.java index 067cc6430ce01..0e35ec6f77150 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianSerializationTests.java @@ -11,7 +11,7 @@ import java.io.IOException; -public class MvMedianSerializationTests extends AbstractMvSerializationTests { +public class MvMedianSerializationTests extends AbstractExpressionSerializationTests { @Override protected MvMedian createTestInstance() { return new MvMedian(randomSource(), randomChild()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinSerializationTests.java index 1f38587274353..0769e41a09921 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinSerializationTests.java @@ -11,7 +11,7 @@ import java.io.IOException; -public class MvMinSerializationTests extends AbstractMvSerializationTests { +public class MvMinSerializationTests extends AbstractExpressionSerializationTests { @Override protected MvMin createTestInstance() { return new MvMin(randomSource(), randomChild()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceSerializationTests.java 
index 64209ce0f4644..ffa355178b460 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceSerializationTests.java @@ -13,7 +13,7 @@ import java.io.IOException; -public class MvSliceSerializationTests extends AbstractMvSerializationTests { +public class MvSliceSerializationTests extends AbstractExpressionSerializationTests { @Override protected MvSlice createTestInstance() { Source source = randomSource(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java index 0550be25f9d91..5684c68051446 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java @@ -16,7 +16,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.ArrayList; @@ -28,7 +28,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; -public class MvSliceTests extends AbstractFunctionTestCase { +public class MvSliceTests extends AbstractScalarFunctionTestCase { public MvSliceTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -306,16 +306,7 @@ private static void bytesRefs(List suppliers) { })); suppliers.add(new TestCaseSupplier(List.of(DataType.GEO_SHAPE, DataType.INTEGER, DataType.INTEGER), () -> { - var pointCounter = new MvAppendTests.GeometryPointCountVisitor(); - List field = randomList( - 1, - 5, - () -> new BytesRef( - GEO.asWkt( - randomValueOtherThanMany(g -> g.visit(pointCounter) > 500, () -> GeometryTestUtils.randomGeometry(randomBoolean())) - ) - ) - ); + var field = randomList(1, 5, () -> new BytesRef(GEO.asWkt(GeometryTestUtils.randomGeometry(randomBoolean(), 500)))); int length = field.size(); int start = randomIntBetween(0, length - 1); int end = randomIntBetween(start, length - 1); @@ -332,16 +323,7 @@ private static void bytesRefs(List suppliers) { })); suppliers.add(new TestCaseSupplier(List.of(DataType.CARTESIAN_SHAPE, DataType.INTEGER, DataType.INTEGER), () -> { - var pointCounter = new MvAppendTests.GeometryPointCountVisitor(); - List field = randomList( - 1, - 5, - () -> new BytesRef( - CARTESIAN.asWkt( - randomValueOtherThanMany(g -> g.visit(pointCounter) > 500, () -> GeometryTestUtils.randomGeometry(randomBoolean())) - ) - ) - ); + var field = randomList(1, 5, () -> new BytesRef(CARTESIAN.asWkt(GeometryTestUtils.randomGeometry(randomBoolean(), 500)))); int length = field.size(); int start = randomIntBetween(0, length - 1); int end = randomIntBetween(start, length - 1); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortSerializationTests.java 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortSerializationTests.java index 1728ad6f09357..d7dba33e1aae3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortSerializationTests.java @@ -13,7 +13,7 @@ import java.io.IOException; -public class MvSortSerializationTests extends AbstractMvSerializationTests { +public class MvSortSerializationTests extends AbstractExpressionSerializationTests { @Override protected MvSort createTestInstance() { Source source = randomSource(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortTests.java index 7c6413e590bfe..15c81557961f1 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortTests.java @@ -12,10 +12,12 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.ArrayList; @@ -25,7 +27,7 @@ import static org.hamcrest.Matchers.equalTo; -public class MvSortTests extends AbstractFunctionTestCase { +public class MvSortTests extends AbstractScalarFunctionTestCase { public MvSortTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -183,6 +185,22 @@ private static void bytesRefs(List suppliers) { })); } + public void testInvalidOrder() { + String invalidOrder = randomAlphaOfLength(10); + DriverContext driverContext = driverContext(); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> evaluator( + new MvSort( + Source.EMPTY, + field("str", DataType.DATETIME), + new Literal(Source.EMPTY, new BytesRef(invalidOrder), DataType.KEYWORD) + ) + ).get(driverContext) + ); + assertThat(e.getMessage(), equalTo("Invalid order value in [], expected one of [ASC, DESC] but got [" + invalidOrder + "]")); + } + @Override public void testSimpleWithNulls() { assumeFalse("test case is invalid", false); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumSerializationTests.java index e8ddcc9340b45..15f6d94b44066 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumSerializationTests.java @@ -11,7 +11,7 
@@ import java.io.IOException; -public class MvSumSerializationTests extends AbstractMvSerializationTests { +public class MvSumSerializationTests extends AbstractExpressionSerializationTests { @Override protected MvSum createTestInstance() { return new MvSum(randomSource(), randomChild()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipSerializationTests.java index d16ca02627b29..4b49a1f55340d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipSerializationTests.java @@ -13,7 +13,7 @@ import java.io.IOException; -public class MvZipSerializationTests extends AbstractMvSerializationTests { +public class MvZipSerializationTests extends AbstractExpressionSerializationTests { @Override protected MvZip createTestInstance() { Source source = randomSource(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipTests.java index 30fe420f29960..e9f0fd5b51516 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvZipTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.ArrayList; @@ -25,7 +25,7 @@ import static java.lang.Math.max; import static org.hamcrest.Matchers.equalTo; -public class MvZipTests extends AbstractFunctionTestCase { +public class MvZipTests extends AbstractScalarFunctionTestCase { public MvZipTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceSerializationTests.java index 7cab0a957b235..ad792b6b66668 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceSerializationTests.java @@ -7,20 +7,13 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.nulls; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.AbstractVarargsSerializationTests; -import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import java.util.List; public class 
CoalesceSerializationTests extends AbstractVarargsSerializationTests { - @Override - protected List getNamedWriteables() { - return EsqlScalarFunction.getNamedWriteables(); - } - @Override protected Coalesce create(Source source, Expression first, List rest) { return new Coalesce(source, first, rest); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java index 42022099ceace..c779fa9e2789f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java @@ -22,7 +22,7 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.evaluator.EvalMapper; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.VaragsTestCaseBuilder; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesFunctionTestCase; @@ -39,7 +39,7 @@ import static org.elasticsearch.compute.data.BlockUtils.toJavaObject; import static org.hamcrest.Matchers.equalTo; -public class CoalesceTests extends AbstractFunctionTestCase { +public class CoalesceTests extends AbstractScalarFunctionTestCase { public CoalesceTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -105,8 +105,8 @@ public static Iterable parameters() { protected static void addSpatialCombinations(List suppliers) { for (DataType dataType : List.of(DataType.GEO_POINT, DataType.GEO_SHAPE, DataType.CARTESIAN_POINT, DataType.CARTESIAN_SHAPE)) { - TestCaseSupplier.TypedDataSupplier leftDataSupplier = SpatialRelatesFunctionTestCase.testCaseSupplier(dataType); - TestCaseSupplier.TypedDataSupplier rightDataSupplier = SpatialRelatesFunctionTestCase.testCaseSupplier(dataType); + TestCaseSupplier.TypedDataSupplier leftDataSupplier = SpatialRelatesFunctionTestCase.testCaseSupplier(dataType, false); + TestCaseSupplier.TypedDataSupplier rightDataSupplier = SpatialRelatesFunctionTestCase.testCaseSupplier(dataType, false); suppliers.add( TestCaseSupplier.testCaseSupplier( leftDataSupplier, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNotNullSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNotNullSerializationTests.java index 23545b3627a1a..bd309e4a893dc 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNotNullSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNotNullSerializationTests.java @@ -7,22 +7,14 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.nulls; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull; import org.elasticsearch.xpack.esql.core.tree.Source; 
import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; -import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; import java.io.IOException; -import java.util.List; public class IsNotNullSerializationTests extends AbstractExpressionSerializationTests { - @Override - protected List getNamedWriteables() { - return UnaryScalarFunction.getNamedWriteables(); - } - @Override protected IsNotNull createTestInstance() { return new IsNotNull(randomSource(), randomChild()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNotNullTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNotNullTests.java index 299b66433dcd0..b99b47b6f505a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNotNullTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNotNullTests.java @@ -16,7 +16,7 @@ import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.hamcrest.Matcher; @@ -27,7 +27,7 @@ import static org.hamcrest.Matchers.equalTo; -public class IsNotNullTests extends AbstractFunctionTestCase { +public class IsNotNullTests extends AbstractScalarFunctionTestCase { public IsNotNullTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNullSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNullSerializationTests.java index 354a2129d7ec0..60bf3085c6d13 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNullSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNullSerializationTests.java @@ -7,22 +7,14 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.nulls; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNull; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; -import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; import java.io.IOException; -import java.util.List; public class IsNullSerializationTests extends AbstractExpressionSerializationTests { - @Override - protected List getNamedWriteables() { - return UnaryScalarFunction.getNamedWriteables(); - } - @Override protected IsNull createTestInstance() { return new IsNull(randomSource(), randomChild()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNullTests.java 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNullTests.java index 606e9598bda63..7abfad39967a5 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNullTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNullTests.java @@ -16,7 +16,7 @@ import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNull; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.hamcrest.Matcher; @@ -27,7 +27,7 @@ import static org.hamcrest.Matchers.equalTo; -public class IsNullTests extends AbstractFunctionTestCase { +public class IsNullTests extends AbstractScalarFunctionTestCase { public IsNullTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/AbstractBinarySpatialFunctionSerializationTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/AbstractBinarySpatialFunctionSerializationTestCase.java new file mode 100644 index 0000000000000..006fdf6865340 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/AbstractBinarySpatialFunctionSerializationTestCase.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public abstract class AbstractBinarySpatialFunctionSerializationTestCase extends + AbstractExpressionSerializationTests { + + protected abstract T build(Source source, Expression left, Expression right); + + @Override + protected final T createTestInstance() { + Source source = randomSource(); + Expression left = randomChild(); + Expression right = randomChild(); + return build(source, left, right); + } + + @Override + protected final T mutateInstance(T instance) throws IOException { + Source source = instance.source(); + Expression left = instance.left(); + Expression right = instance.right(); + if (randomBoolean()) { + left = randomValueOtherThan(left, AbstractExpressionSerializationTests::randomChild); + } else { + right = randomValueOtherThan(right, AbstractExpressionSerializationTests::randomChild); + } + return build(source, left, right); + } + + @Override + protected final boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunctionTestCase.java new file mode 100644 index 0000000000000..a30cce9f765ed --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunctionTestCase.java @@ -0,0 +1,293 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import joptsimple.internal.Strings; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.hamcrest.Matcher; + +import java.io.IOException; +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; +import java.util.Set; +import java.util.function.BinaryOperator; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesFunction.compatibleTypeNames; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isSpatial; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isSpatialGeo; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isString; +import static org.hamcrest.Matchers.equalTo; + +public abstract class BinarySpatialFunctionTestCase extends AbstractScalarFunctionTestCase { + + private static String getFunctionClassName() { + Class testClass = getTestClass(); + String testClassName = testClass.getSimpleName(); + return testClassName.replace("Tests", ""); + } + + protected static Class getSpatialRelatesFunctionClass() throws ClassNotFoundException { + String functionClassName = getFunctionClassName(); + return Class.forName("org.elasticsearch.xpack.esql.expression.function.scalar.spatial." + functionClassName); + } + + public static TestCaseSupplier.TypedDataSupplier testCaseSupplier(DataType dataType, boolean pointsOnly) { + if (pointsOnly) { + return switch (dataType.esType()) { + case "geo_point" -> TestCaseSupplier.geoPointCases(() -> false).get(0); + case "cartesian_point" -> TestCaseSupplier.cartesianPointCases(() -> false).get(0); + default -> throw new IllegalArgumentException("Unsupported datatype for " + functionName() + ": " + dataType); + }; + } else { + return switch (dataType.esType()) { + case "geo_point" -> TestCaseSupplier.geoPointCases(() -> false).get(0); + case "geo_shape" -> TestCaseSupplier.geoShapeCases(() -> false).get(0); + case "cartesian_point" -> TestCaseSupplier.cartesianPointCases(() -> false).get(0); + case "cartesian_shape" -> TestCaseSupplier.cartesianShapeCases(() -> false).get(0); + default -> throw new IllegalArgumentException("Unsupported datatype for " + functionName() + ": " + dataType); + }; + } + } + + /** + * Binary spatial functions that take two spatial arguments + * should use this to generate combinations of test cases. 
+ */ + protected static void addSpatialCombinations( + List suppliers, + DataType[] dataTypes, + DataType returnType, + boolean pointsOnly + ) { + for (DataType leftType : dataTypes) { + TestCaseSupplier.TypedDataSupplier leftDataSupplier = testCaseSupplier(leftType, pointsOnly); + for (DataType rightType : dataTypes) { + if (typeCompatible(leftType, rightType)) { + TestCaseSupplier.TypedDataSupplier rightDataSupplier = testCaseSupplier(rightType, pointsOnly); + suppliers.add( + TestCaseSupplier.testCaseSupplier( + leftDataSupplier, + rightDataSupplier, + BinarySpatialFunctionTestCase::spatialEvaluatorString, + returnType, + (l, r) -> expected(l, leftType, r, rightType) + ) + ); + } + } + } + } + + /** + * Build the expected error message for an invalid type signature. + * For two args, this assumes they are both spatial. + * For three args, we assume two spatial and one additional numerical argument, treated differently. + */ + protected static String typeErrorMessage( + boolean includeOrdinal, + List> validPerPosition, + List types, + boolean pointsOnly + ) { + boolean argInvalid = false; + List badArgPositions = new ArrayList<>(); + for (int i = 0; i < types.size(); i++) { + if (validPerPosition.get(i).contains(types.get(i)) == false) { + if (i == 2) { + argInvalid = true; + } else { + badArgPositions.add(i); + } + } + } + if (badArgPositions.isEmpty() && types.get(0) != DataType.NULL && types.get(1) != DataType.NULL) { + // First two arguments are valid spatial types, but it is still possible they are incompatible + var leftCrs = BinarySpatialFunction.SpatialCrsType.fromDataType(types.get(0)); + var rightCrs = BinarySpatialFunction.SpatialCrsType.fromDataType(types.get(1)); + if (leftCrs != rightCrs) { + badArgPositions.add(1); + } + } + if (badArgPositions.size() == 1) { + int badArgPosition = badArgPositions.get(0); + int goodArgPosition = badArgPosition == 0 ? 1 : 0; + if (isSpatial(types.get(goodArgPosition)) == false) { + return oneInvalid(badArgPosition, -1, includeOrdinal, types, pointsOnly); + } else { + return oneInvalid(badArgPosition, goodArgPosition, includeOrdinal, types, pointsOnly); + } + } else if (argInvalid && badArgPositions.size() != 2) { + return invalidArg(types.get(2)); + } else { + return oneInvalid(0, -1, includeOrdinal, types, pointsOnly); + } + } + + private static String invalidArg(DataType invalidType) { + return String.format( + Locale.ROOT, + "%s argument of [%s] must be [%s], found value [%s] type [%s]", + TypeResolutions.ParamOrdinal.fromIndex(2).toString().toLowerCase(Locale.ROOT), + "", + "double", + invalidType.typeName(), + invalidType.typeName() + ); + } + + private static String oneInvalid( + int badArgPosition, + int goodArgPosition, + boolean includeOrdinal, + List types, + boolean pointsOnly + ) { + String expected = pointsOnly ? "geo_point or cartesian_point" : "geo_point, cartesian_point, geo_shape or cartesian_shape"; + String ordinal = includeOrdinal ? TypeResolutions.ParamOrdinal.fromIndex(badArgPosition).name().toLowerCase(Locale.ROOT) + " " : ""; + String expectedType = goodArgPosition >= 0 ? 
compatibleTypes(types.get(goodArgPosition)) : expected; + String name = types.get(badArgPosition).typeName(); + return ordinal + "argument of [] must be [" + expectedType + "], found value [" + name + "] type [" + name + "]"; + } + + private static String compatibleTypes(DataType spatialDataType) { + return Strings.join(compatibleTypeNames(spatialDataType), " or "); + } + + protected static Object expected(Object left, DataType leftType, Object right, DataType rightType) { + if (typeCompatible(leftType, rightType) == false) { + return null; + } + // TODO cast objects to right type and check intersection + BytesRef leftWKB = asGeometryWKB(left, leftType); + BytesRef rightWKB = asGeometryWKB(right, rightType); + BinarySpatialFunction.BinarySpatialComparator spatialRelations = spatialRelations(left, leftType, right, rightType); + try { + return spatialRelations.compare(leftWKB, rightWKB); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** + * When two spatial arguments are processed and then compared with a third argument, + * we need to process this argument too, before producing the final result. + */ + protected static Object expected( + Object left, + DataType leftType, + Object right, + DataType rightType, + Object arg, + BinaryOperator argProcessor + ) { + Object result = expected(left, leftType, right, rightType); + if (result == null) { + return null; + } + return argProcessor.apply(result, arg); + } + + private static BinarySpatialFunction.BinarySpatialComparator getRelationsField(String name) { + try { + Field field = getSpatialRelatesFunctionClass().getField(name); + Object value = field.get(null); + return (BinarySpatialFunction.BinarySpatialComparator) value; + } catch (NoSuchFieldException | ClassNotFoundException | IllegalAccessException e) { + throw new RuntimeException(e); + } + } + + private static BinarySpatialFunction.BinarySpatialComparator spatialRelations( + Object left, + DataType leftType, + Object right, + DataType rightType + ) { + if (isSpatialGeo(leftType) || isSpatialGeo(rightType)) { + return getRelationsField("GEO"); + } else if (isSpatial(leftType) || isSpatial(rightType)) { + return getRelationsField("CARTESIAN"); + } else { + throw new IllegalArgumentException( + "Unsupported left and right types: left[" + + leftType.esType() + + ":" + + left.getClass().getSimpleName() + + "] right[" + + rightType.esType() + + ":" + + right.getClass().getSimpleName() + + "]" + ); + } + } + + protected static BytesRef asGeometryWKB(Object object, DataType dataType) { + if (isString(dataType)) { + return SpatialCoordinateTypes.UNSPECIFIED.wktToWkb(object.toString()); + } else if (object instanceof BytesRef wkb) { + return wkb; + } else { + throw new IllegalArgumentException("Invalid geometry base type for " + dataType + ": " + object.getClass().getSimpleName()); + } + } + + protected static boolean typeCompatible(DataType leftType, DataType rightType) { + if (isSpatial(leftType) && isSpatial(rightType)) { + // Both must be GEO_* or both must be CARTESIAN_* + return countGeo(leftType, rightType) != 1; + } + return true; + } + + private static DataType pickSpatialType(DataType leftType, DataType rightType) { + if (isSpatial(leftType)) { + return leftType; + } else if (isSpatial(rightType)) { + return rightType; + } else { + throw new IllegalArgumentException("Invalid spatial types: " + leftType + " and " + rightType); + } + } + + private static Matcher spatialEvaluatorString(DataType leftType, DataType rightType) { + String crsType = 
isSpatialGeo(pickSpatialType(leftType, rightType)) ? "Geo" : "Cartesian"; + String channels = channelsText("leftValue", "rightValue"); + return equalTo(getFunctionClassName() + crsType + "SourceAndSourceEvaluator[" + channels + "]"); + } + + private static Matcher spatialEvaluatorString(DataType leftType, DataType rightType, DataType argType) { + String crsType = isSpatialGeo(pickSpatialType(leftType, rightType)) ? "Geo" : "Cartesian"; + String channels = channelsText("leftValue", "rightValue", "argValue"); + return equalTo(getFunctionClassName() + crsType + "FieldAndFieldAndFieldEvaluator[" + channels + "]"); + } + + private static String channelsText(String... args) { + return IntStream.range(0, args.length).mapToObj(i -> args[i] + "=Attribute[channel=" + i + "]").collect(Collectors.joining(", ")); + } + + private static int countGeo(DataType... types) { + int count = 0; + for (DataType type : types) { + if (isSpatialGeo(type)) { + count++; + } + } + return count; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContainsSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContainsSerializationTests.java new file mode 100644 index 0000000000000..5c707f54ac9d0 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContainsSerializationTests.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; + +public class SpatialContainsSerializationTests extends AbstractBinarySpatialFunctionSerializationTestCase { + @Override + protected SpatialContains build(Source source, Expression left, Expression right) { + return new SpatialContains(source, left, right); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjointSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjointSerializationTests.java new file mode 100644 index 0000000000000..a16e7ffdb2d17 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjointSerializationTests.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; + +public class SpatialDisjointSerializationTests extends AbstractBinarySpatialFunctionSerializationTestCase { + @Override + protected SpatialDisjoint build(Source source, Expression left, Expression right) { + return new SpatialDisjoint(source, left, right); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersectsSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersectsSerializationTests.java new file mode 100644 index 0000000000000..35a85926101f5 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersectsSerializationTests.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; + +public class SpatialIntersectsSerializationTests extends AbstractBinarySpatialFunctionSerializationTestCase { + @Override + protected SpatialIntersects build(Source source, Expression left, Expression right) { + return new SpatialIntersects(source, left, right); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialRelatesFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialRelatesFunctionTestCase.java index 9929971c48613..53ed472a4d43f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialRelatesFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialRelatesFunctionTestCase.java @@ -7,202 +7,19 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; -import joptsimple.internal.Strings; - -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; -import org.hamcrest.Matcher; -import java.io.IOException; -import java.lang.reflect.Field; -import java.util.ArrayList; import java.util.List; -import java.util.Locale; import java.util.Set; -import static org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesFunction.compatibleTypeNames; -import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isSpatial; -import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isSpatialGeo; -import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isString; -import static org.hamcrest.Matchers.equalTo; - -public abstract class SpatialRelatesFunctionTestCase extends AbstractFunctionTestCase { - - private static String 
getFunctionClassName() { - Class testClass = getTestClass(); - String testClassName = testClass.getSimpleName(); - return testClassName.replace("Tests", ""); - } - - private static Class getSpatialRelatesFunctionClass() throws ClassNotFoundException { - String functionClassName = getFunctionClassName(); - return Class.forName("org.elasticsearch.xpack.esql.expression.function.scalar.spatial." + functionClassName); - } - - private static SpatialRelatesFunction.SpatialRelations getRelationsField(String name) { - try { - Field field = getSpatialRelatesFunctionClass().getField(name); - Object value = field.get(null); - return (SpatialRelatesFunction.SpatialRelations) value; - } catch (NoSuchFieldException | ClassNotFoundException | IllegalAccessException e) { - throw new RuntimeException(e); - } - } +public abstract class SpatialRelatesFunctionTestCase extends BinarySpatialFunctionTestCase { protected static void addSpatialCombinations(List suppliers, DataType[] dataTypes) { - for (DataType leftType : dataTypes) { - TestCaseSupplier.TypedDataSupplier leftDataSupplier = testCaseSupplier(leftType); - for (DataType rightType : dataTypes) { - if (typeCompatible(leftType, rightType)) { - TestCaseSupplier.TypedDataSupplier rightDataSupplier = testCaseSupplier(rightType); - suppliers.add( - TestCaseSupplier.testCaseSupplier( - leftDataSupplier, - rightDataSupplier, - SpatialRelatesFunctionTestCase::spatialEvaluatorString, - DataType.BOOLEAN, - (l, r) -> expected(l, leftType, r, rightType) - ) - ); - } - } - } + addSpatialCombinations(suppliers, dataTypes, DataType.BOOLEAN, false); } - /** - * Build the expected error message for an invalid type signature. - */ protected static String typeErrorMessage(boolean includeOrdinal, List> validPerPosition, List types) { - List badArgPositions = new ArrayList<>(); - for (int i = 0; i < types.size(); i++) { - if (validPerPosition.get(i).contains(types.get(i)) == false) { - badArgPositions.add(i); - } - } - if (badArgPositions.isEmpty()) { - return oneInvalid(1, 0, includeOrdinal, types); - } else if (badArgPositions.size() == 1) { - int badArgPosition = badArgPositions.get(0); - int goodArgPosition = badArgPosition == 0 ? 1 : 0; - if (isSpatial(types.get(goodArgPosition)) == false) { - return oneInvalid(badArgPosition, -1, includeOrdinal, types); - } else { - return oneInvalid(badArgPosition, goodArgPosition, includeOrdinal, types); - } - } else { - return oneInvalid(0, -1, includeOrdinal, types); - } - } - - private static String oneInvalid(int badArgPosition, int goodArgPosition, boolean includeOrdinal, List types) { - String ordinal = includeOrdinal ? TypeResolutions.ParamOrdinal.fromIndex(badArgPosition).name().toLowerCase(Locale.ROOT) + " " : ""; - String expectedType = goodArgPosition >= 0 - ? 
compatibleTypes(types.get(goodArgPosition)) - : "geo_point, cartesian_point, geo_shape or cartesian_shape"; - String name = types.get(badArgPosition).typeName(); - return ordinal + "argument of [] must be [" + expectedType + "], found value [" + name + "] type [" + name + "]"; - } - - private static String compatibleTypes(DataType spatialDataType) { - return Strings.join(compatibleTypeNames(spatialDataType), " or "); - } - - public static TestCaseSupplier.TypedDataSupplier testCaseSupplier(DataType dataType) { - return switch (dataType.esType()) { - case "geo_point" -> TestCaseSupplier.geoPointCases(() -> false).get(0); - case "geo_shape" -> TestCaseSupplier.geoShapeCases(() -> false).get(0); - case "cartesian_point" -> TestCaseSupplier.cartesianPointCases(() -> false).get(0); - case "cartesian_shape" -> TestCaseSupplier.cartesianShapeCases(() -> false).get(0); - default -> throw new IllegalArgumentException("Unsupported datatype for " + functionName() + ": " + dataType); - }; - } - - private static Object expected(Object left, DataType leftType, Object right, DataType rightType) { - if (typeCompatible(leftType, rightType) == false) { - return null; - } - // TODO cast objects to right type and check intersection - BytesRef leftWKB = asGeometryWKB(left, leftType); - BytesRef rightWKB = asGeometryWKB(right, rightType); - SpatialRelatesFunction.SpatialRelations spatialRelations = spatialRelations(left, leftType, right, rightType); - try { - return spatialRelations.geometryRelatesGeometry(leftWKB, rightWKB); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - private static SpatialRelatesFunction.SpatialRelations spatialRelations( - Object left, - DataType leftType, - Object right, - DataType rightType - ) { - if (isSpatialGeo(leftType) || isSpatialGeo(rightType)) { - return getRelationsField("GEO"); - } else if (isSpatial(leftType) || isSpatial(rightType)) { - return getRelationsField("CARTESIAN"); - } else { - throw new IllegalArgumentException( - "Unsupported left and right types: left[" - + leftType.esType() - + ":" - + left.getClass().getSimpleName() - + "] right[" - + rightType.esType() - + ":" - + right.getClass().getSimpleName() - + "]" - ); - } - } - - private static BytesRef asGeometryWKB(Object object, DataType dataType) { - if (isString(dataType)) { - return SpatialCoordinateTypes.UNSPECIFIED.wktToWkb(object.toString()); - } else if (object instanceof BytesRef wkb) { - return wkb; - } else { - throw new IllegalArgumentException("Invalid geometry base type for " + dataType + ": " + object.getClass().getSimpleName()); - } - } - - private static boolean typeCompatible(DataType leftType, DataType rightType) { - if (isSpatial(leftType) && isSpatial(rightType)) { - // Both must be GEO_* or both must be CARTESIAN_* - return countGeo(leftType, rightType) != 1; - } - return true; - } - - private static DataType pickSpatialType(DataType leftType, DataType rightType) { - if (isSpatial(leftType)) { - return leftType; - } else if (isSpatial(rightType)) { - return rightType; - } else { - throw new IllegalArgumentException("Invalid spatial types: " + leftType + " and " + rightType); - } - } - - public static Matcher spatialEvaluatorString(DataType leftType, DataType rightType) { - String crsType = isSpatialGeo(pickSpatialType(leftType, rightType)) ? "Geo" : "Cartesian"; - return equalTo( - getFunctionClassName() + crsType + "SourceAndSourceEvaluator[leftValue=Attribute[channel=0], rightValue=Attribute[channel=1]]" - ); - } - - private static int countGeo(DataType... 
types) { - int count = 0; - for (DataType type : types) { - if (isSpatialGeo(type)) { - count++; - } - } - return count; + return typeErrorMessage(includeOrdinal, validPerPosition, types, false); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithinSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithinSerializationTests.java new file mode 100644 index 0000000000000..74fe752b59eaf --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithinSerializationTests.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; + +public class SpatialWithinSerializationTests extends AbstractBinarySpatialFunctionSerializationTestCase { + @Override + protected SpatialWithin build(Source source, Expression left, Expression right) { + return new SpatialWithin(source, left, right); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceSerializationTests.java new file mode 100644 index 0000000000000..c9cdccdaf0ca2 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceSerializationTests.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; + +public class StDistanceSerializationTests extends AbstractBinarySpatialFunctionSerializationTestCase { + @Override + protected StDistance build(Source source, Expression left, Expression right) { + return new StDistance(source, left, right); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceTests.java new file mode 100644 index 0000000000000..c78977918fc5e --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistanceTests.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.FunctionName; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; + +import java.util.ArrayList; +import java.util.List; +import java.util.Set; +import java.util.function.Supplier; + +@FunctionName("st_distance") +public class StDistanceTests extends BinarySpatialFunctionTestCase { + public StDistanceTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable<Object[]> parameters() { + List<TestCaseSupplier> suppliers = new ArrayList<>(); + DataType[] geoDataTypes = { DataType.GEO_POINT }; + StDistanceTests.addSpatialCombinations(suppliers, geoDataTypes); + DataType[] cartesianDataTypes = { DataType.CARTESIAN_POINT }; + StDistanceTests.addSpatialCombinations(suppliers, cartesianDataTypes); + return parameterSuppliersFromTypedData( + errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers), StDistanceTests::typeErrorMessage) + ); + } + + @Override + protected Expression build(Source source, List<Expression> args) { + return new StDistance(source, args.get(0), args.get(1)); + } + + protected static void addSpatialCombinations(List<TestCaseSupplier> suppliers, DataType[] dataTypes) { + addSpatialCombinations(suppliers, dataTypes, DataType.DOUBLE, true); + } + + protected static String typeErrorMessage(boolean includeOrdinal, List<Set<DataType>> validPerPosition, List<DataType> types) { + return typeErrorMessage(includeOrdinal, validPerPosition, types, true); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXSerializationTests.java new file mode 100644 index 0000000000000..a497d8a50f06b --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXSerializationTests.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class StXSerializationTests extends AbstractExpressionSerializationTests<StX> { + @Override + protected StX createTestInstance() { + return new StX(randomSource(), randomChild()); + } + + @Override + protected StX mutateInstance(StX instance) throws IOException { + return new StX(instance.source(), randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXTests.java index fa0fc8465ce7a..71e73398ddcd4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXTests.java @@ -13,7 +13,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.FunctionName; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; @@ -25,7 +25,7 @@ import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.UNSPECIFIED; @FunctionName("st_x") -public class StXTests extends AbstractFunctionTestCase { +public class StXTests extends AbstractScalarFunctionTestCase { public StXTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYSerializationTests.java new file mode 100644 index 0000000000000..5b3edb9cd0a24 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYSerializationTests.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class StYSerializationTests extends AbstractExpressionSerializationTests<StY> { + @Override + protected StY createTestInstance() { + return new StY(randomSource(), randomChild()); + } + + @Override + protected StY mutateInstance(StY instance) throws IOException { + return new StY(instance.source(), randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYTests.java index 15f34271be779..a30ae924754d6 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYTests.java @@ -13,7 +13,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.FunctionName; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; @@ -25,7 +25,7 @@ import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.UNSPECIFIED; @FunctionName("st_y") -public class StYTests extends AbstractFunctionTestCase { +public class StYTests extends AbstractScalarFunctionTestCase { public StYTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java index 27e3fc8684efc..a92f3ffb49533 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java @@ -9,7 +9,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.ArrayList; @@ -18,7 +18,7 @@ import static org.hamcrest.Matchers.equalTo; -public abstract class AbstractTrimTests extends AbstractFunctionTestCase { +public abstract class AbstractTrimTests extends AbstractScalarFunctionTestCase { static Iterable<Object[]> parameters(String name, boolean trimLeading, boolean trimTrailing) { List<TestCaseSupplier> suppliers = new ArrayList<>(); for (DataType type : strings()) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatSerializationTests.java index
30f6acffbaf8a..c1fa6f9274a8c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatSerializationTests.java @@ -7,20 +7,13 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.string; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.AbstractVarargsSerializationTests; -import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import java.util.List; public class ConcatSerializationTests extends AbstractVarargsSerializationTests { - @Override - protected List getNamedWriteables() { - return EsqlScalarFunction.getNamedWriteables(); - } - @Override protected Concat create(Source source, Expression first, List rest) { return new Concat(source, first, rest); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java index f46ae25fddfc7..c398faacb90d0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java @@ -18,7 +18,7 @@ import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; @@ -35,7 +35,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -public class ConcatTests extends AbstractFunctionTestCase { +public class ConcatTests extends AbstractScalarFunctionTestCase { public ConcatTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/EndsWithSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/EndsWithSerializationTests.java new file mode 100644 index 0000000000000..183e39f11b6c3 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/EndsWithSerializationTests.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class EndsWithSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected EndsWith createTestInstance() { + Source source = randomSource(); + Expression str = randomChild(); + Expression suffix = randomChild(); + return new EndsWith(source, str, suffix); + } + + @Override + protected EndsWith mutateInstance(EndsWith instance) throws IOException { + Source source = instance.source(); + Expression str = instance.str(); + Expression suffix = instance.suffix(); + if (randomBoolean()) { + str = randomValueOtherThan(str, AbstractExpressionSerializationTests::randomChild); + } else { + suffix = randomValueOtherThan(suffix, AbstractExpressionSerializationTests::randomChild); + } + return new EndsWith(source, str, suffix); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/EndsWithTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/EndsWithTests.java index 863243a352bb0..5ae69b03ae882 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/EndsWithTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/EndsWithTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.hamcrest.Matcher; @@ -24,7 +24,7 @@ import static org.hamcrest.Matchers.equalTo; -public class EndsWithTests extends AbstractFunctionTestCase { +public class EndsWithTests extends AbstractScalarFunctionTestCase { public EndsWithTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrimSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrimSerializationTests.java new file mode 100644 index 0000000000000..e3cac6caf130d --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrimSerializationTests.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractUnaryScalarSerializationTests; + +public class LTrimSerializationTests extends AbstractUnaryScalarSerializationTests { + @Override + protected LTrim create(Source source, Expression child) { + return new LTrim(source, child); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftSerializationTests.java new file mode 100644 index 0000000000000..b20d740954ff9 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftSerializationTests.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class LeftSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected Left createTestInstance() { + Source source = randomSource(); + Expression str = randomChild(); + Expression length = randomChild(); + return new Left(source, str, length); + } + + @Override + protected Left mutateInstance(Left instance) throws IOException { + Source source = instance.source(); + Expression str = instance.str(); + Expression length = instance.length(); + if (randomBoolean()) { + str = randomValueOtherThan(str, AbstractExpressionSerializationTests::randomChild); + } else { + length = randomValueOtherThan(length, AbstractExpressionSerializationTests::randomChild); + } + return new Left(source, str, length); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java index 7d6e3439c8063..88ee7881e128a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java @@ -17,7 +17,7 @@ import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.hamcrest.Matcher; @@ -28,7 +28,7 @@ import static org.elasticsearch.compute.data.BlockUtils.toJavaObject; import static org.hamcrest.Matchers.equalTo; -public class LeftTests extends AbstractFunctionTestCase { +public class LeftTests extends AbstractScalarFunctionTestCase { public 
LeftTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthSerializationTests.java new file mode 100644 index 0000000000000..07b8cb722096b --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthSerializationTests.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractUnaryScalarSerializationTests; + +public class LengthSerializationTests extends AbstractUnaryScalarSerializationTests { + @Override + protected Length create(Source source, Expression child) { + return new Length(source, child); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthTests.java index 4a7e6b3a0996d..a1451b6bedf7a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.hamcrest.Matcher; @@ -25,7 +25,7 @@ import static org.hamcrest.Matchers.equalTo; -public class LengthTests extends AbstractFunctionTestCase { +public class LengthTests extends AbstractScalarFunctionTestCase { public LengthTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LocateSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LocateSerializationTests.java new file mode 100644 index 0000000000000..a75fb9d1f772a --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LocateSerializationTests.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class LocateSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected Locate createTestInstance() { + Source source = randomSource(); + Expression str = randomChild(); + Expression substr = randomChild(); + Expression start = randomChild(); + return new Locate(source, str, substr, start); + } + + @Override + protected Locate mutateInstance(Locate instance) throws IOException { + Source source = instance.source(); + Expression str = instance.str(); + Expression substr = instance.substr(); + Expression start = instance.start(); + switch (between(0, 2)) { + case 0 -> str = randomValueOtherThan(str, AbstractExpressionSerializationTests::randomChild); + case 1 -> substr = randomValueOtherThan(substr, AbstractExpressionSerializationTests::randomChild); + case 2 -> start = randomValueOtherThan(start, AbstractExpressionSerializationTests::randomChild); + } + return new Locate(source, str, substr, start); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LocateTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LocateTests.java index 011252a3f7e14..13d8edf489a66 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LocateTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LocateTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.nio.charset.StandardCharsets; @@ -30,7 +30,7 @@ /** * Tests for {@link Locate} function. */ -public class LocateTests extends AbstractFunctionTestCase { +public class LocateTests extends AbstractScalarFunctionTestCase { public LocateTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RLikeSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RLikeSerializationTests.java new file mode 100644 index 0000000000000..655d1a75470a3 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RLikeSerializationTests.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.predicate.regex.RLikePattern; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class RLikeSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected RLike createTestInstance() { + Source source = randomSource(); + Expression child = randomChild(); + RLikePattern pattern = new RLikePattern(randomAlphaOfLength(4)); + return new RLike(source, child, pattern); + } + + @Override + protected RLike mutateInstance(RLike instance) throws IOException { + Source source = instance.source(); + Expression child = instance.field(); + RLikePattern pattern = instance.pattern(); + if (randomBoolean()) { + child = randomValueOtherThan(child, AbstractExpressionSerializationTests::randomChild); + } else { + pattern = randomValueOtherThan(pattern, () -> new RLikePattern(randomAlphaOfLength(4))); + } + return new RLike(source, child, pattern); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RLikeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RLikeTests.java index e1bcc519840be..0074f83b3bbce 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RLikeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RLikeTests.java @@ -17,7 +17,7 @@ import org.elasticsearch.xpack.esql.core.expression.predicate.regex.RLikePattern; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; @@ -30,7 +30,7 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; -public class RLikeTests extends AbstractFunctionTestCase { +public class RLikeTests extends AbstractScalarFunctionTestCase { public RLikeTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -38,7 +38,7 @@ public RLikeTests(@Name("TestCase") Supplier testCase @ParametersFactory public static Iterable parameters() { return parameters(str -> { - for (String syntax : new String[] { "\\", ".", "?", "+", "*", "|", "{", "}", "[", "]", "(", ")", "\"", "<", ">", "#" }) { + for (String syntax : new String[] { "\\", ".", "?", "+", "*", "|", "{", "}", "[", "]", "(", ")", "\"", "<", ">", "#", "&" }) { str = str.replace(syntax, "\\" + syntax); } return str; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrimSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrimSerializationTests.java new file mode 100644 index 0000000000000..e52be87c41af0 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrimSerializationTests.java @@ -0,0 +1,19 @@ +/* + * Copyright 
Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractUnaryScalarSerializationTests; + +public class RTrimSerializationTests extends AbstractUnaryScalarSerializationTests { + @Override + protected RTrim create(Source source, Expression child) { + return new RTrim(source, child); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RepeatSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RepeatSerializationTests.java new file mode 100644 index 0000000000000..6abcfdc472685 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RepeatSerializationTests.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class RepeatSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected Repeat createTestInstance() { + Source source = randomSource(); + Expression str = randomChild(); + Expression number = randomChild(); + return new Repeat(source, str, number); + } + + @Override + protected Repeat mutateInstance(Repeat instance) throws IOException { + Source source = instance.source(); + Expression str = instance.str(); + Expression number = instance.number(); + if (randomBoolean()) { + str = randomValueOtherThan(str, AbstractExpressionSerializationTests::randomChild); + } else { + number = randomValueOtherThan(number, AbstractExpressionSerializationTests::randomChild); + } + return new Repeat(source, str, number); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RepeatStaticTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RepeatStaticTests.java index dc266066bd424..7c8426a5fe3fc 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RepeatStaticTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RepeatStaticTests.java @@ -24,7 +24,7 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.junit.After; import java.util.ArrayList; @@ -36,7 +36,7 @@ /** * These tests create rows that are 
1MB in size. Test classes - * which extend AbstractFunctionTestCase rerun test cases with + * which extend AbstractScalarFunctionTestCase rerun test cases with * many randomized inputs. Unfortunately, tests are run with * limited memory, and instantiating many copies of these * tests with large rows causes out of memory. @@ -63,7 +63,7 @@ public void testTooBig() { public String process(String str, int number) { try ( - var eval = AbstractFunctionTestCase.evaluator( + var eval = AbstractScalarFunctionTestCase.evaluator( new Repeat(Source.EMPTY, field("string", DataType.KEYWORD), field("number", DataType.INTEGER)) ).get(driverContext()); Block block = eval.eval(row(List.of(new BytesRef(str), number))); @@ -73,7 +73,7 @@ public String process(String str, int number) { } /** - * The following fields and methods were borrowed from AbstractFunctionTestCase + * The following fields and methods were borrowed from AbstractScalarFunctionTestCase */ private final List breakers = Collections.synchronizedList(new ArrayList<>()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RepeatTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RepeatTests.java index cb89dc168b928..8d0368d1c618f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RepeatTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RepeatTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.ArrayList; @@ -24,7 +24,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; -public class RepeatTests extends AbstractFunctionTestCase { +public class RepeatTests extends AbstractScalarFunctionTestCase { public RepeatTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceSerializationTests.java new file mode 100644 index 0000000000000..4bc54241eca2c --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceSerializationTests.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class ReplaceSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected Replace createTestInstance() { + Source source = randomSource(); + Expression str = randomChild(); + Expression regex = randomChild(); + Expression newStr = randomChild(); + return new Replace(source, str, regex, newStr); + } + + @Override + protected Replace mutateInstance(Replace instance) throws IOException { + Source source = instance.source(); + Expression str = instance.str(); + Expression regex = instance.regex(); + Expression newStr = instance.newStr(); + switch (between(0, 2)) { + case 0 -> str = randomValueOtherThan(str, AbstractExpressionSerializationTests::randomChild); + case 1 -> regex = randomValueOtherThan(regex, AbstractExpressionSerializationTests::randomChild); + case 2 -> newStr = randomValueOtherThan(newStr, AbstractExpressionSerializationTests::randomChild); + } + return new Replace(source, str, regex, newStr); + } + + @Override + protected boolean alwaysEmptySource() { + return true; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceTests.java index 82581b69f8713..fe77b9dcdb075 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.ArrayList; @@ -24,7 +24,7 @@ import static org.hamcrest.Matchers.equalTo; -public class ReplaceTests extends AbstractFunctionTestCase { +public class ReplaceTests extends AbstractScalarFunctionTestCase { public ReplaceTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightSerializationTests.java new file mode 100644 index 0000000000000..7ed7345910765 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightSerializationTests.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.string;
+
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests;
+
+import java.io.IOException;
+
+public class RightSerializationTests extends AbstractExpressionSerializationTests<Right> {
+    @Override
+    protected Right createTestInstance() {
+        Source source = randomSource();
+        Expression str = randomChild();
+        Expression length = randomChild();
+        return new Right(source, str, length);
+    }
+
+    @Override
+    protected Right mutateInstance(Right instance) throws IOException {
+        Source source = instance.source();
+        Expression str = instance.str();
+        Expression length = instance.length();
+        if (randomBoolean()) {
+            str = randomValueOtherThan(str, AbstractExpressionSerializationTests::randomChild);
+        } else {
+            length = randomValueOtherThan(length, AbstractExpressionSerializationTests::randomChild);
+        }
+        return new Right(source, str, length);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightTests.java
index 9d2b55e02fff7..cc98edb85f547 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightTests.java
@@ -17,7 +17,7 @@
 import org.elasticsearch.xpack.esql.core.expression.Literal;
 import org.elasticsearch.xpack.esql.core.tree.Source;
 import org.elasticsearch.xpack.esql.core.type.DataType;
-import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
+import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase;
 import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.hamcrest.Matcher;
 
@@ -28,7 +28,7 @@
 import static org.elasticsearch.compute.data.BlockUtils.toJavaObject;
 import static org.hamcrest.Matchers.equalTo;
 
-public class RightTests extends AbstractFunctionTestCase {
+public class RightTests extends AbstractScalarFunctionTestCase {
     public RightTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitSerializationTests.java
new file mode 100644
index 0000000000000..bede192c354d1
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitSerializationTests.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.string;
+
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests;
+
+import java.io.IOException;
+
+public class SplitSerializationTests extends AbstractExpressionSerializationTests<Split> {
+    @Override
+    protected Split createTestInstance() {
+        Source source = randomSource();
+        Expression str = randomChild();
+        Expression delim = randomChild();
+        return new Split(source, str, delim);
+    }
+
+    @Override
+    protected Split mutateInstance(Split instance) throws IOException {
+        Source source = instance.source();
+        Expression str = instance.str();
+        Expression delim = instance.delim();
+        if (randomBoolean()) {
+            str = randomValueOtherThan(str, AbstractExpressionSerializationTests::randomChild);
+        } else {
+            delim = randomValueOtherThan(delim, AbstractExpressionSerializationTests::randomChild);
+        }
+        return new Split(source, str, delim);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java
index bf2dd0359a352..dd28b43bd66ed 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java
@@ -21,7 +21,7 @@
 import org.elasticsearch.xpack.esql.core.expression.Literal;
 import org.elasticsearch.xpack.esql.core.tree.Source;
 import org.elasticsearch.xpack.esql.core.type.DataType;
-import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
+import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase;
 import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 
 import java.util.ArrayList;
@@ -34,7 +34,7 @@
 import static org.elasticsearch.compute.data.BlockUtils.toJavaObject;
 import static org.hamcrest.Matchers.equalTo;
 
-public class SplitTests extends AbstractFunctionTestCase {
+public class SplitTests extends AbstractScalarFunctionTestCase {
     public SplitTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWithSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWithSerializationTests.java
new file mode 100644
index 0000000000000..1b1167879b212
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWithSerializationTests.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.string;
+
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests;
+
+import java.io.IOException;
+
+public class StartsWithSerializationTests extends AbstractExpressionSerializationTests<StartsWith> {
+    @Override
+    protected StartsWith createTestInstance() {
+        Source source = randomSource();
+        Expression str = randomChild();
+        Expression prefix = randomChild();
+        return new StartsWith(source, str, prefix);
+    }
+
+    @Override
+    protected StartsWith mutateInstance(StartsWith instance) throws IOException {
+        Source source = instance.source();
+        Expression str = instance.str();
+        Expression prefix = instance.prefix();
+        if (randomBoolean()) {
+            str = randomValueOtherThan(str, AbstractExpressionSerializationTests::randomChild);
+        } else {
+            prefix = randomValueOtherThan(prefix, AbstractExpressionSerializationTests::randomChild);
+        }
+        return new StartsWith(source, str, prefix);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWithTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWithTests.java
index f0c51a9b22e55..bd01f926d1571 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWithTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWithTests.java
@@ -14,7 +14,7 @@
 import org.elasticsearch.xpack.esql.core.expression.Expression;
 import org.elasticsearch.xpack.esql.core.tree.Source;
 import org.elasticsearch.xpack.esql.core.type.DataType;
-import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
+import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase;
 import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.hamcrest.Matcher;
 
@@ -23,7 +23,7 @@
 
 import static org.hamcrest.Matchers.equalTo;
 
-public class StartsWithTests extends AbstractFunctionTestCase {
+public class StartsWithTests extends AbstractScalarFunctionTestCase {
     public StartsWithTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringSerializationTests.java
new file mode 100644
index 0000000000000..accbed6e8f613
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringSerializationTests.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class SubstringSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected Substring createTestInstance() { + Source source = randomSource(); + Expression str = randomChild(); + Expression start = randomChild(); + Expression length = randomChild(); + return new Substring(source, str, start, length); + } + + @Override + protected Substring mutateInstance(Substring instance) throws IOException { + Source source = instance.source(); + Expression str = instance.str(); + Expression start = instance.start(); + Expression length = instance.length(); + switch (between(0, 2)) { + case 0 -> str = randomValueOtherThan(str, AbstractExpressionSerializationTests::randomChild); + case 1 -> start = randomValueOtherThan(start, AbstractExpressionSerializationTests::randomChild); + case 2 -> length = randomValueOtherThan(length, AbstractExpressionSerializationTests::randomChild); + } + return new Substring(source, str, start, length); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java index 0ee60cfc77d2f..1c49d3b408ad6 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java @@ -17,7 +17,7 @@ import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.hamcrest.Matcher; @@ -29,7 +29,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -public class SubstringTests extends AbstractFunctionTestCase { +public class SubstringTests extends AbstractScalarFunctionTestCase { public SubstringTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerSerializationTests.java index f2dbdbd74470a..caff331755f44 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerSerializationTests.java @@ -7,21 +7,13 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.string; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import 
org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; -import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import java.io.IOException; -import java.util.List; public class ToLowerSerializationTests extends AbstractExpressionSerializationTests { - @Override - protected List getNamedWriteables() { - return EsqlScalarFunction.getNamedWriteables(); - } - @Override protected ToLower createTestInstance() { return new ToLower(randomSource(), randomChild(), configuration()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperSerializationTests.java index e57aedd79fdfd..97316c9cc7681 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperSerializationTests.java @@ -7,21 +7,13 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.string; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; -import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import java.io.IOException; -import java.util.List; public class ToUpperSerializationTests extends AbstractExpressionSerializationTests { - @Override - protected List getNamedWriteables() { - return EsqlScalarFunction.getNamedWriteables(); - } - @Override protected ToUpper createTestInstance() { return new ToUpper(randomSource(), randomChild(), configuration()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/TrimSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/TrimSerializationTests.java new file mode 100644 index 0000000000000..a49e07fd7065c --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/TrimSerializationTests.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractUnaryScalarSerializationTests; + +public class TrimSerializationTests extends AbstractUnaryScalarSerializationTests { + @Override + protected Trim create(Source source, Expression child) { + return new Trim(source, child); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLikeSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLikeSerializationTests.java new file mode 100644 index 0000000000000..1bbf124864682 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLikeSerializationTests.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.predicate.regex.WildcardPattern; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class WildcardLikeSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected WildcardLike createTestInstance() { + Source source = randomSource(); + Expression child = randomChild(); + WildcardPattern pattern = new WildcardPattern(randomAlphaOfLength(4)); + return new WildcardLike(source, child, pattern); + } + + @Override + protected WildcardLike mutateInstance(WildcardLike instance) throws IOException { + Source source = instance.source(); + Expression child = instance.field(); + WildcardPattern pattern = instance.pattern(); + if (randomBoolean()) { + child = randomValueOtherThan(child, AbstractExpressionSerializationTests::randomChild); + } else { + pattern = randomValueOtherThan(pattern, () -> new WildcardPattern(randomAlphaOfLength(4))); + } + return new WildcardLike(source, child, pattern); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLikeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLikeTests.java index 3aee4a92e9570..06736db28b2cc 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLikeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLikeTests.java @@ -17,7 +17,7 @@ import org.elasticsearch.xpack.esql.core.expression.predicate.regex.WildcardPattern; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.FunctionName; import 
org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; @@ -29,7 +29,7 @@ import static org.hamcrest.Matchers.startsWith; @FunctionName("like") -public class WildcardLikeTests extends AbstractFunctionTestCase { +public class WildcardLikeTests extends AbstractScalarFunctionTestCase { public WildcardLikeTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/AbstractBinaryOperatorTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/AbstractBinaryOperatorTestCase.java index 7e803ea2f84a0..974c8703b2a09 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/AbstractBinaryOperatorTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/AbstractBinaryOperatorTestCase.java @@ -9,14 +9,14 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.xpack.esql.analysis.Verifier; -import org.elasticsearch.xpack.esql.core.common.Failure; +import org.elasticsearch.xpack.esql.common.Failure; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.expression.predicate.BinaryOperator; import org.elasticsearch.xpack.esql.core.tree.Location; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.hamcrest.Matcher; @@ -33,7 +33,7 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; -public abstract class AbstractBinaryOperatorTestCase extends AbstractFunctionTestCase { +public abstract class AbstractBinaryOperatorTestCase extends AbstractScalarFunctionTestCase { protected abstract Matcher resultsMatcher(List typedData); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/BreakerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/BreakerTests.java index b5bea7d858187..a5408cdb971c4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/BreakerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/BreakerTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Div; import org.junit.After; @@ -63,7 +64,7 @@ public BreakerTests(ByteSizeValue limit, Expression expression) { public void testBreaker() { DriverContext unlimited = driverContext(ByteSizeValue.ofGb(1)); DriverContext context = driverContext(limit); - EvalOperator.ExpressionEvaluator eval = AbstractFunctionTestCase.evaluator(expression).get(context); + EvalOperator.ExpressionEvaluator eval = 
AbstractScalarFunctionTestCase.evaluator(expression).get(context); try (Block b = unlimited.blockFactory().newConstantNullBlock(1)) { Exception e = expectThrows(CircuitBreakingException.class, () -> eval.eval(new Page(b))); assertThat(e.getMessage(), equalTo("over test limit")); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AbstractArithmeticSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AbstractArithmeticSerializationTests.java index c9a7933142605..81860addf1c5e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AbstractArithmeticSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AbstractArithmeticSerializationTests.java @@ -7,13 +7,11 @@ package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; import java.io.IOException; -import java.util.List; public abstract class AbstractArithmeticSerializationTests extends AbstractExpressionSerializationTests< T> { @@ -35,9 +33,4 @@ protected final T mutateInstance(T instance) throws IOException { } return create(instance.source(), left, right); } - - @Override - protected List getNamedWriteables() { - return EsqlArithmeticOperation.getNamedWriteables(); - } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java index 26a5d58b33900..c8a2511e34211 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; @@ -36,7 +36,7 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; -public class AddTests extends AbstractFunctionTestCase { +public class AddTests extends AbstractScalarFunctionTestCase { public AddTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivTests.java index a50d44822a4e3..7bc5b24651218 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivTests.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.hamcrest.Matcher; @@ -26,7 +26,7 @@ import static org.hamcrest.Matchers.equalTo; -public class DivTests extends AbstractFunctionTestCase { +public class DivTests extends AbstractScalarFunctionTestCase { public DivTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModTests.java index ce67f6453362b..133324bafd134 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.hamcrest.Matcher; @@ -26,7 +26,7 @@ import static org.hamcrest.Matchers.equalTo; -public class ModTests extends AbstractFunctionTestCase { +public class ModTests extends AbstractScalarFunctionTestCase { public ModTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulTests.java index 8b4dfa88415be..7472636611063 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.ArrayList; @@ -24,7 +24,7 @@ import static org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.AbstractArithmeticTestCase.arithmeticExceptionOverflowCase; import static org.hamcrest.Matchers.equalTo; -public class MulTests extends AbstractFunctionTestCase { +public class MulTests extends AbstractScalarFunctionTestCase { public MulTests(@Name("TestCase") Supplier 
testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegTests.java index a628416ecc4b7..7eadd74eaeb9e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegTests.java @@ -16,7 +16,7 @@ import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.time.Duration; @@ -28,7 +28,7 @@ import static org.elasticsearch.compute.data.BlockUtils.toJavaObject; import static org.hamcrest.Matchers.equalTo; -public class NegTests extends AbstractFunctionTestCase { +public class NegTests extends AbstractScalarFunctionTestCase { public NegTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java index e75ee9333ba54..9dc024ac1e8ff 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.time.Duration; @@ -29,7 +29,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; -public class SubTests extends AbstractFunctionTestCase { +public class SubTests extends AbstractScalarFunctionTestCase { public SubTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractComparisonSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractComparisonSerializationTests.java index 8f28cfddb1d3a..3802d301357e9 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractComparisonSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractComparisonSerializationTests.java @@ -7,13 +7,11 @@ package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; -import 
org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; import java.io.IOException; -import java.util.List; public abstract class AbstractComparisonSerializationTests extends AbstractExpressionSerializationTests { protected abstract T create(Source source, Expression left, Expression right); @@ -34,9 +32,4 @@ protected final T mutateInstance(T instance) throws IOException { } return create(instance.source(), left, right); } - - @Override - protected List getNamedWriteables() { - return EsqlBinaryComparison.getNamedWriteables(); - } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsTests.java index 3817bbe9cc74c..d3539f4a56fe9 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.NumericUtils; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; @@ -22,7 +22,7 @@ import java.util.List; import java.util.function.Supplier; -public class EqualsTests extends AbstractFunctionTestCase { +public class EqualsTests extends AbstractScalarFunctionTestCase { public EqualsTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -196,7 +196,10 @@ public static Iterable parameters() { ); return parameterSuppliersFromTypedData( - errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers), AbstractFunctionTestCase::errorMessageStringForBinaryOperators) + errorsForCasesWithoutExamples( + anyNullIsNull(true, suppliers), + AbstractScalarFunctionTestCase::errorMessageStringForBinaryOperators + ) ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EsqlBinaryComparisonTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EsqlBinaryComparisonTests.java index cc282186d4385..540a5fafbae06 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EsqlBinaryComparisonTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EsqlBinaryComparisonTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonProcessor; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison.BinaryComparisonOperation; import java.io.IOException; @@ -40,16 +39,34 @@ public void 
testSerializationOfBinaryComparisonOperation() throws IOException { * {@link BinaryComparisonOperation} */ public void testCompatibleWithQLBinaryComparisonOperation() throws IOException { - validateCompatibility(BinaryComparisonProcessor.BinaryComparisonOperation.EQ, BinaryComparisonOperation.EQ); - validateCompatibility(BinaryComparisonProcessor.BinaryComparisonOperation.NEQ, BinaryComparisonOperation.NEQ); - validateCompatibility(BinaryComparisonProcessor.BinaryComparisonOperation.GT, BinaryComparisonOperation.GT); - validateCompatibility(BinaryComparisonProcessor.BinaryComparisonOperation.GTE, BinaryComparisonOperation.GTE); - validateCompatibility(BinaryComparisonProcessor.BinaryComparisonOperation.LT, BinaryComparisonOperation.LT); - validateCompatibility(BinaryComparisonProcessor.BinaryComparisonOperation.LTE, BinaryComparisonOperation.LTE); + validateCompatibility( + org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonOperation.EQ, + BinaryComparisonOperation.EQ + ); + validateCompatibility( + org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonOperation.NEQ, + BinaryComparisonOperation.NEQ + ); + validateCompatibility( + org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonOperation.GT, + BinaryComparisonOperation.GT + ); + validateCompatibility( + org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonOperation.GTE, + BinaryComparisonOperation.GTE + ); + validateCompatibility( + org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonOperation.LT, + BinaryComparisonOperation.LT + ); + validateCompatibility( + org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonOperation.LTE, + BinaryComparisonOperation.LTE + ); } private static void validateCompatibility( - BinaryComparisonProcessor.BinaryComparisonOperation original, + org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparisonOperation original, BinaryComparisonOperation expected ) throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java index f25638b482817..b2174f7be1593 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.NumericUtils; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; @@ -23,7 +23,7 @@ import java.util.List; import java.util.function.Supplier; -public class GreaterThanOrEqualTests extends AbstractFunctionTestCase { +public class GreaterThanOrEqualTests extends AbstractScalarFunctionTestCase { public GreaterThanOrEqualTests(@Name("TestCase") 
Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -131,7 +131,10 @@ public static Iterable parameters() { ); return parameterSuppliersFromTypedData( - errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers), AbstractFunctionTestCase::errorMessageStringForBinaryOperators) + errorsForCasesWithoutExamples( + anyNullIsNull(true, suppliers), + AbstractScalarFunctionTestCase::errorMessageStringForBinaryOperators + ) ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java index 0735e0dfd64f2..edb276e16dd99 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.NumericUtils; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; @@ -23,7 +23,7 @@ import java.util.List; import java.util.function.Supplier; -public class GreaterThanTests extends AbstractFunctionTestCase { +public class GreaterThanTests extends AbstractScalarFunctionTestCase { public GreaterThanTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -131,7 +131,10 @@ public static Iterable parameters() { ); return parameterSuppliersFromTypedData( - errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers), AbstractFunctionTestCase::errorMessageStringForBinaryOperators) + errorsForCasesWithoutExamples( + anyNullIsNull(true, suppliers), + AbstractScalarFunctionTestCase::errorMessageStringForBinaryOperators + ) ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InSerializationTests.java new file mode 100644 index 0000000000000..8e8c6287d433d --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InSerializationTests.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; +import java.util.List; + +public class InSerializationTests extends AbstractExpressionSerializationTests { + @Override + protected In createTestInstance() { + Source source = randomSource(); + Expression value = randomChild(); + List list = randomList(10, AbstractExpressionSerializationTests::randomChild); + return new In(source, value, list); + } + + @Override + protected In mutateInstance(In instance) throws IOException { + Source source = instance.source(); + Expression value = instance.value(); + List list = instance.list(); + if (randomBoolean()) { + value = randomValueOtherThan(value, AbstractExpressionSerializationTests::randomChild); + } else { + list = randomValueOtherThan(list, () -> randomList(10, AbstractExpressionSerializationTests::randomChild)); + } + return new In(source, value, list); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsSerializationTests.java index d9daa27936267..7ca1e27ba510a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsSerializationTests.java @@ -7,12 +7,10 @@ package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; import java.io.IOException; -import java.util.List; public class InsensitiveEqualsSerializationTests extends AbstractExpressionSerializationTests { @Override @@ -31,9 +29,4 @@ protected final InsensitiveEquals mutateInstance(InsensitiveEquals instance) thr } return new InsensitiveEquals(instance.source(), left, right); } - - @Override - protected List getNamedWriteables() { - return List.of(InsensitiveEquals.ENTRY); - } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java index 4a802dfcaf975..d89421f579b08 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.NumericUtils; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; 
import java.math.BigInteger; @@ -23,7 +23,7 @@ import java.util.List; import java.util.function.Supplier; -public class LessThanOrEqualTests extends AbstractFunctionTestCase { +public class LessThanOrEqualTests extends AbstractScalarFunctionTestCase { public LessThanOrEqualTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -131,7 +131,10 @@ public static Iterable parameters() { ); return parameterSuppliersFromTypedData( - errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers), AbstractFunctionTestCase::errorMessageStringForBinaryOperators) + errorsForCasesWithoutExamples( + anyNullIsNull(true, suppliers), + AbstractScalarFunctionTestCase::errorMessageStringForBinaryOperators + ) ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java index 6f3f2441c6d00..9487d774ff221 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.NumericUtils; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; @@ -23,7 +23,7 @@ import java.util.List; import java.util.function.Supplier; -public class LessThanTests extends AbstractFunctionTestCase { +public class LessThanTests extends AbstractScalarFunctionTestCase { public LessThanTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -131,7 +131,10 @@ public static Iterable parameters() { ); return parameterSuppliersFromTypedData( - errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers), AbstractFunctionTestCase::errorMessageStringForBinaryOperators) + errorsForCasesWithoutExamples( + anyNullIsNull(true, suppliers), + AbstractScalarFunctionTestCase::errorMessageStringForBinaryOperators + ) ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsTests.java index 174e2457eb0a5..e7d8c680ba5cc 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; @@ -21,7 +21,7 @@ import java.util.List; import 
java.util.function.Supplier; -public class NotEqualsTests extends AbstractFunctionTestCase { +public class NotEqualsTests extends AbstractScalarFunctionTestCase { public NotEqualsTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -190,7 +190,10 @@ public static Iterable parameters() { ) ); return parameterSuppliersFromTypedData( - errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers), AbstractFunctionTestCase::errorMessageStringForBinaryOperators) + errorsForCasesWithoutExamples( + anyNullIsNull(true, suppliers), + AbstractScalarFunctionTestCase::errorMessageStringForBinaryOperators + ) ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/fulltext/AbstractFulltextSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/fulltext/AbstractFulltextSerializationTests.java index 88f88436f8a04..370cfaf67fe0f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/fulltext/AbstractFulltextSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/fulltext/AbstractFulltextSerializationTests.java @@ -7,12 +7,10 @@ package org.elasticsearch.xpack.esql.expression.predicate.operator.fulltext; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.predicate.fulltext.FullTextPredicate; import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -20,11 +18,6 @@ public abstract class AbstractFulltextSerializationTests getNamedWriteables() { - return FullTextPredicate.getNamedWriteables(); - } - String randomOptionOrNull() { if (randomBoolean()) { return null; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java index 6da9e8ef8ba48..658f396aa027c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java @@ -17,8 +17,8 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xpack.core.esql.action.ColumnInfo; import org.elasticsearch.xpack.esql.TestBlockFactory; +import org.elasticsearch.xpack.esql.action.ColumnInfoImpl; import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; import org.elasticsearch.xpack.esql.core.util.StringUtils; @@ -246,17 +246,17 @@ public void testPlainTextEmptyCursorWithoutColumns() { } private static EsqlQueryResponse emptyData() { - return new EsqlQueryResponse(singletonList(new ColumnInfo("name", "keyword")), emptyList(), null, false, false); + return new EsqlQueryResponse(singletonList(new ColumnInfoImpl("name", "keyword")), emptyList(), null, false, false); } private static EsqlQueryResponse regularData() { BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); // headers - List headers = asList( - new ColumnInfo("string", "keyword"), - new ColumnInfo("number", "integer"), - new ColumnInfo("location", "geo_point"), - new ColumnInfo("location2", "cartesian_point") + List 
headers = asList( + new ColumnInfoImpl("string", "keyword"), + new ColumnInfoImpl("number", "integer"), + new ColumnInfoImpl("location", "geo_point"), + new ColumnInfoImpl("location2", "cartesian_point") ); BytesRefArray geoPoints = new BytesRefArray(2, BigArrays.NON_RECYCLING_INSTANCE); @@ -283,7 +283,7 @@ private static EsqlQueryResponse regularData() { private static EsqlQueryResponse escapedData() { // headers - List headers = asList(new ColumnInfo("first", "keyword"), new ColumnInfo("\"special\"", "keyword")); + List headers = asList(new ColumnInfoImpl("first", "keyword"), new ColumnInfoImpl("\"special\"", "keyword")); // values List values = List.of( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java index 9a89f3a1275f1..273561c0348c6 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java @@ -15,8 +15,8 @@ import org.elasticsearch.compute.data.Page; import org.elasticsearch.geometry.Point; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.esql.action.ColumnInfo; import org.elasticsearch.xpack.esql.TestBlockFactory; +import org.elasticsearch.xpack.esql.action.ColumnInfoImpl; import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; import java.util.Arrays; @@ -32,17 +32,17 @@ public class TextFormatterTests extends ESTestCase { static BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); - private final List columns = Arrays.asList( - new ColumnInfo("foo", "keyword"), - new ColumnInfo("bar", "long"), - new ColumnInfo("15charwidename!", "double"), - new ColumnInfo("null_field1", "integer"), - new ColumnInfo("superduperwidename!!!", "double"), - new ColumnInfo("baz", "keyword"), - new ColumnInfo("date", "date"), - new ColumnInfo("location", "geo_point"), - new ColumnInfo("location2", "cartesian_point"), - new ColumnInfo("null_field2", "keyword") + private final List columns = Arrays.asList( + new ColumnInfoImpl("foo", "keyword"), + new ColumnInfoImpl("bar", "long"), + new ColumnInfoImpl("15charwidename!", "double"), + new ColumnInfoImpl("null_field1", "integer"), + new ColumnInfoImpl("superduperwidename!!!", "double"), + new ColumnInfoImpl("baz", "keyword"), + new ColumnInfoImpl("date", "date"), + new ColumnInfoImpl("location", "geo_point"), + new ColumnInfoImpl("location2", "cartesian_point"), + new ColumnInfoImpl("null_field2", "keyword") ); private static final BytesRefArray geoPoints = new BytesRefArray(2, BigArrays.NON_RECYCLING_INSTANCE); @@ -183,7 +183,7 @@ public void testVeryLongPadding() { getTextBodyContent( new TextFormatter( new EsqlQueryResponse( - List.of(new ColumnInfo("foo", "keyword")), + List.of(new ColumnInfoImpl("foo", "keyword")), List.of( new Page( blockFactory.newBytesRefBlockBuilder(2) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java index 57d304a4f032e..55691526ea428 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java @@ -19,42 +19,16 @@ import org.elasticsearch.xpack.esql.EsqlTestUtils; 
import org.elasticsearch.xpack.esql.SerializationTestUtils; import org.elasticsearch.xpack.esql.core.expression.Alias; -import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; -import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.expression.Nullability; import org.elasticsearch.xpack.esql.core.expression.predicate.operator.arithmetic.ArithmeticOperation; import org.elasticsearch.xpack.esql.core.index.EsIndex; -import org.elasticsearch.xpack.esql.core.plan.logical.Filter; -import org.elasticsearch.xpack.esql.core.plan.logical.Limit; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; -import org.elasticsearch.xpack.esql.core.type.InvalidMappedField; import org.elasticsearch.xpack.esql.core.type.KeywordEsField; -import org.elasticsearch.xpack.esql.core.type.TextEsField; -import org.elasticsearch.xpack.esql.core.type.UnsupportedEsField; import org.elasticsearch.xpack.esql.expression.Order; -import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; -import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; -import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; -import org.elasticsearch.xpack.esql.expression.function.aggregate.Avg; -import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; -import org.elasticsearch.xpack.esql.expression.function.aggregate.CountDistinct; -import org.elasticsearch.xpack.esql.expression.function.aggregate.Max; -import org.elasticsearch.xpack.esql.expression.function.aggregate.Median; -import org.elasticsearch.xpack.esql.expression.function.aggregate.MedianAbsoluteDeviation; -import org.elasticsearch.xpack.esql.expression.function.aggregate.Min; -import org.elasticsearch.xpack.esql.expression.function.aggregate.Percentile; -import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroid; -import org.elasticsearch.xpack.esql.expression.function.aggregate.Sum; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Pow; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Round; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.StartsWith; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.Substring; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Div; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mod; @@ -72,9 +46,13 @@ import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.Filter; import org.elasticsearch.xpack.esql.plan.logical.Grok; +import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Lookup; import org.elasticsearch.xpack.esql.plan.logical.MvExpand; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; import 
org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.TopN; import org.elasticsearch.xpack.esql.plan.logical.join.Join; @@ -111,13 +89,11 @@ import java.util.Map; import java.util.Set; import java.util.TreeSet; -import java.util.stream.Stream; import static org.elasticsearch.test.ListMatcher.matchesList; import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.xpack.esql.SerializationTestUtils.serializeDeserialize; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasItem; public class PlanNamedTypesTests extends ESTestCase { @@ -192,19 +168,6 @@ public void testLogicalPlanEntries() { assertMap(actual, matchesList(expected)); } - public void testFunctionEntries() { - var serializableFunctions = PlanNamedTypes.namedTypeEntries() - .stream() - .filter(e -> Expression.class.isAssignableFrom(e.categoryClass())) - .map(PlanNameRegistry.Entry::name) - .sorted() - .toList(); - - for (var function : new EsqlFunctionRegistry().listFunctions()) { - assertThat(serializableFunctions, hasItem(equalTo(PlanNamedTypes.name(function.clazz())))); - } - } - // Tests that all names are unique - there should be a good reason if this is not the case. public void testUniqueNames() { var actual = PlanNamedTypes.namedTypeEntries().stream().map(PlanNameRegistry.Entry::name).distinct().toList(); @@ -230,81 +193,6 @@ public void testWrappedStreamSimple() throws IOException { assertThat(in.readVInt(), equalTo(11_345)); } - public void testBinComparisonSimple() throws IOException { - var orig = new Equals(Source.EMPTY, field("foo", DataType.DOUBLE), field("bar", DataType.DOUBLE)); - BytesStreamOutput bso = new BytesStreamOutput(); - PlanStreamOutput out = new PlanStreamOutput(bso, planNameRegistry, null); - out.writeNamed(Expression.class, orig); - var deser = (Equals) planStreamInput(bso).readNamed(Expression.class); - EqualsHashCodeTestUtils.checkEqualsAndHashCode(orig, unused -> deser); - } - - public void testBinComparison() { - Stream.generate(PlanNamedTypesTests::randomBinaryComparison).limit(100).forEach(obj -> assertNamedType(Expression.class, obj)); - } - - public void testAggFunctionSimple() throws IOException { - var orig = new Avg(Source.EMPTY, field("foo_val", DataType.DOUBLE)); - BytesStreamOutput bso = new BytesStreamOutput(); - PlanStreamOutput out = new PlanStreamOutput(bso, planNameRegistry, null); - out.writeNamed(AggregateFunction.class, orig); - var deser = (Avg) planStreamInput(bso).readNamed(AggregateFunction.class); - EqualsHashCodeTestUtils.checkEqualsAndHashCode(orig, unused -> deser); - } - - public void testAggFunction() { - Stream.generate(PlanNamedTypesTests::randomAggFunction).limit(100).forEach(obj -> assertNamedType(AggregateFunction.class, obj)); - } - - public void testArithmeticOperationSimple() throws IOException { - var orig = new Add(Source.EMPTY, field("foo", DataType.LONG), field("bar", DataType.LONG)); - BytesStreamOutput bso = new BytesStreamOutput(); - PlanStreamOutput out = new PlanStreamOutput(bso, planNameRegistry, null); - out.writeNamed(Expression.class, orig); - var deser = (Add) planStreamInput(bso).readNamed(Expression.class); - EqualsHashCodeTestUtils.checkEqualsAndHashCode(orig, unused -> deser); - } - - public void testArithmeticOperation() { - Stream.generate(PlanNamedTypesTests::randomArithmeticOperation).limit(100).forEach(obj -> assertNamedType(Expression.class, obj)); - } - - public void testSubStringSimple() throws IOException { - var orig = new 
Substring(Source.EMPTY, field("foo", DataType.KEYWORD), new Literal(Source.EMPTY, 1, DataType.INTEGER), null); - BytesStreamOutput bso = new BytesStreamOutput(); - PlanStreamOutput out = new PlanStreamOutput(bso, planNameRegistry, null); - PlanNamedTypes.writeSubstring(out, orig); - var deser = PlanNamedTypes.readSubstring(planStreamInput(bso)); - EqualsHashCodeTestUtils.checkEqualsAndHashCode(orig, unused -> deser); - } - - public void testStartsWithSimple() throws IOException { - var orig = new StartsWith(Source.EMPTY, field("foo", DataType.KEYWORD), new Literal(Source.EMPTY, "fo", DataType.KEYWORD)); - BytesStreamOutput bso = new BytesStreamOutput(); - PlanStreamOutput out = new PlanStreamOutput(bso, planNameRegistry, null); - PlanNamedTypes.writeStartsWith(out, orig); - var deser = PlanNamedTypes.readStartsWith(planStreamInput(bso)); - EqualsHashCodeTestUtils.checkEqualsAndHashCode(orig, unused -> deser); - } - - public void testRoundSimple() throws IOException { - var orig = new Round(Source.EMPTY, field("value", DataType.DOUBLE), new Literal(Source.EMPTY, 1, DataType.INTEGER)); - BytesStreamOutput bso = new BytesStreamOutput(); - PlanStreamOutput out = new PlanStreamOutput(bso, planNameRegistry, null); - PlanNamedTypes.writeRound(out, orig); - var deser = PlanNamedTypes.readRound(planStreamInput(bso)); - EqualsHashCodeTestUtils.checkEqualsAndHashCode(orig, unused -> deser); - } - - public void testPowSimple() throws IOException { - var orig = new Pow(Source.EMPTY, field("value", DataType.DOUBLE), new Literal(Source.EMPTY, 1, DataType.INTEGER)); - BytesStreamOutput bso = new BytesStreamOutput(); - PlanStreamOutput out = new PlanStreamOutput(bso, planNameRegistry, null); - PlanNamedTypes.writePow(out, orig); - var deser = PlanNamedTypes.readPow(planStreamInput(bso)); - EqualsHashCodeTestUtils.checkEqualsAndHashCode(orig, unused -> deser); - } - public void testFieldSortSimple() throws IOException { var orig = new EsQueryExec.FieldSort(field("val", DataType.LONG), Order.OrderDirection.ASC, Order.NullsPosition.FIRST); BytesStreamOutput bso = new BytesStreamOutput(); @@ -394,16 +282,6 @@ static EsIndex randomEsIndex() { ); } - static UnsupportedAttribute randomUnsupportedAttribute() { - return new UnsupportedAttribute( - Source.EMPTY, - randomAlphaOfLength(randomIntBetween(1, 25)), // name - randomUnsupportedEsField(), // field - randomStringOrNull(), // customMessage - nameIdOrNull() - ); - } - static FieldAttribute randomFieldAttributeOrNull() { return randomBoolean() ? 
randomFieldAttribute() : null; } @@ -433,22 +311,6 @@ static KeywordEsField randomKeywordEsField() { ); } - static TextEsField randomTextEsField() { - return new TextEsField( - randomAlphaOfLength(randomIntBetween(1, 25)), // name - randomProperties(), - randomBoolean(), // hasDocValues - randomBoolean() // alias - ); - } - - static InvalidMappedField randomInvalidMappedField() { - return new InvalidMappedField( - randomAlphaOfLength(randomIntBetween(1, 25)), // name - randomAlphaOfLength(randomIntBetween(1, 25)) // error message - ); - } - static EsqlBinaryComparison randomBinaryComparison() { int v = randomIntBetween(0, 5); var left = field(randomName(), randomDataType()); @@ -464,25 +326,6 @@ static EsqlBinaryComparison randomBinaryComparison() { }; } - static AggregateFunction randomAggFunction() { - int v = randomIntBetween(0, 8); - var field = field(randomName(), randomDataType()); - var right = field(randomName(), randomDataType()); - return switch (v) { - case 0 -> new Avg(Source.EMPTY, field); - case 1 -> new Count(Source.EMPTY, field); - case 2 -> new Sum(Source.EMPTY, field); - case 3 -> new Min(Source.EMPTY, field); - case 4 -> new Max(Source.EMPTY, field); - case 5 -> new Median(Source.EMPTY, field); - case 6 -> new MedianAbsoluteDeviation(Source.EMPTY, field); - case 7 -> new CountDistinct(Source.EMPTY, field, right); - case 8 -> new Percentile(Source.EMPTY, field, right); - case 9 -> new SpatialCentroid(Source.EMPTY, field); - default -> throw new AssertionError(v); - }; - } - static ArithmeticOperation randomArithmeticOperation() { int v = randomIntBetween(0, 4); var left = field(randomName(), randomDataType()); @@ -525,15 +368,6 @@ static EsField randomEsField(int depth) { ); } - static UnsupportedEsField randomUnsupportedEsField() { - return new UnsupportedEsField( - randomAlphaOfLength(randomIntBetween(1, 25)), // name - randomAlphaOfLength(randomIntBetween(1, 25)), // originalType - randomAlphaOfLength(randomIntBetween(1, 25)), // inherited - randomProperties() - ); - } - static Map randomProperties() { return randomProperties(0); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInputTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInputTests.java index 5788f218564c9..55763d9ec6e7b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInputTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInputTests.java @@ -10,10 +10,10 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NameId; -import org.elasticsearch.xpack.esql.core.plan.logical.Filter; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.Filter; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; import java.util.ArrayList; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java index 40c45a288ae88..2049fd5592d82 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java @@ -14,17 +14,18 @@ import org.elasticsearch.xpack.esql.analysis.Analyzer; import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; import org.elasticsearch.xpack.esql.core.expression.Alias; +import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; +import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull; import org.elasticsearch.xpack.esql.core.index.EsIndex; import org.elasticsearch.xpack.esql.core.index.IndexResolution; -import org.elasticsearch.xpack.esql.core.plan.logical.Filter; -import org.elasticsearch.xpack.esql.core.plan.logical.Limit; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; @@ -34,7 +35,14 @@ import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.Filter; +import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.MvExpand; import org.elasticsearch.xpack.esql.plan.logical.Project; +import org.elasticsearch.xpack.esql.plan.logical.Row; +import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; +import org.elasticsearch.xpack.esql.plan.logical.local.EsqlProject; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; import org.elasticsearch.xpack.esql.stats.SearchStats; @@ -181,6 +189,79 @@ public void testMissingFieldInSort() { var source = as(limit.child(), EsRelation.class); } + /** + * Expects + * EsqlProject[[first_name{f}#6]] + * \_Limit[1000[INTEGER]] + * \_MvExpand[last_name{f}#9,last_name{r}#15] + * \_Limit[1000[INTEGER]] + * \_EsRelation[test][_meta_field{f}#11, emp_no{f}#5, first_name{f}#6, ge..] 
+ */ + public void testMissingFieldInMvExpand() { + var plan = plan(""" + from test + | mv_expand last_name + | keep first_name, last_name + """); + + var testStats = statsForMissingField("last_name"); + var localPlan = localPlan(plan, testStats); + + var project = as(localPlan, EsqlProject.class); + var projections = project.projections(); + assertThat(Expressions.names(projections), contains("first_name", "last_name")); + + var limit = as(project.child(), Limit.class); + // MvExpand cannot be optimized (yet) because the target NamedExpression cannot be replaced with a NULL literal + // https://github.com/elastic/elasticsearch/issues/109974 + // See LocalLogicalPlanOptimizer.ReplaceMissingFieldWithNull + var mvExpand = as(limit.child(), MvExpand.class); + var limit2 = as(mvExpand.child(), Limit.class); + as(limit2.child(), EsRelation.class); + } + + public static class MockFieldAttributeCommand extends UnaryPlan { + public FieldAttribute field; + + public MockFieldAttributeCommand(Source source, LogicalPlan child, FieldAttribute field) { + super(source, child); + this.field = field; + } + + @Override + public UnaryPlan replaceChild(LogicalPlan newChild) { + return new MockFieldAttributeCommand(source(), newChild, field); + } + + @Override + public boolean expressionsResolved() { + return true; + } + + @Override + public List output() { + return List.of(field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, MockFieldAttributeCommand::new, child(), field); + } + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/110150") + public void testMissingFieldInNewCommand() { + var testStats = statsForMissingField("last_name"); + localPlan( + new MockFieldAttributeCommand( + EMPTY, + new Row(EMPTY, List.of()), + new FieldAttribute(EMPTY, "last_name", new EsField("last_name", DataType.KEYWORD, Map.of(), true)) + ), + testStats + ); + } + /** * Expects * EsqlProject[[x{r}#3]] diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index 74bdcf824ba80..dea3a974fbd5a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.optimizer; +import org.elasticsearch.Build; import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.compute.aggregation.QuantileStates; @@ -41,11 +42,6 @@ import org.elasticsearch.xpack.esql.core.expression.predicate.regex.WildcardPattern; import org.elasticsearch.xpack.esql.core.index.EsIndex; import org.elasticsearch.xpack.esql.core.index.IndexResolution; -import org.elasticsearch.xpack.esql.core.plan.logical.Filter; -import org.elasticsearch.xpack.esql.core.plan.logical.Limit; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.OrderBy; -import org.elasticsearch.xpack.esql.core.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; @@ -57,13 +53,18 @@ import org.elasticsearch.xpack.esql.expression.function.aggregate.Avg; import 
org.elasticsearch.xpack.esql.expression.function.aggregate.Count; import org.elasticsearch.xpack.esql.expression.function.aggregate.CountDistinct; +import org.elasticsearch.xpack.esql.expression.function.aggregate.FromPartial; import org.elasticsearch.xpack.esql.expression.function.aggregate.Max; import org.elasticsearch.xpack.esql.expression.function.aggregate.Median; import org.elasticsearch.xpack.esql.expression.function.aggregate.MedianAbsoluteDeviation; import org.elasticsearch.xpack.esql.expression.function.aggregate.Min; import org.elasticsearch.xpack.esql.expression.function.aggregate.Percentile; +import org.elasticsearch.xpack.esql.expression.function.aggregate.Rate; import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroid; import org.elasticsearch.xpack.esql.expression.function.aggregate.Sum; +import org.elasticsearch.xpack.esql.expression.function.aggregate.ToPartial; +import org.elasticsearch.xpack.esql.expression.function.aggregate.Values; +import org.elasticsearch.xpack.esql.expression.function.grouping.Bucket; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDouble; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToLong; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToString; @@ -110,16 +111,22 @@ import org.elasticsearch.xpack.esql.optimizer.rules.PushDownAndCombineLimits; import org.elasticsearch.xpack.esql.optimizer.rules.SplitInWithFoldableValue; import org.elasticsearch.xpack.esql.parser.EsqlParser; +import org.elasticsearch.xpack.esql.parser.ParsingException; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.Filter; import org.elasticsearch.xpack.esql.plan.logical.Grok; +import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.MvExpand; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.Row; import org.elasticsearch.xpack.esql.plan.logical.TopN; +import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.plan.logical.join.Join; import org.elasticsearch.xpack.esql.plan.logical.join.JoinType; import org.elasticsearch.xpack.esql.plan.logical.local.EsqlProject; @@ -152,6 +159,7 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.localSource; import static org.elasticsearch.xpack.esql.EsqlTestUtils.withDefaultLimitWarning; import static org.elasticsearch.xpack.esql.analysis.Analyzer.NO_FIELDS; +import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.analyze; import static org.elasticsearch.xpack.esql.core.expression.Literal.FALSE; import static org.elasticsearch.xpack.esql.core.expression.Literal.NULL; import static org.elasticsearch.xpack.esql.core.expression.Literal.TRUE; @@ -205,6 +213,9 @@ public class LogicalPlanOptimizerTests extends ESTestCase { private static EnrichResolution enrichResolution; private static final LiteralsOnTheRight LITERALS_ON_THE_RIGHT = new LiteralsOnTheRight(); + private static Map metricMapping; + private static Analyzer metricsAnalyzer; + private static class 
SubstitutionOnlyOptimizer extends LogicalPlanOptimizer { static SubstitutionOnlyOptimizer INSTANCE = new SubstitutionOnlyOptimizer(new LogicalOptimizerContext(EsqlTestUtils.TEST_CFG)); @@ -260,6 +271,13 @@ public static void init() { new AnalyzerContext(EsqlTestUtils.TEST_CFG, new EsqlFunctionRegistry(), getIndexResultExtra, enrichResolution), TEST_VERIFIER ); + + metricMapping = loadMapping("k8s-mappings.json"); + var metricsIndex = IndexResolution.valid(new EsIndex("k8s", metricMapping, Set.of("k8s"))); + metricsAnalyzer = new Analyzer( + new AnalyzerContext(EsqlTestUtils.TEST_CFG, new EsqlFunctionRegistry(), metricsIndex, enrichResolution), + TEST_VERIFIER + ); } public void testEmptyProjections() { @@ -802,7 +820,13 @@ public void testSelectivelyPushDownFilterPastFunctionAgg() { Filter fa = new Filter(EMPTY, relation, conditionA); // invalid aggregate but that's fine cause its properties are not used by this rule - Aggregate aggregate = new Aggregate(EMPTY, fa, singletonList(getFieldAttribute("b")), emptyList()); + Aggregate aggregate = new Aggregate( + EMPTY, + fa, + Aggregate.AggregateType.STANDARD, + singletonList(getFieldAttribute("b")), + emptyList() + ); Filter fb = new Filter(EMPTY, aggregate, new And(EMPTY, aggregateCondition, conditionB)); // expected @@ -811,6 +835,7 @@ public void testSelectivelyPushDownFilterPastFunctionAgg() { new Aggregate( EMPTY, new Filter(EMPTY, relation, new And(EMPTY, conditionA, conditionB)), + Aggregate.AggregateType.STANDARD, singletonList(getFieldAttribute("b")), emptyList() ), @@ -4986,11 +5011,16 @@ public void testIsNullDisjunction() throws Exception { * } */ public void testLookupSimple() { - var plan = optimizedPlan(""" + String query = """ FROM test | RENAME languages AS int - | LOOKUP int_number_names ON int - """); + | LOOKUP int_number_names ON int"""; + if (Build.current().isProductionRelease()) { + var e = expectThrows(ParsingException.class, () -> analyze(query)); + assertThat(e.getMessage(), containsString("line 3:4: LOOKUP is in preview and only available in SNAPSHOT build")); + return; + } + var plan = optimizedPlan(query); var join = as(plan, Join.class); // Right is the lookup table @@ -5008,12 +5038,14 @@ public void testLookupSimple() { assertThat(join.config().type(), equalTo(JoinType.LEFT)); assertThat(join.config().matchFields().stream().map(Object::toString).toList(), matchesList().item(startsWith("int{r}"))); - assertThat(join.config().conditions().size(), equalTo(1)); - Equals eq = as(join.config().conditions().get(0), Equals.class); - assertThat(eq.left().toString(), startsWith("int{r}")); - assertThat(eq.right().toString(), startsWith("int{r}")); - assertTrue(join.children().get(0).outputSet() + " contains " + eq.left(), join.children().get(0).outputSet().contains(eq.left())); - assertTrue(join.children().get(1).outputSet() + " contains " + eq.right(), join.children().get(1).outputSet().contains(eq.right())); + assertThat(join.config().leftFields().size(), equalTo(1)); + assertThat(join.config().rightFields().size(), equalTo(1)); + Attribute lhs = join.config().leftFields().get(0); + Attribute rhs = join.config().rightFields().get(0); + assertThat(lhs.toString(), startsWith("int{r}")); + assertThat(rhs.toString(), startsWith("int{r}")); + assertTrue(join.children().get(0).outputSet() + " contains " + lhs, join.children().get(0).outputSet().contains(lhs)); + assertTrue(join.children().get(1).outputSet() + " contains " + rhs, join.children().get(1).outputSet().contains(rhs)); // Join's output looks sensible too assertMap( 
@@ -5057,12 +5089,17 @@ public void testLookupSimple() { * } */ public void testLookupStats() { - var plan = optimizedPlan(""" + String query = """ FROM test | RENAME languages AS int | LOOKUP int_number_names ON int - | STATS MIN(emp_no) BY name - """); + | STATS MIN(emp_no) BY name"""; + if (Build.current().isProductionRelease()) { + var e = expectThrows(ParsingException.class, () -> analyze(query)); + assertThat(e.getMessage(), containsString("line 3:4: LOOKUP is in preview and only available in SNAPSHOT build")); + return; + } + var plan = optimizedPlan(query); var limit = as(plan, Limit.class); assertThat(limit.limit().fold(), equalTo(1000)); @@ -5088,10 +5125,12 @@ public void testLookupStats() { assertThat(join.config().type(), equalTo(JoinType.LEFT)); assertThat(join.config().matchFields().stream().map(Object::toString).toList(), matchesList().item(startsWith("int{r}"))); - assertThat(join.config().conditions().size(), equalTo(1)); - Equals eq = as(join.config().conditions().get(0), Equals.class); - assertThat(eq.left().toString(), startsWith("int{r}")); - assertThat(eq.right().toString(), startsWith("int{r}")); + assertThat(join.config().leftFields().size(), equalTo(1)); + assertThat(join.config().rightFields().size(), equalTo(1)); + Attribute lhs = join.config().leftFields().get(0); + Attribute rhs = join.config().rightFields().get(0); + assertThat(lhs.toString(), startsWith("int{r}")); + assertThat(rhs.toString(), startsWith("int{r}")); // Join's output looks sensible too assertMap( @@ -5121,6 +5160,416 @@ public void testLookupStats() { ); } + public void testTranslateMetricsWithoutGrouping() { + assumeTrue("requires snapshot builds", Build.current().isSnapshot()); + var query = "METRICS k8s max(rate(network.total_bytes_in))"; + var plan = logicalOptimizer.optimize(metricsAnalyzer.analyze(parser.createStatement(query))); + Limit limit = as(plan, Limit.class); + Aggregate finalAggs = as(limit.child(), Aggregate.class); + Aggregate aggsByTsid = as(finalAggs.child(), Aggregate.class); + as(aggsByTsid.child(), EsRelation.class); + + assertThat(finalAggs.aggregateType(), equalTo(Aggregate.AggregateType.STANDARD)); + assertThat(finalAggs.aggregates(), hasSize(1)); + Max max = as(Alias.unwrap(finalAggs.aggregates().get(0)), Max.class); + assertThat(Expressions.attribute(max.field()).id(), equalTo(aggsByTsid.aggregates().get(0).id())); + assertThat(finalAggs.groupings(), empty()); + + assertThat(aggsByTsid.aggregateType(), equalTo(Aggregate.AggregateType.METRICS)); + assertThat(aggsByTsid.aggregates(), hasSize(1)); // _tsid is dropped + Rate rate = as(Alias.unwrap(aggsByTsid.aggregates().get(0)), Rate.class); + assertThat(Expressions.attribute(rate.field()).name(), equalTo("network.total_bytes_in")); + } + + public void testTranslateMixedAggsWithoutGrouping() { + assumeTrue("requires snapshot builds", Build.current().isSnapshot()); + var query = "METRICS k8s max(rate(network.total_bytes_in)), max(network.cost)"; + var plan = logicalOptimizer.optimize(metricsAnalyzer.analyze(parser.createStatement(query))); + Limit limit = as(plan, Limit.class); + Aggregate finalAggs = as(limit.child(), Aggregate.class); + Aggregate aggsByTsid = as(finalAggs.child(), Aggregate.class); + as(aggsByTsid.child(), EsRelation.class); + + assertThat(finalAggs.aggregateType(), equalTo(Aggregate.AggregateType.STANDARD)); + assertThat(finalAggs.aggregates(), hasSize(2)); + Max maxRate = as(Alias.unwrap(finalAggs.aggregates().get(0)), Max.class); + FromPartial maxCost = 
as(Alias.unwrap(finalAggs.aggregates().get(1)), FromPartial.class); + assertThat(Expressions.attribute(maxRate.field()).id(), equalTo(aggsByTsid.aggregates().get(0).id())); + assertThat(Expressions.attribute(maxCost.field()).id(), equalTo(aggsByTsid.aggregates().get(1).id())); + assertThat(finalAggs.groupings(), empty()); + + assertThat(aggsByTsid.aggregateType(), equalTo(Aggregate.AggregateType.METRICS)); + assertThat(aggsByTsid.aggregates(), hasSize(2)); + Rate rate = as(Alias.unwrap(aggsByTsid.aggregates().get(0)), Rate.class); + assertThat(Expressions.attribute(rate.field()).name(), equalTo("network.total_bytes_in")); + ToPartial toPartialMaxCost = as(Alias.unwrap(aggsByTsid.aggregates().get(1)), ToPartial.class); + assertThat(Expressions.attribute(toPartialMaxCost.field()).name(), equalTo("network.cost")); + } + + public void testTranslateMixedAggsWithMathWithoutGrouping() { + assumeTrue("requires snapshot builds", Build.current().isSnapshot()); + var query = "METRICS k8s max(rate(network.total_bytes_in)), max(network.cost + 0.2) * 1.1"; + var plan = logicalOptimizer.optimize(metricsAnalyzer.analyze(parser.createStatement(query))); + Project project = as(plan, Project.class); + Eval mulEval = as(project.child(), Eval.class); + assertThat(mulEval.fields(), hasSize(1)); + Mul mul = as(Alias.unwrap(mulEval.fields().get(0)), Mul.class); + Limit limit = as(mulEval.child(), Limit.class); + Aggregate finalAggs = as(limit.child(), Aggregate.class); + assertThat(finalAggs.aggregates(), hasSize(2)); + Aggregate aggsByTsid = as(finalAggs.child(), Aggregate.class); + assertThat(aggsByTsid.aggregates(), hasSize(2)); + Eval addEval = as(aggsByTsid.child(), Eval.class); + assertThat(addEval.fields(), hasSize(1)); + Add add = as(Alias.unwrap(addEval.fields().get(0)), Add.class); + as(addEval.child(), EsRelation.class); + + assertThat(Expressions.attribute(mul.left()).id(), equalTo(finalAggs.aggregates().get(1).id())); + assertThat(mul.right().fold(), equalTo(1.1)); + + assertThat(finalAggs.aggregateType(), equalTo(Aggregate.AggregateType.STANDARD)); + Max maxRate = as(Alias.unwrap(finalAggs.aggregates().get(0)), Max.class); + FromPartial maxCost = as(Alias.unwrap(finalAggs.aggregates().get(1)), FromPartial.class); + assertThat(Expressions.attribute(maxRate.field()).id(), equalTo(aggsByTsid.aggregates().get(0).id())); + assertThat(Expressions.attribute(maxCost.field()).id(), equalTo(aggsByTsid.aggregates().get(1).id())); + assertThat(finalAggs.groupings(), empty()); + + assertThat(aggsByTsid.aggregateType(), equalTo(Aggregate.AggregateType.METRICS)); + Rate rate = as(Alias.unwrap(aggsByTsid.aggregates().get(0)), Rate.class); + assertThat(Expressions.attribute(rate.field()).name(), equalTo("network.total_bytes_in")); + ToPartial toPartialMaxCost = as(Alias.unwrap(aggsByTsid.aggregates().get(1)), ToPartial.class); + assertThat(Expressions.attribute(toPartialMaxCost.field()).id(), equalTo(addEval.fields().get(0).id())); + assertThat(Expressions.attribute(add.left()).name(), equalTo("network.cost")); + assertThat(add.right().fold(), equalTo(0.2)); + } + + public void testTranslateMetricsGroupedByOneDimension() { + assumeTrue("requires snapshot builds", Build.current().isSnapshot()); + var query = "METRICS k8s sum(rate(network.total_bytes_in)) BY cluster | SORT cluster | LIMIT 10"; + var plan = logicalOptimizer.optimize(metricsAnalyzer.analyze(parser.createStatement(query))); + TopN topN = as(plan, TopN.class); + Aggregate aggsByCluster = as(topN.child(), Aggregate.class); + 
assertThat(aggsByCluster.aggregates(), hasSize(2)); + Aggregate aggsByTsid = as(aggsByCluster.child(), Aggregate.class); + assertThat(aggsByTsid.aggregates(), hasSize(2)); // _tsid is dropped + as(aggsByTsid.child(), EsRelation.class); + + assertThat(aggsByCluster.aggregateType(), equalTo(Aggregate.AggregateType.STANDARD)); + Sum sum = as(Alias.unwrap(aggsByCluster.aggregates().get(0)), Sum.class); + assertThat(Expressions.attribute(sum.field()).id(), equalTo(aggsByTsid.aggregates().get(0).id())); + assertThat(aggsByCluster.groupings(), hasSize(1)); + assertThat(Expressions.attribute(aggsByCluster.groupings().get(0)).id(), equalTo(aggsByTsid.aggregates().get(1).id())); + + assertThat(aggsByTsid.aggregateType(), equalTo(Aggregate.AggregateType.METRICS)); + Rate rate = as(Alias.unwrap(aggsByTsid.aggregates().get(0)), Rate.class); + assertThat(Expressions.attribute(rate.field()).name(), equalTo("network.total_bytes_in")); + Values values = as(Alias.unwrap(aggsByTsid.aggregates().get(1)), Values.class); + assertThat(Expressions.attribute(values.field()).name(), equalTo("cluster")); + } + + public void testTranslateMetricsGroupedByTwoDimension() { + assumeTrue("requires snapshot builds", Build.current().isSnapshot()); + var query = "METRICS k8s avg(rate(network.total_bytes_in)) BY cluster, pod"; + var plan = logicalOptimizer.optimize(metricsAnalyzer.analyze(parser.createStatement(query))); + Project project = as(plan, Project.class); + Eval eval = as(project.child(), Eval.class); + assertThat(eval.fields(), hasSize(1)); + Limit limit = as(eval.child(), Limit.class); + Aggregate finalAggs = as(limit.child(), Aggregate.class); + assertThat(finalAggs.aggregates(), hasSize(4)); + Aggregate aggsByTsid = as(finalAggs.child(), Aggregate.class); + assertThat(aggsByTsid.aggregates(), hasSize(3)); // _tsid is dropped + as(aggsByTsid.child(), EsRelation.class); + + Div div = as(Alias.unwrap(eval.fields().get(0)), Div.class); + assertThat(Expressions.attribute(div.left()).id(), equalTo(finalAggs.aggregates().get(0).id())); + assertThat(Expressions.attribute(div.right()).id(), equalTo(finalAggs.aggregates().get(1).id())); + + assertThat(finalAggs.aggregateType(), equalTo(Aggregate.AggregateType.STANDARD)); + Sum sum = as(Alias.unwrap(finalAggs.aggregates().get(0)), Sum.class); + assertThat(Expressions.attribute(sum.field()).id(), equalTo(aggsByTsid.aggregates().get(0).id())); + Count count = as(Alias.unwrap(finalAggs.aggregates().get(1)), Count.class); + assertThat(Expressions.attribute(count.field()).id(), equalTo(aggsByTsid.aggregates().get(0).id())); + assertThat(finalAggs.groupings(), hasSize(2)); + assertThat(Expressions.attribute(finalAggs.groupings().get(0)).id(), equalTo(aggsByTsid.aggregates().get(1).id())); + assertThat(Expressions.attribute(finalAggs.groupings().get(1)).id(), equalTo(aggsByTsid.aggregates().get(2).id())); + + assertThat(finalAggs.groupings(), hasSize(2)); + + assertThat(aggsByTsid.aggregateType(), equalTo(Aggregate.AggregateType.METRICS)); + assertThat(aggsByTsid.aggregates(), hasSize(3)); // rates, values(cluster), values(pod) + Rate rate = as(Alias.unwrap(aggsByTsid.aggregates().get(0)), Rate.class); + assertThat(Expressions.attribute(rate.field()).name(), equalTo("network.total_bytes_in")); + Values values1 = as(Alias.unwrap(aggsByTsid.aggregates().get(1)), Values.class); + assertThat(Expressions.attribute(values1.field()).name(), equalTo("cluster")); + Values values2 = as(Alias.unwrap(aggsByTsid.aggregates().get(2)), Values.class); + 
assertThat(Expressions.attribute(values2.field()).name(), equalTo("pod")); + } + + public void testTranslateMetricsGroupedByTimeBucket() { + assumeTrue("requires snapshot builds", Build.current().isSnapshot()); + var query = "METRICS k8s sum(rate(network.total_bytes_in)) BY bucket(@timestamp, 1h)"; + var plan = logicalOptimizer.optimize(metricsAnalyzer.analyze(parser.createStatement(query))); + Limit limit = as(plan, Limit.class); + Aggregate finalAgg = as(limit.child(), Aggregate.class); + assertThat(finalAgg.aggregates(), hasSize(2)); + Aggregate aggsByTsid = as(finalAgg.child(), Aggregate.class); + assertThat(aggsByTsid.aggregates(), hasSize(2)); // _tsid is dropped + Eval eval = as(aggsByTsid.child(), Eval.class); + assertThat(eval.fields(), hasSize(1)); + as(eval.child(), EsRelation.class); + + assertThat(finalAgg.aggregateType(), equalTo(Aggregate.AggregateType.STANDARD)); + Sum sum = as(Alias.unwrap(finalAgg.aggregates().get(0)), Sum.class); + assertThat(Expressions.attribute(sum.field()).id(), equalTo(aggsByTsid.aggregates().get(0).id())); + assertThat(finalAgg.groupings(), hasSize(1)); + assertThat(Expressions.attribute(finalAgg.groupings().get(0)).id(), equalTo(aggsByTsid.aggregates().get(1).id())); + + assertThat(aggsByTsid.aggregateType(), equalTo(Aggregate.AggregateType.METRICS)); + Rate rate = as(Alias.unwrap(aggsByTsid.aggregates().get(0)), Rate.class); + assertThat(Expressions.attribute(rate.field()).name(), equalTo("network.total_bytes_in")); + assertThat(Expressions.attribute(aggsByTsid.groupings().get(1)).id(), equalTo(eval.fields().get(0).id())); + Bucket bucket = as(Alias.unwrap(eval.fields().get(0)), Bucket.class); + assertThat(Expressions.attribute(bucket.field()).name(), equalTo("@timestamp")); + } + + public void testTranslateMetricsGroupedByTimeBucketAndDimensions() { + assumeTrue("requires snapshot builds", Build.current().isSnapshot()); + var query = """ + METRICS k8s avg(rate(network.total_bytes_in)) BY pod, bucket(@timestamp, 5 minute), cluster + | SORT cluster + | LIMIT 10 + """; + var plan = logicalOptimizer.optimize(metricsAnalyzer.analyze(parser.createStatement(query))); + Project project = as(plan, Project.class); + TopN topN = as(project.child(), TopN.class); + Eval eval = as(topN.child(), Eval.class); + assertThat(eval.fields(), hasSize(1)); + Div div = as(Alias.unwrap(eval.fields().get(0)), Div.class); + Aggregate finalAgg = as(eval.child(), Aggregate.class); + Aggregate aggsByTsid = as(finalAgg.child(), Aggregate.class); + Eval bucket = as(aggsByTsid.child(), Eval.class); + as(bucket.child(), EsRelation.class); + assertThat(Expressions.attribute(div.left()).id(), equalTo(finalAgg.aggregates().get(0).id())); + assertThat(Expressions.attribute(div.right()).id(), equalTo(finalAgg.aggregates().get(1).id())); + + assertThat(finalAgg.aggregateType(), equalTo(Aggregate.AggregateType.STANDARD)); + assertThat(finalAgg.aggregates(), hasSize(5)); // sum, count, pod, bucket, cluster + Sum sum = as(Alias.unwrap(finalAgg.aggregates().get(0)), Sum.class); + Count count = as(Alias.unwrap(finalAgg.aggregates().get(1)), Count.class); + assertThat(Expressions.attribute(sum.field()).id(), equalTo(aggsByTsid.aggregates().get(0).id())); + assertThat(Expressions.attribute(count.field()).id(), equalTo(aggsByTsid.aggregates().get(0).id())); + assertThat(finalAgg.groupings(), hasSize(3)); + assertThat(Expressions.attribute(finalAgg.groupings().get(0)).id(), equalTo(aggsByTsid.aggregates().get(1).id())); + + assertThat(aggsByTsid.aggregateType(), 
equalTo(Aggregate.AggregateType.METRICS)); + assertThat(aggsByTsid.aggregates(), hasSize(4)); // rate, values(pod), values(cluster), bucket + Rate rate = as(Alias.unwrap(aggsByTsid.aggregates().get(0)), Rate.class); + assertThat(Expressions.attribute(rate.field()).name(), equalTo("network.total_bytes_in")); + Values podValues = as(Alias.unwrap(aggsByTsid.aggregates().get(1)), Values.class); + assertThat(Expressions.attribute(podValues.field()).name(), equalTo("pod")); + Values clusterValues = as(Alias.unwrap(aggsByTsid.aggregates().get(3)), Values.class); + assertThat(Expressions.attribute(clusterValues.field()).name(), equalTo("cluster")); + } + + public void testTranslateMixedAggsGroupedByTimeBucketAndDimensions() { + assumeTrue("requires snapshot builds", Build.current().isSnapshot()); + var query = """ + METRICS k8s avg(rate(network.total_bytes_in)), avg(network.cost) BY bucket(@timestamp, 5 minute), cluster + | SORT cluster + | LIMIT 10 + """; + var plan = logicalOptimizer.optimize(metricsAnalyzer.analyze(parser.createStatement(query))); + Project project = as(plan, Project.class); + TopN topN = as(project.child(), TopN.class); + Eval eval = as(topN.child(), Eval.class); + assertThat(eval.fields(), hasSize(2)); + Div div = as(Alias.unwrap(eval.fields().get(0)), Div.class); + Aggregate finalAgg = as(eval.child(), Aggregate.class); + Aggregate aggsByTsid = as(finalAgg.child(), Aggregate.class); + Eval bucket = as(aggsByTsid.child(), Eval.class); + as(bucket.child(), EsRelation.class); + assertThat(Expressions.attribute(div.left()).id(), equalTo(finalAgg.aggregates().get(0).id())); + assertThat(Expressions.attribute(div.right()).id(), equalTo(finalAgg.aggregates().get(1).id())); + + assertThat(finalAgg.aggregateType(), equalTo(Aggregate.AggregateType.STANDARD)); + assertThat(finalAgg.aggregates(), hasSize(6)); // sum, count, sum, count, bucket, cluster + Sum sumRate = as(Alias.unwrap(finalAgg.aggregates().get(0)), Sum.class); + Count countRate = as(Alias.unwrap(finalAgg.aggregates().get(1)), Count.class); + assertThat(Expressions.attribute(sumRate.field()).id(), equalTo(aggsByTsid.aggregates().get(0).id())); + assertThat(Expressions.attribute(countRate.field()).id(), equalTo(aggsByTsid.aggregates().get(0).id())); + + FromPartial sumCost = as(Alias.unwrap(finalAgg.aggregates().get(2)), FromPartial.class); + FromPartial countCost = as(Alias.unwrap(finalAgg.aggregates().get(3)), FromPartial.class); + assertThat(Expressions.attribute(sumCost.field()).id(), equalTo(aggsByTsid.aggregates().get(1).id())); + assertThat(Expressions.attribute(countCost.field()).id(), equalTo(aggsByTsid.aggregates().get(2).id())); + + assertThat(finalAgg.groupings(), hasSize(2)); + assertThat(Expressions.attribute(finalAgg.groupings().get(0)).id(), equalTo(aggsByTsid.aggregates().get(3).id())); + + assertThat(aggsByTsid.aggregateType(), equalTo(Aggregate.AggregateType.METRICS)); + assertThat(aggsByTsid.aggregates(), hasSize(5)); // rate, to_partial(sum(cost)), to_partial(count(cost)), values(cluster), bucket + Rate rate = as(Alias.unwrap(aggsByTsid.aggregates().get(0)), Rate.class); + assertThat(Expressions.attribute(rate.field()).name(), equalTo("network.total_bytes_in")); + ToPartial toPartialSum = as(Alias.unwrap(aggsByTsid.aggregates().get(1)), ToPartial.class); + assertThat(toPartialSum.function(), instanceOf(Sum.class)); + assertThat(Expressions.attribute(toPartialSum.field()).name(), equalTo("network.cost")); + ToPartial toPartialCount = as(Alias.unwrap(aggsByTsid.aggregates().get(2)), ToPartial.class); + 
assertThat(toPartialCount.function(), instanceOf(Count.class)); + assertThat(Expressions.attribute(toPartialCount.field()).name(), equalTo("network.cost")); + Values clusterValues = as(Alias.unwrap(aggsByTsid.aggregates().get(4)), Values.class); + assertThat(Expressions.attribute(clusterValues.field()).name(), equalTo("cluster")); + } + + public void testAdjustMetricsRateBeforeFinalAgg() { + assumeTrue("requires snapshot builds", Build.current().isSnapshot()); + var query = """ + METRICS k8s avg(round(1.05 * rate(network.total_bytes_in))) BY bucket(@timestamp, 1 minute), cluster + | SORT cluster + | LIMIT 10 + """; + var plan = logicalOptimizer.optimize(metricsAnalyzer.analyze(parser.createStatement(query))); + Project project = as(plan, Project.class); + TopN topN = as(project.child(), TopN.class); + Eval evalDiv = as(topN.child(), Eval.class); + assertThat(evalDiv.fields(), hasSize(1)); + Div div = as(Alias.unwrap(evalDiv.fields().get(0)), Div.class); + + Aggregate finalAgg = as(evalDiv.child(), Aggregate.class); + assertThat(finalAgg.aggregates(), hasSize(4)); // sum, count, bucket, cluster + assertThat(finalAgg.groupings(), hasSize(2)); + + Eval evalRound = as(finalAgg.child(), Eval.class); + Round round = as(Alias.unwrap(evalRound.fields().get(0)), Round.class); + Mul mul = as(round.field(), Mul.class); + + Aggregate aggsByTsid = as(evalRound.child(), Aggregate.class); + assertThat(aggsByTsid.aggregates(), hasSize(3)); // rate, cluster, bucket + assertThat(aggsByTsid.groupings(), hasSize(2)); + + Eval evalBucket = as(aggsByTsid.child(), Eval.class); + assertThat(evalBucket.fields(), hasSize(1)); + Bucket bucket = as(Alias.unwrap(evalBucket.fields().get(0)), Bucket.class); + as(evalBucket.child(), EsRelation.class); + + assertThat(Expressions.attribute(div.left()).id(), equalTo(finalAgg.aggregates().get(0).id())); + assertThat(Expressions.attribute(div.right()).id(), equalTo(finalAgg.aggregates().get(1).id())); + + assertThat(finalAgg.aggregateType(), equalTo(Aggregate.AggregateType.STANDARD)); + + Sum sum = as(Alias.unwrap(finalAgg.aggregates().get(0)), Sum.class); + Count count = as(Alias.unwrap(finalAgg.aggregates().get(1)), Count.class); + assertThat(Expressions.attribute(sum.field()).id(), equalTo(evalRound.fields().get(0).id())); + assertThat(Expressions.attribute(count.field()).id(), equalTo(evalRound.fields().get(0).id())); + + assertThat( + Expressions.attribute(finalAgg.groupings().get(0)).id(), + equalTo(Expressions.attribute(aggsByTsid.groupings().get(1)).id()) + ); + assertThat(Expressions.attribute(finalAgg.groupings().get(1)).id(), equalTo(aggsByTsid.aggregates().get(1).id())); + + assertThat(Expressions.attribute(mul.left()).id(), equalTo(aggsByTsid.aggregates().get(0).id())); + assertThat(mul.right().fold(), equalTo(1.05)); + assertThat(aggsByTsid.aggregateType(), equalTo(Aggregate.AggregateType.METRICS)); + Rate rate = as(Alias.unwrap(aggsByTsid.aggregates().get(0)), Rate.class); + assertThat(Expressions.attribute(rate.field()).name(), equalTo("network.total_bytes_in")); + Values values = as(Alias.unwrap(aggsByTsid.aggregates().get(1)), Values.class); + assertThat(Expressions.attribute(values.field()).name(), equalTo("cluster")); + } + + public void testMetricsWithoutRate() { + assumeTrue("requires snapshot builds", Build.current().isSnapshot()); + List queries = List.of(""" + METRICS k8s count(to_long(network.total_bytes_in)) BY bucket(@timestamp, 1 minute) + | LIMIT 10 + """, """ + METRICS k8s | STATS count(to_long(network.total_bytes_in)) BY bucket(@timestamp, 1 
minute) + | LIMIT 10 + """, """ + FROM k8s | STATS count(to_long(network.total_bytes_in)) BY bucket(@timestamp, 1 minute) + | LIMIT 10 + """); + List plans = new ArrayList<>(); + for (String query : queries) { + var plan = logicalOptimizer.optimize(metricsAnalyzer.analyze(parser.createStatement(query))); + plans.add(plan); + } + for (LogicalPlan plan : plans) { + Limit limit = as(plan, Limit.class); + Aggregate aggregate = as(limit.child(), Aggregate.class); + assertThat(aggregate.aggregateType(), equalTo(Aggregate.AggregateType.STANDARD)); + assertThat(aggregate.aggregates(), hasSize(2)); + assertThat(aggregate.groupings(), hasSize(1)); + Eval eval = as(aggregate.child(), Eval.class); + assertThat(eval.fields(), hasSize(2)); + assertThat(Alias.unwrap(eval.fields().get(0)), instanceOf(Bucket.class)); + assertThat(Alias.unwrap(eval.fields().get(1)), instanceOf(ToLong.class)); + EsRelation relation = as(eval.child(), EsRelation.class); + assertThat(relation.indexMode(), equalTo(IndexMode.STANDARD)); + } + for (int i = 1; i < plans.size(); i++) { + assertThat(plans.get(i), equalTo(plans.get(0))); + } + } + + public void testRateInStats() { + assumeTrue("requires snapshot builds", Build.current().isSnapshot()); + var query = """ + METRICS k8s | STATS max(rate(network.total_bytes_in)) BY bucket(@timestamp, 1 minute) + | LIMIT 10 + """; + VerificationException error = expectThrows( + VerificationException.class, + () -> logicalOptimizer.optimize(metricsAnalyzer.analyze(parser.createStatement(query))) + ); + assertThat(error.getMessage(), equalTo(""" + Found 1 problem + line 1:25: the rate aggregate[rate(network.total_bytes_in)] can only be used within the metrics command""")); + } + + public void testMvSortInvalidOrder() { + VerificationException e = expectThrows(VerificationException.class, () -> plan(""" + from test + | EVAL sd = mv_sort(salary, "ABC") + """)); + assertTrue(e.getMessage().startsWith("Found ")); + final String header = "Found 1 problem\nline "; + assertEquals( + "2:29: Invalid order value in [mv_sort(salary, \"ABC\")], expected one of [ASC, DESC] but got [ABC]", + e.getMessage().substring(header.length()) + ); + + e = expectThrows(VerificationException.class, () -> plan(""" + from test + | EVAL order = "ABC", sd = mv_sort(salary, order) + """)); + assertTrue(e.getMessage().startsWith("Found ")); + assertEquals( + "2:16: Invalid order value in [mv_sort(salary, order)], expected one of [ASC, DESC] but got [ABC]", + e.getMessage().substring(header.length()) + ); + + e = expectThrows(VerificationException.class, () -> plan(""" + from test + | EVAL order = concat("d", "sc"), sd = mv_sort(salary, order) + """)); + assertTrue(e.getMessage().startsWith("Found ")); + assertEquals( + "2:16: Invalid order value in [mv_sort(salary, order)], expected one of [ASC, DESC] but got [dsc]", + e.getMessage().substring(header.length()) + ); + + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> plan(""" + row v = [1, 2, 3] | EVAL sd = mv_sort(v, "dsc") + """)); + assertEquals("Invalid order value in [mv_sort(v, \"dsc\")], expected one of [ASC, DESC] but got [dsc]", iae.getMessage()); + + iae = expectThrows(IllegalArgumentException.class, () -> plan(""" + row v = [1, 2, 3], o = concat("d", "sc") | EVAL sd = mv_sort(v, o) + """)); + assertEquals("Invalid order value in [mv_sort(v, o)], expected one of [ASC, DESC] but got [dsc]", iae.getMessage()); + } + private Literal nullOf(DataType dataType) { return new Literal(Source.EMPTY, null, dataType); } diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRulesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRulesTests.java deleted file mode 100644 index b550f6e6090da..0000000000000 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRulesTests.java +++ /dev/null @@ -1,860 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.optimizer; - -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.esql.core.expression.Alias; -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.Expressions; -import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; -import org.elasticsearch.xpack.esql.core.expression.Literal; -import org.elasticsearch.xpack.esql.core.expression.Nullability; -import org.elasticsearch.xpack.esql.core.expression.predicate.BinaryOperator; -import org.elasticsearch.xpack.esql.core.expression.predicate.Predicates; -import org.elasticsearch.xpack.esql.core.expression.predicate.Range; -import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; -import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Not; -import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; -import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull; -import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNull; -import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparison; -import org.elasticsearch.xpack.esql.core.expression.predicate.regex.Like; -import org.elasticsearch.xpack.esql.core.expression.predicate.regex.LikePattern; -import org.elasticsearch.xpack.esql.core.expression.predicate.regex.RLike; -import org.elasticsearch.xpack.esql.core.expression.predicate.regex.RLikePattern; -import org.elasticsearch.xpack.esql.core.expression.predicate.regex.WildcardLike; -import org.elasticsearch.xpack.esql.core.expression.predicate.regex.WildcardPattern; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.FoldNull; -import org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.PropagateNullable; -import org.elasticsearch.xpack.esql.core.plan.logical.Filter; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.core.util.StringUtils; -import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; -import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Div; -import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mod; -import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mul; -import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Sub; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThan; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThanOrEqual; -import 
org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals; -import org.elasticsearch.xpack.esql.optimizer.rules.BooleanFunctionEqualsElimination; -import org.elasticsearch.xpack.esql.optimizer.rules.CombineDisjunctionsToIn; -import org.elasticsearch.xpack.esql.optimizer.rules.ConstantFolding; -import org.elasticsearch.xpack.esql.optimizer.rules.LiteralsOnTheRight; -import org.elasticsearch.xpack.esql.optimizer.rules.PropagateEquals; -import org.elasticsearch.xpack.esql.optimizer.rules.ReplaceRegexMatch; - -import java.util.List; - -import static java.util.Arrays.asList; -import static org.elasticsearch.xpack.esql.EsqlTestUtils.FIVE; -import static org.elasticsearch.xpack.esql.EsqlTestUtils.FOUR; -import static org.elasticsearch.xpack.esql.EsqlTestUtils.ONE; -import static org.elasticsearch.xpack.esql.EsqlTestUtils.THREE; -import static org.elasticsearch.xpack.esql.EsqlTestUtils.TWO; -import static org.elasticsearch.xpack.esql.EsqlTestUtils.equalsOf; -import static org.elasticsearch.xpack.esql.EsqlTestUtils.getFieldAttribute; -import static org.elasticsearch.xpack.esql.EsqlTestUtils.greaterThanOf; -import static org.elasticsearch.xpack.esql.EsqlTestUtils.greaterThanOrEqualOf; -import static org.elasticsearch.xpack.esql.EsqlTestUtils.lessThanOf; -import static org.elasticsearch.xpack.esql.EsqlTestUtils.lessThanOrEqualOf; -import static org.elasticsearch.xpack.esql.EsqlTestUtils.notEqualsOf; -import static org.elasticsearch.xpack.esql.EsqlTestUtils.of; -import static org.elasticsearch.xpack.esql.EsqlTestUtils.rangeOf; -import static org.elasticsearch.xpack.esql.EsqlTestUtils.relation; -import static org.elasticsearch.xpack.esql.core.expression.Literal.FALSE; -import static org.elasticsearch.xpack.esql.core.expression.Literal.NULL; -import static org.elasticsearch.xpack.esql.core.expression.Literal.TRUE; -import static org.elasticsearch.xpack.esql.core.tree.Source.EMPTY; -import static org.elasticsearch.xpack.esql.core.type.DataType.BOOLEAN; -import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; -import static org.hamcrest.Matchers.contains; - -public class OptimizerRulesTests extends ESTestCase { - private static final Expression DUMMY_EXPRESSION = - new org.elasticsearch.xpack.esql.core.optimizer.OptimizerRulesTests.DummyBooleanExpression(EMPTY, 0); - - // - // Constant folding - // - - public void testConstantFolding() { - Expression exp = new Add(EMPTY, TWO, THREE); - - assertTrue(exp.foldable()); - Expression result = new ConstantFolding().rule(exp); - assertTrue(result instanceof Literal); - assertEquals(5, ((Literal) result).value()); - - // check now with an alias - result = new ConstantFolding().rule(new Alias(EMPTY, "a", exp)); - assertEquals("a", Expressions.name(result)); - assertEquals(Alias.class, result.getClass()); - } - - public void testConstantFoldingBinaryComparison() { - assertEquals(FALSE, new ConstantFolding().rule(greaterThanOf(TWO, THREE)).canonical()); - assertEquals(FALSE, new ConstantFolding().rule(greaterThanOrEqualOf(TWO, THREE)).canonical()); - assertEquals(FALSE, new ConstantFolding().rule(equalsOf(TWO, THREE)).canonical()); - assertEquals(TRUE, new ConstantFolding().rule(notEqualsOf(TWO, THREE)).canonical()); - assertEquals(TRUE, new 
ConstantFolding().rule(lessThanOrEqualOf(TWO, THREE)).canonical()); - assertEquals(TRUE, new ConstantFolding().rule(lessThanOf(TWO, THREE)).canonical()); - } - - public void testConstantFoldingBinaryLogic() { - assertEquals(FALSE, new ConstantFolding().rule(new And(EMPTY, greaterThanOf(TWO, THREE), TRUE)).canonical()); - assertEquals(TRUE, new ConstantFolding().rule(new Or(EMPTY, greaterThanOrEqualOf(TWO, THREE), TRUE)).canonical()); - } - - public void testConstantFoldingBinaryLogic_WithNullHandling() { - assertEquals(Nullability.TRUE, new ConstantFolding().rule(new And(EMPTY, NULL, TRUE)).canonical().nullable()); - assertEquals(Nullability.TRUE, new ConstantFolding().rule(new And(EMPTY, TRUE, NULL)).canonical().nullable()); - assertEquals(FALSE, new ConstantFolding().rule(new And(EMPTY, NULL, FALSE)).canonical()); - assertEquals(FALSE, new ConstantFolding().rule(new And(EMPTY, FALSE, NULL)).canonical()); - assertEquals(Nullability.TRUE, new ConstantFolding().rule(new And(EMPTY, NULL, NULL)).canonical().nullable()); - - assertEquals(TRUE, new ConstantFolding().rule(new Or(EMPTY, NULL, TRUE)).canonical()); - assertEquals(TRUE, new ConstantFolding().rule(new Or(EMPTY, TRUE, NULL)).canonical()); - assertEquals(Nullability.TRUE, new ConstantFolding().rule(new Or(EMPTY, NULL, FALSE)).canonical().nullable()); - assertEquals(Nullability.TRUE, new ConstantFolding().rule(new Or(EMPTY, FALSE, NULL)).canonical().nullable()); - assertEquals(Nullability.TRUE, new ConstantFolding().rule(new Or(EMPTY, NULL, NULL)).canonical().nullable()); - } - - public void testConstantFoldingRange() { - assertEquals(true, new ConstantFolding().rule(rangeOf(FIVE, FIVE, true, new Literal(EMPTY, 10, DataType.INTEGER), false)).fold()); - assertEquals(false, new ConstantFolding().rule(rangeOf(FIVE, FIVE, false, new Literal(EMPTY, 10, DataType.INTEGER), false)).fold()); - } - - public void testConstantNot() { - assertEquals(FALSE, new ConstantFolding().rule(new Not(EMPTY, TRUE))); - assertEquals(TRUE, new ConstantFolding().rule(new Not(EMPTY, FALSE))); - } - - public void testConstantFoldingLikes() { - assertEquals(TRUE, new ConstantFolding().rule(new Like(EMPTY, of("test_emp"), new LikePattern("test%", (char) 0))).canonical()); - assertEquals(TRUE, new ConstantFolding().rule(new WildcardLike(EMPTY, of("test_emp"), new WildcardPattern("test*"))).canonical()); - assertEquals(TRUE, new ConstantFolding().rule(new RLike(EMPTY, of("test_emp"), new RLikePattern("test.emp"))).canonical()); - } - - public void testArithmeticFolding() { - assertEquals(10, foldOperator(new Add(EMPTY, new Literal(EMPTY, 7, DataType.INTEGER), THREE))); - assertEquals(4, foldOperator(new Sub(EMPTY, new Literal(EMPTY, 7, DataType.INTEGER), THREE))); - assertEquals(21, foldOperator(new Mul(EMPTY, new Literal(EMPTY, 7, DataType.INTEGER), THREE))); - assertEquals(2, foldOperator(new Div(EMPTY, new Literal(EMPTY, 7, DataType.INTEGER), THREE))); - assertEquals(1, foldOperator(new Mod(EMPTY, new Literal(EMPTY, 7, DataType.INTEGER), THREE))); - } - - private static Object foldOperator(BinaryOperator b) { - return ((Literal) new ConstantFolding().rule(b)).value(); - } - - // - // CombineDisjunction in Equals - // - public void testTwoEqualsWithOr() { - FieldAttribute fa = getFieldAttribute(); - - Or or = new Or(EMPTY, equalsOf(fa, ONE), equalsOf(fa, TWO)); - Expression e = new CombineDisjunctionsToIn().rule(or); - assertEquals(In.class, e.getClass()); - In in = (In) e; - assertEquals(fa, in.value()); - assertThat(in.list(), contains(ONE, TWO)); - } - - public 
void testTwoEqualsWithSameValue() { - FieldAttribute fa = getFieldAttribute(); - - Or or = new Or(EMPTY, equalsOf(fa, ONE), equalsOf(fa, ONE)); - Expression e = new CombineDisjunctionsToIn().rule(or); - assertEquals(Equals.class, e.getClass()); - Equals eq = (Equals) e; - assertEquals(fa, eq.left()); - assertEquals(ONE, eq.right()); - } - - public void testOneEqualsOneIn() { - FieldAttribute fa = getFieldAttribute(); - - Or or = new Or(EMPTY, equalsOf(fa, ONE), new In(EMPTY, fa, List.of(TWO))); - Expression e = new CombineDisjunctionsToIn().rule(or); - assertEquals(In.class, e.getClass()); - In in = (In) e; - assertEquals(fa, in.value()); - assertThat(in.list(), contains(ONE, TWO)); - } - - public void testOneEqualsOneInWithSameValue() { - FieldAttribute fa = getFieldAttribute(); - - Or or = new Or(EMPTY, equalsOf(fa, ONE), new In(EMPTY, fa, asList(ONE, TWO))); - Expression e = new CombineDisjunctionsToIn().rule(or); - assertEquals(In.class, e.getClass()); - In in = (In) e; - assertEquals(fa, in.value()); - assertThat(in.list(), contains(ONE, TWO)); - } - - public void testSingleValueInToEquals() { - FieldAttribute fa = getFieldAttribute(); - - Equals equals = equalsOf(fa, ONE); - Or or = new Or(EMPTY, equals, new In(EMPTY, fa, List.of(ONE))); - Expression e = new CombineDisjunctionsToIn().rule(or); - assertEquals(equals, e); - } - - public void testEqualsBehindAnd() { - FieldAttribute fa = getFieldAttribute(); - - And and = new And(EMPTY, equalsOf(fa, ONE), equalsOf(fa, TWO)); - Filter dummy = new Filter(EMPTY, relation(), and); - LogicalPlan transformed = new CombineDisjunctionsToIn().apply(dummy); - assertSame(dummy, transformed); - assertEquals(and, ((Filter) transformed).condition()); - } - - public void testTwoEqualsDifferentFields() { - FieldAttribute fieldOne = getFieldAttribute("ONE"); - FieldAttribute fieldTwo = getFieldAttribute("TWO"); - - Or or = new Or(EMPTY, equalsOf(fieldOne, ONE), equalsOf(fieldTwo, TWO)); - Expression e = new CombineDisjunctionsToIn().rule(or); - assertEquals(or, e); - } - - public void testMultipleIn() { - FieldAttribute fa = getFieldAttribute(); - - Or firstOr = new Or(EMPTY, new In(EMPTY, fa, List.of(ONE)), new In(EMPTY, fa, List.of(TWO))); - Or secondOr = new Or(EMPTY, firstOr, new In(EMPTY, fa, List.of(THREE))); - Expression e = new CombineDisjunctionsToIn().rule(secondOr); - assertEquals(In.class, e.getClass()); - In in = (In) e; - assertEquals(fa, in.value()); - assertThat(in.list(), contains(ONE, TWO, THREE)); - } - - public void testOrWithNonCombinableExpressions() { - FieldAttribute fa = getFieldAttribute(); - - Or firstOr = new Or(EMPTY, new In(EMPTY, fa, List.of(ONE)), lessThanOf(fa, TWO)); - Or secondOr = new Or(EMPTY, firstOr, new In(EMPTY, fa, List.of(THREE))); - Expression e = new CombineDisjunctionsToIn().rule(secondOr); - assertEquals(Or.class, e.getClass()); - Or or = (Or) e; - assertEquals(or.left(), firstOr.right()); - assertEquals(In.class, or.right().getClass()); - In in = (In) or.right(); - assertEquals(fa, in.value()); - assertThat(in.list(), contains(ONE, THREE)); - } - - // Test BooleanFunctionEqualsElimination - public void testBoolEqualsSimplificationOnExpressions() { - BooleanFunctionEqualsElimination s = new BooleanFunctionEqualsElimination(); - Expression exp = new GreaterThan(EMPTY, getFieldAttribute(), new Literal(EMPTY, 0, DataType.INTEGER), null); - - assertEquals(exp, s.rule(new Equals(EMPTY, exp, TRUE))); - // TODO: Replace use of QL Not with ESQL Not - assertEquals(new Not(EMPTY, exp), s.rule(new Equals(EMPTY, exp, 
FALSE))); - } - - public void testBoolEqualsSimplificationOnFields() { - BooleanFunctionEqualsElimination s = new BooleanFunctionEqualsElimination(); - - FieldAttribute field = getFieldAttribute(); - - List comparisons = asList( - new Equals(EMPTY, field, TRUE), - new Equals(EMPTY, field, FALSE), - notEqualsOf(field, TRUE), - notEqualsOf(field, FALSE), - new Equals(EMPTY, NULL, TRUE), - new Equals(EMPTY, NULL, FALSE), - notEqualsOf(NULL, TRUE), - notEqualsOf(NULL, FALSE) - ); - - for (BinaryComparison comparison : comparisons) { - assertEquals(comparison, s.rule(comparison)); - } - } - - // Test Propagate Equals - - // a == 1 AND a == 2 -> FALSE - public void testDualEqualsConjunction() { - FieldAttribute fa = getFieldAttribute(); - Equals eq1 = equalsOf(fa, ONE); - Equals eq2 = equalsOf(fa, TWO); - - PropagateEquals rule = new PropagateEquals(); - Expression exp = rule.rule(new And(EMPTY, eq1, eq2)); - assertEquals(FALSE, exp); - } - - // 1 < a < 10 AND a == 10 -> FALSE - public void testEliminateRangeByEqualsOutsideInterval() { - FieldAttribute fa = getFieldAttribute(); - Equals eq1 = equalsOf(fa, new Literal(EMPTY, 10, DataType.INTEGER)); - Range r = rangeOf(fa, ONE, false, new Literal(EMPTY, 10, DataType.INTEGER), false); - - PropagateEquals rule = new PropagateEquals(); - Expression exp = rule.rule(new And(EMPTY, eq1, r)); - assertEquals(FALSE, exp); - } - - // a != 3 AND a = 3 -> FALSE - public void testPropagateEquals_VarNeq3AndVarEq3() { - FieldAttribute fa = getFieldAttribute(); - NotEquals neq = notEqualsOf(fa, THREE); - Equals eq = equalsOf(fa, THREE); - - PropagateEquals rule = new PropagateEquals(); - Expression exp = rule.rule(new And(EMPTY, neq, eq)); - assertEquals(FALSE, exp); - } - - // a != 4 AND a = 3 -> a = 3 - public void testPropagateEquals_VarNeq4AndVarEq3() { - FieldAttribute fa = getFieldAttribute(); - NotEquals neq = notEqualsOf(fa, FOUR); - Equals eq = equalsOf(fa, THREE); - - PropagateEquals rule = new PropagateEquals(); - Expression exp = rule.rule(new And(EMPTY, neq, eq)); - assertEquals(Equals.class, exp.getClass()); - assertEquals(eq, exp); - } - - // a = 2 AND a < 2 -> FALSE - public void testPropagateEquals_VarEq2AndVarLt2() { - FieldAttribute fa = getFieldAttribute(); - Equals eq = equalsOf(fa, TWO); - LessThan lt = lessThanOf(fa, TWO); - - PropagateEquals rule = new PropagateEquals(); - Expression exp = rule.rule(new And(EMPTY, eq, lt)); - assertEquals(FALSE, exp); - } - - // a = 2 AND a <= 2 -> a = 2 - public void testPropagateEquals_VarEq2AndVarLte2() { - FieldAttribute fa = getFieldAttribute(); - Equals eq = equalsOf(fa, TWO); - LessThanOrEqual lt = lessThanOrEqualOf(fa, TWO); - - PropagateEquals rule = new PropagateEquals(); - Expression exp = rule.rule(new And(EMPTY, eq, lt)); - assertEquals(eq, exp); - } - - // a = 2 AND a <= 1 -> FALSE - public void testPropagateEquals_VarEq2AndVarLte1() { - FieldAttribute fa = getFieldAttribute(); - Equals eq = equalsOf(fa, TWO); - LessThanOrEqual lt = lessThanOrEqualOf(fa, ONE); - - PropagateEquals rule = new PropagateEquals(); - Expression exp = rule.rule(new And(EMPTY, eq, lt)); - assertEquals(FALSE, exp); - } - - // a = 2 AND a > 2 -> FALSE - public void testPropagateEquals_VarEq2AndVarGt2() { - FieldAttribute fa = getFieldAttribute(); - Equals eq = equalsOf(fa, TWO); - GreaterThan gt = greaterThanOf(fa, TWO); - - PropagateEquals rule = new PropagateEquals(); - Expression exp = rule.rule(new And(EMPTY, eq, gt)); - assertEquals(FALSE, exp); - } - - // a = 2 AND a >= 2 -> a = 2 - public void 
testPropagateEquals_VarEq2AndVarGte2() { - FieldAttribute fa = getFieldAttribute(); - Equals eq = equalsOf(fa, TWO); - GreaterThanOrEqual gte = greaterThanOrEqualOf(fa, TWO); - - PropagateEquals rule = new PropagateEquals(); - Expression exp = rule.rule(new And(EMPTY, eq, gte)); - assertEquals(eq, exp); - } - - // a = 2 AND a > 3 -> FALSE - public void testPropagateEquals_VarEq2AndVarLt3() { - FieldAttribute fa = getFieldAttribute(); - Equals eq = equalsOf(fa, TWO); - GreaterThan gt = greaterThanOf(fa, THREE); - - PropagateEquals rule = new PropagateEquals(); - Expression exp = rule.rule(new And(EMPTY, eq, gt)); - assertEquals(FALSE, exp); - } - - // a = 2 AND a < 3 AND a > 1 AND a != 4 -> a = 2 - public void testPropagateEquals_VarEq2AndVarLt3AndVarGt1AndVarNeq4() { - FieldAttribute fa = getFieldAttribute(); - Equals eq = equalsOf(fa, TWO); - LessThan lt = lessThanOf(fa, THREE); - GreaterThan gt = greaterThanOf(fa, ONE); - NotEquals neq = notEqualsOf(fa, FOUR); - - PropagateEquals rule = new PropagateEquals(); - Expression and = Predicates.combineAnd(asList(eq, lt, gt, neq)); - Expression exp = rule.rule((And) and); - assertEquals(eq, exp); - } - - // a = 2 AND 1 < a < 3 AND a > 0 AND a != 4 -> a = 2 - public void testPropagateEquals_VarEq2AndVarRangeGt1Lt3AndVarGt0AndVarNeq4() { - FieldAttribute fa = getFieldAttribute(); - Equals eq = equalsOf(fa, TWO); - Range range = rangeOf(fa, ONE, false, THREE, false); - GreaterThan gt = greaterThanOf(fa, new Literal(EMPTY, 0, DataType.INTEGER)); - NotEquals neq = notEqualsOf(fa, FOUR); - - PropagateEquals rule = new PropagateEquals(); - Expression and = Predicates.combineAnd(asList(eq, range, gt, neq)); - Expression exp = rule.rule((And) and); - assertEquals(eq, exp); - } - - // a = 2 OR a > 1 -> a > 1 - public void testPropagateEquals_VarEq2OrVarGt1() { - FieldAttribute fa = getFieldAttribute(); - Equals eq = equalsOf(fa, TWO); - GreaterThan gt = greaterThanOf(fa, ONE); - - PropagateEquals rule = new PropagateEquals(); - Expression exp = rule.rule(new Or(EMPTY, eq, gt)); - assertEquals(gt, exp); - } - - // a = 2 OR a > 2 -> a >= 2 - public void testPropagateEquals_VarEq2OrVarGte2() { - FieldAttribute fa = getFieldAttribute(); - Equals eq = equalsOf(fa, TWO); - GreaterThan gt = greaterThanOf(fa, TWO); - - PropagateEquals rule = new PropagateEquals(); - Expression exp = rule.rule(new Or(EMPTY, eq, gt)); - assertEquals(GreaterThanOrEqual.class, exp.getClass()); - GreaterThanOrEqual gte = (GreaterThanOrEqual) exp; - assertEquals(TWO, gte.right()); - } - - // a = 2 OR a < 3 -> a < 3 - public void testPropagateEquals_VarEq2OrVarLt3() { - FieldAttribute fa = getFieldAttribute(); - Equals eq = equalsOf(fa, TWO); - LessThan lt = lessThanOf(fa, THREE); - - PropagateEquals rule = new PropagateEquals(); - Expression exp = rule.rule(new Or(EMPTY, eq, lt)); - assertEquals(lt, exp); - } - - // a = 3 OR a < 3 -> a <= 3 - public void testPropagateEquals_VarEq3OrVarLt3() { - FieldAttribute fa = getFieldAttribute(); - Equals eq = equalsOf(fa, THREE); - LessThan lt = lessThanOf(fa, THREE); - - PropagateEquals rule = new PropagateEquals(); - Expression exp = rule.rule(new Or(EMPTY, eq, lt)); - assertEquals(LessThanOrEqual.class, exp.getClass()); - LessThanOrEqual lte = (LessThanOrEqual) exp; - assertEquals(THREE, lte.right()); - } - - // a = 2 OR 1 < a < 3 -> 1 < a < 3 - public void testPropagateEquals_VarEq2OrVarRangeGt1Lt3() { - FieldAttribute fa = getFieldAttribute(); - Equals eq = equalsOf(fa, TWO); - Range range = rangeOf(fa, ONE, false, THREE, false); - - 
PropagateEquals rule = new PropagateEquals(); - Expression exp = rule.rule(new Or(EMPTY, eq, range)); - assertEquals(range, exp); - } - - // a = 2 OR 2 < a < 3 -> 2 <= a < 3 - public void testPropagateEquals_VarEq2OrVarRangeGt2Lt3() { - FieldAttribute fa = getFieldAttribute(); - Equals eq = equalsOf(fa, TWO); - Range range = rangeOf(fa, TWO, false, THREE, false); - - PropagateEquals rule = new PropagateEquals(); - Expression exp = rule.rule(new Or(EMPTY, eq, range)); - assertEquals(Range.class, exp.getClass()); - Range r = (Range) exp; - assertEquals(TWO, r.lower()); - assertTrue(r.includeLower()); - assertEquals(THREE, r.upper()); - assertFalse(r.includeUpper()); - } - - // a = 3 OR 2 < a < 3 -> 2 < a <= 3 - public void testPropagateEquals_VarEq3OrVarRangeGt2Lt3() { - FieldAttribute fa = getFieldAttribute(); - Equals eq = equalsOf(fa, THREE); - Range range = rangeOf(fa, TWO, false, THREE, false); - - PropagateEquals rule = new PropagateEquals(); - Expression exp = rule.rule(new Or(EMPTY, eq, range)); - assertEquals(Range.class, exp.getClass()); - Range r = (Range) exp; - assertEquals(TWO, r.lower()); - assertFalse(r.includeLower()); - assertEquals(THREE, r.upper()); - assertTrue(r.includeUpper()); - } - - // a = 2 OR a != 2 -> TRUE - public void testPropagateEquals_VarEq2OrVarNeq2() { - FieldAttribute fa = getFieldAttribute(); - Equals eq = equalsOf(fa, TWO); - NotEquals neq = notEqualsOf(fa, TWO); - - PropagateEquals rule = new PropagateEquals(); - Expression exp = rule.rule(new Or(EMPTY, eq, neq)); - assertEquals(TRUE, exp); - } - - // a = 2 OR a != 5 -> a != 5 - public void testPropagateEquals_VarEq2OrVarNeq5() { - FieldAttribute fa = getFieldAttribute(); - Equals eq = equalsOf(fa, TWO); - NotEquals neq = notEqualsOf(fa, FIVE); - - PropagateEquals rule = new PropagateEquals(); - Expression exp = rule.rule(new Or(EMPTY, eq, neq)); - assertEquals(NotEquals.class, exp.getClass()); - NotEquals ne = (NotEquals) exp; - assertEquals(FIVE, ne.right()); - } - - // a = 2 OR 3 < a < 4 OR a > 2 OR a!= 2 -> TRUE - public void testPropagateEquals_VarEq2OrVarRangeGt3Lt4OrVarGt2OrVarNe2() { - FieldAttribute fa = getFieldAttribute(); - Equals eq = equalsOf(fa, TWO); - Range range = rangeOf(fa, THREE, false, FOUR, false); - GreaterThan gt = greaterThanOf(fa, TWO); - NotEquals neq = notEqualsOf(fa, TWO); - - PropagateEquals rule = new PropagateEquals(); - Expression exp = rule.rule((Or) Predicates.combineOr(asList(eq, range, neq, gt))); - assertEquals(TRUE, exp); - } - - // a == 1 AND a == 2 -> nop for date/time fields - public void testPropagateEquals_ignoreDateTimeFields() { - FieldAttribute fa = getFieldAttribute("a", DataType.DATETIME); - Equals eq1 = equalsOf(fa, ONE); - Equals eq2 = equalsOf(fa, TWO); - And and = new And(EMPTY, eq1, eq2); - - PropagateEquals rule = new PropagateEquals(); - Expression exp = rule.rule(and); - assertEquals(and, exp); - } - - // 1 <= a < 10 AND a == 1 -> a == 1 - public void testEliminateRangeByEqualsInInterval() { - FieldAttribute fa = getFieldAttribute(); - Equals eq1 = equalsOf(fa, ONE); - Range r = rangeOf(fa, ONE, true, new Literal(EMPTY, 10, DataType.INTEGER), false); - - PropagateEquals rule = new PropagateEquals(); - Expression exp = rule.rule(new And(EMPTY, eq1, r)); - assertEquals(eq1, exp); - } - // - // Null folding - - public void testNullFoldingIsNull() { - FoldNull foldNull = new FoldNull(); - assertEquals(true, foldNull.rule(new IsNull(EMPTY, NULL)).fold()); - assertEquals(false, foldNull.rule(new IsNull(EMPTY, TRUE)).fold()); - } - - public void 
testGenericNullableExpression() { - FoldNull rule = new FoldNull(); - // arithmetic - assertNullLiteral(rule.rule(new Add(EMPTY, getFieldAttribute(), NULL))); - // comparison - assertNullLiteral(rule.rule(greaterThanOf(getFieldAttribute(), NULL))); - // regex - assertNullLiteral(rule.rule(new RLike(EMPTY, NULL, new RLikePattern("123")))); - } - - public void testNullFoldingDoesNotApplyOnLogicalExpressions() { - org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.FoldNull rule = - new org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.FoldNull(); - - Or or = new Or(EMPTY, NULL, TRUE); - assertEquals(or, rule.rule(or)); - or = new Or(EMPTY, NULL, NULL); - assertEquals(or, rule.rule(or)); - - And and = new And(EMPTY, NULL, TRUE); - assertEquals(and, rule.rule(and)); - and = new And(EMPTY, NULL, NULL); - assertEquals(and, rule.rule(and)); - } - - // - // Propagate nullability (IS NULL / IS NOT NULL) - // - - // a IS NULL AND a IS NOT NULL => false - public void testIsNullAndNotNull() { - FieldAttribute fa = getFieldAttribute(); - - And and = new And(EMPTY, new IsNull(EMPTY, fa), new IsNotNull(EMPTY, fa)); - assertEquals(FALSE, new org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.PropagateNullable().rule(and)); - } - - // a IS NULL AND b IS NOT NULL AND c IS NULL AND d IS NOT NULL AND e IS NULL AND a IS NOT NULL => false - public void testIsNullAndNotNullMultiField() { - FieldAttribute fa = getFieldAttribute(); - - And andOne = new And(EMPTY, new IsNull(EMPTY, fa), new IsNotNull(EMPTY, getFieldAttribute())); - And andTwo = new And(EMPTY, new IsNull(EMPTY, getFieldAttribute()), new IsNotNull(EMPTY, getFieldAttribute())); - And andThree = new And(EMPTY, new IsNull(EMPTY, getFieldAttribute()), new IsNotNull(EMPTY, fa)); - - And and = new And(EMPTY, andOne, new And(EMPTY, andThree, andTwo)); - - assertEquals(FALSE, new org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.PropagateNullable().rule(and)); - } - - // a IS NULL AND a > 1 => a IS NULL AND false - public void testIsNullAndComparison() { - FieldAttribute fa = getFieldAttribute(); - IsNull isNull = new IsNull(EMPTY, fa); - - And and = new And(EMPTY, isNull, greaterThanOf(fa, ONE)); - assertEquals(new And(EMPTY, isNull, nullOf(BOOLEAN)), new PropagateNullable().rule(and)); - } - - // a IS NULL AND b < 1 AND c < 1 AND a < 1 => a IS NULL AND b < 1 AND c < 1 => a IS NULL AND b < 1 AND c < 1 - public void testIsNullAndMultipleComparison() { - FieldAttribute fa = getFieldAttribute(); - IsNull isNull = new IsNull(EMPTY, fa); - - And nestedAnd = new And(EMPTY, lessThanOf(getFieldAttribute("b"), ONE), lessThanOf(getFieldAttribute("c"), ONE)); - And and = new And(EMPTY, isNull, nestedAnd); - And top = new And(EMPTY, and, lessThanOf(fa, ONE)); - - Expression optimized = new PropagateNullable().rule(top); - Expression expected = new And(EMPTY, and, nullOf(BOOLEAN)); - assertEquals(Predicates.splitAnd(expected), Predicates.splitAnd(optimized)); - } - - // ((a+1)/2) > 1 AND a + 2 AND a IS NULL AND b < 3 => NULL AND NULL AND a IS NULL AND b < 3 - public void testIsNullAndDeeplyNestedExpression() { - FieldAttribute fa = getFieldAttribute(); - IsNull isNull = new IsNull(EMPTY, fa); - - Expression nullified = new And( - EMPTY, - greaterThanOf(new Div(EMPTY, new Add(EMPTY, fa, ONE), TWO), ONE), - greaterThanOf(new Add(EMPTY, fa, TWO), ONE) - ); - Expression kept = new And(EMPTY, isNull, lessThanOf(getFieldAttribute("b"), THREE)); - And and = new And(EMPTY, nullified, kept); - - Expression optimized = new 
PropagateNullable().rule(and); - Expression expected = new And(EMPTY, new And(EMPTY, nullOf(BOOLEAN), nullOf(BOOLEAN)), kept); - - assertEquals(Predicates.splitAnd(expected), Predicates.splitAnd(optimized)); - } - - // a IS NULL OR a IS NOT NULL => no change - // a IS NULL OR a > 1 => no change - public void testIsNullInDisjunction() { - FieldAttribute fa = getFieldAttribute(); - - Or or = new Or(EMPTY, new IsNull(EMPTY, fa), new IsNotNull(EMPTY, fa)); - Filter dummy = new Filter(EMPTY, relation(), or); - LogicalPlan transformed = new PropagateNullable().apply(dummy); - assertSame(dummy, transformed); - assertEquals(or, ((Filter) transformed).condition()); - - or = new Or(EMPTY, new IsNull(EMPTY, fa), greaterThanOf(fa, ONE)); - dummy = new Filter(EMPTY, relation(), or); - transformed = new PropagateNullable().apply(dummy); - assertSame(dummy, transformed); - assertEquals(or, ((Filter) transformed).condition()); - } - - // a + 1 AND (a IS NULL OR a > 3) => no change - public void testIsNullDisjunction() { - FieldAttribute fa = getFieldAttribute(); - IsNull isNull = new IsNull(EMPTY, fa); - - Or or = new Or(EMPTY, isNull, greaterThanOf(fa, THREE)); - And and = new And(EMPTY, new Add(EMPTY, fa, ONE), or); - - assertEquals(and, new PropagateNullable().rule(and)); - } - - // - // Like / Regex - // - public void testMatchAllLikeToExist() { - for (String s : asList("%", "%%", "%%%")) { - LikePattern pattern = new LikePattern(s, (char) 0); - FieldAttribute fa = getFieldAttribute(); - Like l = new Like(EMPTY, fa, pattern); - Expression e = new ReplaceRegexMatch().rule(l); - assertEquals(IsNotNull.class, e.getClass()); - IsNotNull inn = (IsNotNull) e; - assertEquals(fa, inn.field()); - } - } - - public void testMatchAllWildcardLikeToExist() { - for (String s : asList("*", "**", "***")) { - WildcardPattern pattern = new WildcardPattern(s); - FieldAttribute fa = getFieldAttribute(); - WildcardLike l = new WildcardLike(EMPTY, fa, pattern); - Expression e = new ReplaceRegexMatch().rule(l); - assertEquals(IsNotNull.class, e.getClass()); - IsNotNull inn = (IsNotNull) e; - assertEquals(fa, inn.field()); - } - } - - public void testMatchAllRLikeToExist() { - RLikePattern pattern = new RLikePattern(".*"); - FieldAttribute fa = getFieldAttribute(); - RLike l = new RLike(EMPTY, fa, pattern); - Expression e = new ReplaceRegexMatch().rule(l); - assertEquals(IsNotNull.class, e.getClass()); - IsNotNull inn = (IsNotNull) e; - assertEquals(fa, inn.field()); - } - - public void testExactMatchLike() { - for (String s : asList("ab", "ab0%", "ab0_c")) { - LikePattern pattern = new LikePattern(s, '0'); - FieldAttribute fa = getFieldAttribute(); - Like l = new Like(EMPTY, fa, pattern); - Expression e = new ReplaceRegexMatch().rule(l); - assertEquals(Equals.class, e.getClass()); - Equals eq = (Equals) e; - assertEquals(fa, eq.left()); - assertEquals(s.replace("0", StringUtils.EMPTY), eq.right().fold()); - } - } - - public void testExactMatchWildcardLike() { - String s = "ab"; - WildcardPattern pattern = new WildcardPattern(s); - FieldAttribute fa = getFieldAttribute(); - WildcardLike l = new WildcardLike(EMPTY, fa, pattern); - Expression e = new ReplaceRegexMatch().rule(l); - assertEquals(Equals.class, e.getClass()); - Equals eq = (Equals) e; - assertEquals(fa, eq.left()); - assertEquals(s, eq.right().fold()); - } - - public void testExactMatchRLike() { - RLikePattern pattern = new RLikePattern("abc"); - FieldAttribute fa = getFieldAttribute(); - RLike l = new RLike(EMPTY, fa, pattern); - Expression e = new 
ReplaceRegexMatch().rule(l); - assertEquals(Equals.class, e.getClass()); - Equals eq = (Equals) e; - assertEquals(fa, eq.left()); - assertEquals("abc", eq.right().fold()); - } - - private void assertNullLiteral(Expression expression) { - assertEquals(Literal.class, expression.getClass()); - assertNull(expression.fold()); - } - - private IsNotNull isNotNull(Expression field) { - return new IsNotNull(EMPTY, field); - } - - private IsNull isNull(Expression field) { - return new IsNull(EMPTY, field); - } - - private Literal nullOf(DataType dataType) { - return new Literal(Source.EMPTY, null, dataType); - } - // - // Logical simplifications - // - - public void testLiteralsOnTheRight() { - Alias a = new Alias(EMPTY, "a", new Literal(EMPTY, 10, INTEGER)); - Expression result = new LiteralsOnTheRight().rule(equalsOf(FIVE, a)); - assertTrue(result instanceof Equals); - Equals eq = (Equals) result; - assertEquals(a, eq.left()); - assertEquals(FIVE, eq.right()); - - // Note: Null Equals test removed here - } - - public void testBoolSimplifyOr() { - org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.BooleanSimplification simplification = - new org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.BooleanSimplification(); - - assertEquals(TRUE, simplification.rule(new Or(EMPTY, TRUE, TRUE))); - assertEquals(TRUE, simplification.rule(new Or(EMPTY, TRUE, DUMMY_EXPRESSION))); - assertEquals(TRUE, simplification.rule(new Or(EMPTY, DUMMY_EXPRESSION, TRUE))); - - assertEquals(FALSE, simplification.rule(new Or(EMPTY, FALSE, FALSE))); - assertEquals(DUMMY_EXPRESSION, simplification.rule(new Or(EMPTY, FALSE, DUMMY_EXPRESSION))); - assertEquals(DUMMY_EXPRESSION, simplification.rule(new Or(EMPTY, DUMMY_EXPRESSION, FALSE))); - } - - public void testBoolSimplifyAnd() { - org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.BooleanSimplification simplification = - new org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.BooleanSimplification(); - - assertEquals(TRUE, simplification.rule(new And(EMPTY, TRUE, TRUE))); - assertEquals(DUMMY_EXPRESSION, simplification.rule(new And(EMPTY, TRUE, DUMMY_EXPRESSION))); - assertEquals(DUMMY_EXPRESSION, simplification.rule(new And(EMPTY, DUMMY_EXPRESSION, TRUE))); - - assertEquals(FALSE, simplification.rule(new And(EMPTY, FALSE, FALSE))); - assertEquals(FALSE, simplification.rule(new And(EMPTY, FALSE, DUMMY_EXPRESSION))); - assertEquals(FALSE, simplification.rule(new And(EMPTY, DUMMY_EXPRESSION, FALSE))); - } - - public void testBoolCommonFactorExtraction() { - org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.BooleanSimplification simplification = - new org.elasticsearch.xpack.esql.core.optimizer.OptimizerRules.BooleanSimplification(); - - Expression a1 = new org.elasticsearch.xpack.esql.core.optimizer.OptimizerRulesTests.DummyBooleanExpression(EMPTY, 1); - Expression a2 = new org.elasticsearch.xpack.esql.core.optimizer.OptimizerRulesTests.DummyBooleanExpression(EMPTY, 1); - Expression b = new org.elasticsearch.xpack.esql.core.optimizer.OptimizerRulesTests.DummyBooleanExpression(EMPTY, 2); - Expression c = new org.elasticsearch.xpack.esql.core.optimizer.OptimizerRulesTests.DummyBooleanExpression(EMPTY, 3); - - Or actual = new Or(EMPTY, new And(EMPTY, a1, b), new And(EMPTY, a2, c)); - And expected = new And(EMPTY, a1, new Or(EMPTY, b, c)); - - assertEquals(expected, simplification.rule(actual)); - } -} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index bc70ce64944d1..a99ce5d873b44 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -9,11 +9,13 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.Build; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.Tuple; +import org.elasticsearch.geometry.Circle; import org.elasticsearch.geometry.Polygon; import org.elasticsearch.geometry.ShapeType; import org.elasticsearch.index.IndexMode; @@ -33,21 +35,19 @@ import org.elasticsearch.xpack.esql.analysis.EnrichResolution; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.expression.Order; -import org.elasticsearch.xpack.esql.core.expression.function.FunctionRegistry; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Not; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparison; import org.elasticsearch.xpack.esql.core.index.EsIndex; import org.elasticsearch.xpack.esql.core.index.IndexResolution; -import org.elasticsearch.xpack.esql.core.plan.logical.Filter; -import org.elasticsearch.xpack.esql.core.plan.logical.Limit; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.enrich.ResolvedEnrichPolicy; @@ -63,7 +63,9 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialIntersects; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialWithin; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StDistance; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThan; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThanOrEqual; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; @@ -74,6 +76,9 @@ import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.Filter; +import 
org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.TopN; import org.elasticsearch.xpack.esql.plan.logical.join.Join; @@ -107,10 +112,10 @@ import org.elasticsearch.xpack.esql.querydsl.query.SpatialRelatesQuery; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; import org.elasticsearch.xpack.esql.stats.SearchStats; -import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.junit.Before; import java.util.Arrays; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -131,6 +136,7 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.statsForMissingField; import static org.elasticsearch.xpack.esql.EsqlTestUtils.withDefaultLimitWarning; import static org.elasticsearch.xpack.esql.SerializationTestUtils.assertSerialization; +import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.analyze; import static org.elasticsearch.xpack.esql.core.expression.Expressions.name; import static org.elasticsearch.xpack.esql.core.expression.Expressions.names; import static org.elasticsearch.xpack.esql.core.expression.Order.OrderDirection.ASC; @@ -199,7 +205,7 @@ public void init() { parser = new EsqlParser(); logicalOptimizer = new LogicalPlanOptimizer(new LogicalOptimizerContext(EsqlTestUtils.TEST_CFG)); physicalPlanOptimizer = new PhysicalPlanOptimizer(new PhysicalOptimizerContext(config)); - FunctionRegistry functionRegistry = new EsqlFunctionRegistry(); + EsqlFunctionRegistry functionRegistry = new EsqlFunctionRegistry(); mapper = new Mapper(functionRegistry); var enrichResolution = setupEnrichResolution(); // Most tests used data from the test index, so we load it here, and use it in the plan() function. @@ -207,11 +213,11 @@ public void init() { allFieldRowSize = testData.mapping.values() .stream() .mapToInt( - f -> (EstimatesRowSize.estimateSize(EsqlDataTypes.widenSmallNumericTypes(f.getDataType())) + f.getProperties() + f -> (EstimatesRowSize.estimateSize(f.getDataType().widenSmallNumeric()) + f.getProperties() .values() .stream() // check one more level since the mapping contains TEXT fields with KEYWORD multi-fields - .mapToInt(x -> EstimatesRowSize.estimateSize(EsqlDataTypes.widenSmallNumericTypes(x.getDataType()))) + .mapToInt(x -> EstimatesRowSize.estimateSize(x.getDataType().widenSmallNumeric())) .sum()) ) .sum(); @@ -231,7 +237,7 @@ public void init() { TestDataSource makeTestDataSource( String indexName, String mappingFileName, - FunctionRegistry functionRegistry, + EsqlFunctionRegistry functionRegistry, EnrichResolution enrichResolution ) { Map mapping = loadMapping(mappingFileName); @@ -3476,6 +3482,234 @@ public void testPushSpatialIntersectsShapeToSource() { } } + public void testPushSpatialDistanceToSource() { + for (String distanceFunction : new String[] { + "ST_DISTANCE(location, TO_GEOPOINT(\"POINT(12.565 55.673)\"))", + "ST_DISTANCE(TO_GEOPOINT(\"POINT(12.565 55.673)\"), location)" }) { + + for (boolean reverse : new Boolean[] { false, true }) { + for (String op : new String[] { "<", "<=", ">", ">=", "==" }) { + var expected = ExpectedComparison.from(op, reverse, 600000.0); + var predicate = reverse ? 
"600000 " + op + " " + distanceFunction : distanceFunction + " " + op + " 600000"; + var query = "FROM airports | WHERE " + predicate + " AND scalerank > 1"; + var plan = this.physicalPlan(query, airports); + var limit = as(plan, LimitExec.class); + var exchange = as(limit.child(), ExchangeExec.class); + var fragment = as(exchange.child(), FragmentExec.class); + var limit2 = as(fragment.fragment(), Limit.class); + var filter = as(limit2.child(), Filter.class); + var and = as(filter.condition(), And.class); + var comp = as(and.left(), EsqlBinaryComparison.class); + assertThat("filter contains expected binary comparison for " + predicate, comp, instanceOf(expected.comp)); + assertThat("filter contains ST_DISTANCE", comp.left(), instanceOf(StDistance.class)); + + var optimized = optimizedPlan(plan); + var topLimit = as(optimized, LimitExec.class); + exchange = as(topLimit.child(), ExchangeExec.class); + var project = as(exchange.child(), ProjectExec.class); + var fieldExtract = as(project.child(), FieldExtractExec.class); + var source = source(fieldExtract.child()); + var bool = as(source.query(), BoolQueryBuilder.class); + var rangeQueryBuilders = bool.filter().stream().filter(p -> p instanceof SingleValueQuery.Builder).toList(); + assertThat("Expected one range query builder", rangeQueryBuilders.size(), equalTo(1)); + assertThat(((SingleValueQuery.Builder) rangeQueryBuilders.get(0)).field(), equalTo("scalerank")); + if (op.equals("==")) { + var boolQueryBuilders = bool.filter().stream().filter(p -> p instanceof BoolQueryBuilder).toList(); + assertThat("Expected one sub-bool query builder", boolQueryBuilders.size(), equalTo(1)); + var bool2 = as(boolQueryBuilders.get(0), BoolQueryBuilder.class); + var shapeQueryBuilders = bool2.must() + .stream() + .filter(p -> p instanceof SpatialRelatesQuery.ShapeQueryBuilder) + .toList(); + assertShapeQueryRange(shapeQueryBuilders, Math.nextDown(expected.value), expected.value); + } else { + var shapeQueryBuilders = bool.filter() + .stream() + .filter(p -> p instanceof SpatialRelatesQuery.ShapeQueryBuilder) + .toList(); + assertThat("Expected one shape query builder", shapeQueryBuilders.size(), equalTo(1)); + var condition = as(shapeQueryBuilders.get(0), SpatialRelatesQuery.ShapeQueryBuilder.class); + assertThat("Geometry field name", condition.fieldName(), equalTo("location")); + assertThat("Spatial relationship", condition.relation(), equalTo(expected.shapeRelation())); + assertThat("Geometry is Circle", condition.shape().type(), equalTo(ShapeType.CIRCLE)); + var circle = as(condition.shape(), Circle.class); + assertThat("Circle center-x", circle.getX(), equalTo(12.565)); + assertThat("Circle center-y", circle.getY(), equalTo(55.673)); + assertThat("Circle radius for predicate " + predicate, circle.getRadiusMeters(), equalTo(expected.value)); + } + } + } + } + } + + public void testPushSpatialDistanceBandToSource() { + var query = """ + FROM airports + | WHERE ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) <= 600000 + AND ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) >= 400000 + """; + var plan = this.physicalPlan(query, airports); + var limit = as(plan, LimitExec.class); + var exchange = as(limit.child(), ExchangeExec.class); + var fragment = as(exchange.child(), FragmentExec.class); + var limit2 = as(fragment.fragment(), Limit.class); + var filter = as(limit2.child(), Filter.class); + var and = as(filter.condition(), And.class); + for (Expression expression : and.arguments()) { + var comp = as(expression, 
EsqlBinaryComparison.class);
+            var expectedComp = comp.equals(and.left()) ? LessThanOrEqual.class : GreaterThanOrEqual.class;
+            assertThat("filter contains expected binary comparison", comp, instanceOf(expectedComp));
+            assertThat("filter contains ST_DISTANCE", comp.left(), instanceOf(StDistance.class));
+        }
+
+        var optimized = optimizedPlan(plan);
+        var topLimit = as(optimized, LimitExec.class);
+        exchange = as(topLimit.child(), ExchangeExec.class);
+        var project = as(exchange.child(), ProjectExec.class);
+        var fieldExtract = as(project.child(), FieldExtractExec.class);
+        var source = source(fieldExtract.child());
+        var bool = as(source.query(), BoolQueryBuilder.class);
+        var rangeQueryBuilders = bool.filter().stream().filter(p -> p instanceof SingleValueQuery.Builder).toList();
+        assertThat("Expected zero range query builders", rangeQueryBuilders.size(), equalTo(0));
+        var shapeQueryBuilders = bool.must().stream().filter(p -> p instanceof SpatialRelatesQuery.ShapeQueryBuilder).toList();
+        assertShapeQueryRange(shapeQueryBuilders, 400000.0, 600000.0);
+    }
+
+    public void testPushSpatialDistanceDisjointBandsToSource() {
+        var query = """
+            FROM airports
+            | WHERE (ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) <= 600000
+                AND ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) >= 400000)
+              OR
+                (ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) <= 300000
+                AND ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) >= 200000)
+            """;
+        var plan = this.physicalPlan(query, airports);
+        var limit = as(plan, LimitExec.class);
+        var exchange = as(limit.child(), ExchangeExec.class);
+        var fragment = as(exchange.child(), FragmentExec.class);
+        var limit2 = as(fragment.fragment(), Limit.class);
+        var filter = as(limit2.child(), Filter.class);
+        var or = as(filter.condition(), Or.class);
+        assertThat("OR has two predicates", or.arguments().size(), equalTo(2));
+        for (Expression expression : or.arguments()) {
+            var and = as(expression, And.class);
+            for (Expression exp : and.arguments()) {
+                var comp = as(exp, EsqlBinaryComparison.class);
+                var expectedComp = comp.equals(and.left()) ? LessThanOrEqual.class : GreaterThanOrEqual.class;
+                assertThat("filter contains expected binary comparison", comp, instanceOf(expectedComp));
+                assertThat("filter contains ST_DISTANCE", comp.left(), instanceOf(StDistance.class));
+            }
+        }
+
+        var optimized = optimizedPlan(plan);
+        var topLimit = as(optimized, LimitExec.class);
+        exchange = as(topLimit.child(), ExchangeExec.class);
+        var project = as(exchange.child(), ProjectExec.class);
+        var fieldExtract = as(project.child(), FieldExtractExec.class);
+        var source = source(fieldExtract.child());
+        var bool = as(source.query(), BoolQueryBuilder.class);
+        var disjunctiveQueryBuilders = bool.should().stream().filter(p -> p instanceof BoolQueryBuilder).toList();
+        assertThat("Expected two disjunctive query builders", disjunctiveQueryBuilders.size(), equalTo(2));
+        for (int i = 0; i < disjunctiveQueryBuilders.size(); i++) {
+            var subRangeBool = as(disjunctiveQueryBuilders.get(i), BoolQueryBuilder.class);
+            var shapeQueryBuilders = subRangeBool.must().stream().filter(p -> p instanceof SpatialRelatesQuery.ShapeQueryBuilder).toList();
+            assertShapeQueryRange(shapeQueryBuilders, i == 0 ? 400000.0 : 200000.0, i == 0 ? 600000.0 : 300000.0);
+        }
+    }
+
+    public void testPushSpatialDistanceComplexPredicateToSource() {
+        var query = """
+            FROM airports
+            | WHERE ((ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) <= 600000
+                AND ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) >= 400000
+                AND NOT (ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) <= 500000
+                    AND ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) >= 430000))
+              OR (ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) <= 300000
+                AND ST_DISTANCE(location, TO_GEOPOINT("POINT(12.565 55.673)")) >= 200000))
+              AND NOT abbrev == "PLQ"
+              AND scalerank < 6
+            """;
+        var plan = this.physicalPlan(query, airports);
+        var limit = as(plan, LimitExec.class);
+        var exchange = as(limit.child(), ExchangeExec.class);
+        var fragment = as(exchange.child(), FragmentExec.class);
+        var limit2 = as(fragment.fragment(), Limit.class);
+        var filter = as(limit2.child(), Filter.class);
+        var outerAnd = as(filter.condition(), And.class);
+        var outerLeft = as(outerAnd.left(), And.class);
+        as(outerLeft.right(), Not.class);
+        as(outerAnd.right(), LessThan.class);
+        var or = as(outerLeft.left(), Or.class);
+        var innerAnd1 = as(or.left(), And.class);
+        var innerAnd2 = as(or.right(), And.class);
+        for (Expression exp : innerAnd2.arguments()) {
+            var comp = as(exp, EsqlBinaryComparison.class);
+            var expectedComp = comp.equals(innerAnd2.left()) ? LessThanOrEqual.class : GreaterThanOrEqual.class;
+            assertThat("filter contains expected binary comparison", comp, instanceOf(expectedComp));
+            assertThat("filter contains ST_DISTANCE", comp.left(), instanceOf(StDistance.class));
+        }
+
+        var optimized = optimizedPlan(plan);
+        var topLimit = as(optimized, LimitExec.class);
+        exchange = as(topLimit.child(), ExchangeExec.class);
+        var project = as(exchange.child(), ProjectExec.class);
+        var fieldExtract = as(project.child(), FieldExtractExec.class);
+        var source = source(fieldExtract.child());
+        var bool = as(source.query(), BoolQueryBuilder.class);
+        assertThat("Expected boolean query of two MUST clauses", bool.must().size(), equalTo(2));
+        assertThat("Expected boolean query of one FILTER clause", bool.filter().size(), equalTo(1));
+        var boolDisjunctive = as(bool.filter().get(0), BoolQueryBuilder.class);
+        var disjunctiveQueryBuilders = boolDisjunctive.should().stream().filter(p -> p instanceof BoolQueryBuilder).toList();
+        assertThat("Expected two disjunctive query builders", disjunctiveQueryBuilders.size(), equalTo(2));
+        for (int i = 0; i < disjunctiveQueryBuilders.size(); i++) {
+            var subRangeBool = as(disjunctiveQueryBuilders.get(i), BoolQueryBuilder.class);
+            var shapeQueryBuilders = subRangeBool.must().stream().filter(p -> p instanceof SpatialRelatesQuery.ShapeQueryBuilder).toList();
+            assertShapeQueryRange(shapeQueryBuilders, i == 0 ? 400000.0 : 200000.0, i == 0 ? 600000.0 : 300000.0);
+        }
+    }
+
+    private void assertShapeQueryRange(List shapeQueryBuilders, double min, double max) {
+        assertThat("Expected two shape query builders", shapeQueryBuilders.size(), equalTo(2));
+        var relationStats = new HashMap<ShapeRelation, Integer>();
+        for (var builder : shapeQueryBuilders) {
+            var condition = as(builder, SpatialRelatesQuery.ShapeQueryBuilder.class);
+            var expected = condition.relation() == ShapeRelation.INTERSECTS ? max : min;
+            relationStats.compute(condition.relation(), (r, c) -> c == null ?
1 : c + 1); + assertThat("Geometry field name", condition.fieldName(), equalTo("location")); + assertThat("Geometry is Circle", condition.shape().type(), equalTo(ShapeType.CIRCLE)); + var circle = as(condition.shape(), Circle.class); + assertThat("Circle center-x", circle.getX(), equalTo(12.565)); + assertThat("Circle center-y", circle.getY(), equalTo(55.673)); + assertThat("Circle radius for shape relation " + condition.relation(), circle.getRadiusMeters(), equalTo(expected)); + } + assertThat("Expected one INTERSECTS and one DISJOINT", relationStats.size(), equalTo(2)); + assertThat("Expected one INTERSECTS", relationStats.get(ShapeRelation.INTERSECTS), equalTo(1)); + assertThat("Expected one DISJOINT", relationStats.get(ShapeRelation.DISJOINT), equalTo(1)); + } + + private record ExpectedComparison(Class comp, double value) { + ShapeRelation shapeRelation() { + return comp.getSimpleName().startsWith("GreaterThan") ? ShapeRelation.DISJOINT : ShapeRelation.INTERSECTS; + } + + static ExpectedComparison from(String op, boolean reverse, double value) { + double up = Math.nextUp(value); + double down = Math.nextDown(value); + return switch (op) { + case "<" -> reverse ? from(GreaterThan.class, up) : from(LessThan.class, down); + case "<=" -> reverse ? from(GreaterThanOrEqual.class, value) : from(LessThanOrEqual.class, value); + case ">" -> reverse ? from(LessThan.class, down) : from(GreaterThan.class, up); + case ">=" -> reverse ? from(LessThanOrEqual.class, value) : from(GreaterThanOrEqual.class, value); + default -> from(Equals.class, value); + }; + } + + static ExpectedComparison from(Class comp, double value) { + return new ExpectedComparison(comp, value); + } + } + public void testPushCartesianSpatialIntersectsToSource() { for (String query : new String[] { """ FROM airports_web @@ -4093,10 +4327,16 @@ public void testMaxQueryDepthPlusExpressionDepth() { } public void testLookupSimple() { - PhysicalPlan plan = physicalPlan(""" - FROM test | - RENAME languages AS int | - LOOKUP int_number_names ON int"""); + String query = """ + FROM test + | RENAME languages AS int + | LOOKUP int_number_names ON int"""; + if (Build.current().isProductionRelease()) { + var e = expectThrows(ParsingException.class, () -> analyze(query)); + assertThat(e.getMessage(), containsString("line 3:4: LOOKUP is in preview and only available in SNAPSHOT build")); + return; + } + PhysicalPlan plan = physicalPlan(query); var join = as(plan, HashJoinExec.class); assertMap(join.matchFields().stream().map(Object::toString).toList(), matchesList().item(startsWith("int{r}"))); assertMap( @@ -4132,14 +4372,20 @@ public void testLookupSimple() { * } */ public void testLookupThenProject() { - PhysicalPlan plan = optimizedPlan(physicalPlan(""" + String query = """ FROM employees | SORT emp_no | LIMIT 4 | RENAME languages AS int | LOOKUP int_number_names ON int | RENAME int AS languages, name AS lang_name - | KEEP emp_no, languages, lang_name""")); + | KEEP emp_no, languages, lang_name"""; + if (Build.current().isProductionRelease()) { + var e = expectThrows(ParsingException.class, () -> analyze(query)); + assertThat(e.getMessage(), containsString("line 5:4: LOOKUP is in preview and only available in SNAPSHOT build")); + return; + } + PhysicalPlan plan = optimizedPlan(physicalPlan(query)); var outerProject = as(plan, ProjectExec.class); assertThat(outerProject.projections().toString(), containsString("AS lang_name")); @@ -4184,14 +4430,19 @@ public void testLookupThenProject() { * } */ public void testLookupThenTopN() { - var 
plan = physicalPlan(""" - FROM employees - | RENAME languages AS int - | LOOKUP int_number_names ON int - | RENAME name AS languages - | KEEP languages, emp_no - | SORT languages ASC, emp_no ASC - """); + String query = """ + FROM employees + | RENAME languages AS int + | LOOKUP int_number_names ON int + | RENAME name AS languages + | KEEP languages, emp_no + | SORT languages ASC, emp_no ASC"""; + if (Build.current().isProductionRelease()) { + var e = expectThrows(ParsingException.class, () -> analyze(query)); + assertThat(e.getMessage(), containsString("line 3:4: LOOKUP is in preview and only available in SNAPSHOT build")); + return; + } + var plan = physicalPlan(query); ProjectExec outerProject = as(plan, ProjectExec.class); TopNExec outerTopN = as(outerProject.child(), TopNExec.class); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanFunctionEqualsEliminationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanFunctionEqualsEliminationTests.java new file mode 100644 index 0000000000000..d5d274d0fc62f --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanFunctionEqualsEliminationTests.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.optimizer.rules; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; +import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Not; +import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparison; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThan; + +import java.util.List; + +import static java.util.Arrays.asList; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.getFieldAttribute; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.notEqualsOf; +import static org.elasticsearch.xpack.esql.core.expression.Literal.FALSE; +import static org.elasticsearch.xpack.esql.core.expression.Literal.NULL; +import static org.elasticsearch.xpack.esql.core.expression.Literal.TRUE; +import static org.elasticsearch.xpack.esql.core.tree.Source.EMPTY; + +public class BooleanFunctionEqualsEliminationTests extends ESTestCase { + + public void testBoolEqualsSimplificationOnExpressions() { + BooleanFunctionEqualsElimination s = new BooleanFunctionEqualsElimination(); + Expression exp = new GreaterThan(EMPTY, getFieldAttribute(), new Literal(EMPTY, 0, DataType.INTEGER), null); + + assertEquals(exp, s.rule(new Equals(EMPTY, exp, TRUE))); + // TODO: Replace use of QL Not with ESQL Not + assertEquals(new Not(EMPTY, exp), s.rule(new Equals(EMPTY, exp, FALSE))); + } + + public void testBoolEqualsSimplificationOnFields() { + BooleanFunctionEqualsElimination s = new BooleanFunctionEqualsElimination(); + + FieldAttribute field = getFieldAttribute(); + + List comparisons = asList( + new Equals(EMPTY, field, TRUE), + new Equals(EMPTY, field, FALSE), + 
notEqualsOf(field, TRUE), + notEqualsOf(field, FALSE), + new Equals(EMPTY, NULL, TRUE), + new Equals(EMPTY, NULL, FALSE), + notEqualsOf(NULL, TRUE), + notEqualsOf(NULL, FALSE) + ); + + for (BinaryComparison comparison : comparisons) { + assertEquals(comparison, s.rule(comparison)); + } + } + +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanSimplificationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanSimplificationTests.java new file mode 100644 index 0000000000000..03cd5921a80e2 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/BooleanSimplificationTests.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.optimizer.rules; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; + +import static org.elasticsearch.xpack.esql.core.expression.Literal.FALSE; +import static org.elasticsearch.xpack.esql.core.expression.Literal.TRUE; +import static org.elasticsearch.xpack.esql.core.tree.Source.EMPTY; + +public class BooleanSimplificationTests extends ESTestCase { + private static final Expression DUMMY_EXPRESSION = + new org.elasticsearch.xpack.esql.core.optimizer.OptimizerRulesTests.DummyBooleanExpression(EMPTY, 0); + + public void testBoolSimplifyOr() { + OptimizerRules.BooleanSimplification simplification = new OptimizerRules.BooleanSimplification(); + + assertEquals(TRUE, simplification.rule(new Or(EMPTY, TRUE, TRUE))); + assertEquals(TRUE, simplification.rule(new Or(EMPTY, TRUE, DUMMY_EXPRESSION))); + assertEquals(TRUE, simplification.rule(new Or(EMPTY, DUMMY_EXPRESSION, TRUE))); + + assertEquals(FALSE, simplification.rule(new Or(EMPTY, FALSE, FALSE))); + assertEquals(DUMMY_EXPRESSION, simplification.rule(new Or(EMPTY, FALSE, DUMMY_EXPRESSION))); + assertEquals(DUMMY_EXPRESSION, simplification.rule(new Or(EMPTY, DUMMY_EXPRESSION, FALSE))); + } + + public void testBoolSimplifyAnd() { + OptimizerRules.BooleanSimplification simplification = new OptimizerRules.BooleanSimplification(); + + assertEquals(TRUE, simplification.rule(new And(EMPTY, TRUE, TRUE))); + assertEquals(DUMMY_EXPRESSION, simplification.rule(new And(EMPTY, TRUE, DUMMY_EXPRESSION))); + assertEquals(DUMMY_EXPRESSION, simplification.rule(new And(EMPTY, DUMMY_EXPRESSION, TRUE))); + + assertEquals(FALSE, simplification.rule(new And(EMPTY, FALSE, FALSE))); + assertEquals(FALSE, simplification.rule(new And(EMPTY, FALSE, DUMMY_EXPRESSION))); + assertEquals(FALSE, simplification.rule(new And(EMPTY, DUMMY_EXPRESSION, FALSE))); + } + + public void testBoolCommonFactorExtraction() { + OptimizerRules.BooleanSimplification simplification = new OptimizerRules.BooleanSimplification(); + + Expression a1 = new org.elasticsearch.xpack.esql.core.optimizer.OptimizerRulesTests.DummyBooleanExpression(EMPTY, 1); + Expression a2 = new org.elasticsearch.xpack.esql.core.optimizer.OptimizerRulesTests.DummyBooleanExpression(EMPTY, 1); + Expression b = new 
org.elasticsearch.xpack.esql.core.optimizer.OptimizerRulesTests.DummyBooleanExpression(EMPTY, 2); + Expression c = new org.elasticsearch.xpack.esql.core.optimizer.OptimizerRulesTests.DummyBooleanExpression(EMPTY, 3); + + Or actual = new Or(EMPTY, new And(EMPTY, a1, b), new And(EMPTY, a2, c)); + And expected = new And(EMPTY, a1, new Or(EMPTY, b, c)); + + assertEquals(expected, simplification.rule(actual)); + } + +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineDisjunctionsToInTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineDisjunctionsToInTests.java new file mode 100644 index 0000000000000..7bc2d69cb56e6 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/CombineDisjunctionsToInTests.java @@ -0,0 +1,132 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.optimizer.rules; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; +import org.elasticsearch.xpack.esql.plan.logical.Filter; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; + +import java.util.List; + +import static java.util.Arrays.asList; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.ONE; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.THREE; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.TWO; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.equalsOf; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.getFieldAttribute; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.lessThanOf; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.relation; +import static org.elasticsearch.xpack.esql.core.tree.Source.EMPTY; +import static org.hamcrest.Matchers.contains; + +public class CombineDisjunctionsToInTests extends ESTestCase { + public void testTwoEqualsWithOr() { + FieldAttribute fa = getFieldAttribute(); + + Or or = new Or(EMPTY, equalsOf(fa, ONE), equalsOf(fa, TWO)); + Expression e = new CombineDisjunctionsToIn().rule(or); + assertEquals(In.class, e.getClass()); + In in = (In) e; + assertEquals(fa, in.value()); + assertThat(in.list(), contains(ONE, TWO)); + } + + public void testTwoEqualsWithSameValue() { + FieldAttribute fa = getFieldAttribute(); + + Or or = new Or(EMPTY, equalsOf(fa, ONE), equalsOf(fa, ONE)); + Expression e = new CombineDisjunctionsToIn().rule(or); + assertEquals(Equals.class, e.getClass()); + Equals eq = (Equals) e; + assertEquals(fa, eq.left()); + assertEquals(ONE, eq.right()); + } + + public void testOneEqualsOneIn() { + FieldAttribute fa = getFieldAttribute(); + + Or or = new Or(EMPTY, equalsOf(fa, ONE), new In(EMPTY, fa, List.of(TWO))); + Expression e = new CombineDisjunctionsToIn().rule(or); + assertEquals(In.class, e.getClass()); + In in = (In) e; + assertEquals(fa, in.value()); + assertThat(in.list(), 
contains(ONE, TWO)); + } + + public void testOneEqualsOneInWithSameValue() { + FieldAttribute fa = getFieldAttribute(); + + Or or = new Or(EMPTY, equalsOf(fa, ONE), new In(EMPTY, fa, asList(ONE, TWO))); + Expression e = new CombineDisjunctionsToIn().rule(or); + assertEquals(In.class, e.getClass()); + In in = (In) e; + assertEquals(fa, in.value()); + assertThat(in.list(), contains(ONE, TWO)); + } + + public void testSingleValueInToEquals() { + FieldAttribute fa = getFieldAttribute(); + + Equals equals = equalsOf(fa, ONE); + Or or = new Or(EMPTY, equals, new In(EMPTY, fa, List.of(ONE))); + Expression e = new CombineDisjunctionsToIn().rule(or); + assertEquals(equals, e); + } + + public void testEqualsBehindAnd() { + FieldAttribute fa = getFieldAttribute(); + + And and = new And(EMPTY, equalsOf(fa, ONE), equalsOf(fa, TWO)); + Filter dummy = new Filter(EMPTY, relation(), and); + LogicalPlan transformed = new CombineDisjunctionsToIn().apply(dummy); + assertSame(dummy, transformed); + assertEquals(and, ((Filter) transformed).condition()); + } + + public void testTwoEqualsDifferentFields() { + FieldAttribute fieldOne = getFieldAttribute("ONE"); + FieldAttribute fieldTwo = getFieldAttribute("TWO"); + + Or or = new Or(EMPTY, equalsOf(fieldOne, ONE), equalsOf(fieldTwo, TWO)); + Expression e = new CombineDisjunctionsToIn().rule(or); + assertEquals(or, e); + } + + public void testMultipleIn() { + FieldAttribute fa = getFieldAttribute(); + + Or firstOr = new Or(EMPTY, new In(EMPTY, fa, List.of(ONE)), new In(EMPTY, fa, List.of(TWO))); + Or secondOr = new Or(EMPTY, firstOr, new In(EMPTY, fa, List.of(THREE))); + Expression e = new CombineDisjunctionsToIn().rule(secondOr); + assertEquals(In.class, e.getClass()); + In in = (In) e; + assertEquals(fa, in.value()); + assertThat(in.list(), contains(ONE, TWO, THREE)); + } + + public void testOrWithNonCombinableExpressions() { + FieldAttribute fa = getFieldAttribute(); + + Or firstOr = new Or(EMPTY, new In(EMPTY, fa, List.of(ONE)), lessThanOf(fa, TWO)); + Or secondOr = new Or(EMPTY, firstOr, new In(EMPTY, fa, List.of(THREE))); + Expression e = new CombineDisjunctionsToIn().rule(secondOr); + assertEquals(Or.class, e.getClass()); + Or or = (Or) e; + assertEquals(or.left(), firstOr.right()); + assertEquals(In.class, or.right().getClass()); + In in = (In) or.right(); + assertEquals(fa, in.value()); + assertThat(in.list(), contains(ONE, THREE)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/ConstantFoldingTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/ConstantFoldingTests.java new file mode 100644 index 0000000000000..366116d33901f --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/ConstantFoldingTests.java @@ -0,0 +1,121 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.optimizer.rules; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.core.expression.Alias; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.Expressions; +import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.expression.Nullability; +import org.elasticsearch.xpack.esql.core.expression.predicate.BinaryOperator; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Not; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; +import org.elasticsearch.xpack.esql.core.expression.predicate.regex.Like; +import org.elasticsearch.xpack.esql.core.expression.predicate.regex.LikePattern; +import org.elasticsearch.xpack.esql.core.expression.predicate.regex.RLike; +import org.elasticsearch.xpack.esql.core.expression.predicate.regex.RLikePattern; +import org.elasticsearch.xpack.esql.core.expression.predicate.regex.WildcardLike; +import org.elasticsearch.xpack.esql.core.expression.predicate.regex.WildcardPattern; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; +import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Div; +import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mod; +import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mul; +import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Sub; + +import static org.elasticsearch.xpack.esql.EsqlTestUtils.FIVE; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.THREE; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.TWO; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.equalsOf; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.greaterThanOf; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.greaterThanOrEqualOf; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.lessThanOf; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.lessThanOrEqualOf; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.notEqualsOf; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.of; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.rangeOf; +import static org.elasticsearch.xpack.esql.core.expression.Literal.FALSE; +import static org.elasticsearch.xpack.esql.core.expression.Literal.NULL; +import static org.elasticsearch.xpack.esql.core.expression.Literal.TRUE; +import static org.elasticsearch.xpack.esql.core.tree.Source.EMPTY; + +public class ConstantFoldingTests extends ESTestCase { + + public void testConstantFolding() { + Expression exp = new Add(EMPTY, TWO, THREE); + + assertTrue(exp.foldable()); + Expression result = new ConstantFolding().rule(exp); + assertTrue(result instanceof Literal); + assertEquals(5, ((Literal) result).value()); + + // check now with an alias + result = new ConstantFolding().rule(new Alias(EMPTY, "a", exp)); + assertEquals("a", Expressions.name(result)); + assertEquals(Alias.class, result.getClass()); + } + + public void testConstantFoldingBinaryComparison() { + assertEquals(FALSE, new ConstantFolding().rule(greaterThanOf(TWO, THREE)).canonical()); + assertEquals(FALSE, new ConstantFolding().rule(greaterThanOrEqualOf(TWO, THREE)).canonical()); + assertEquals(FALSE, new 
ConstantFolding().rule(equalsOf(TWO, THREE)).canonical()); + assertEquals(TRUE, new ConstantFolding().rule(notEqualsOf(TWO, THREE)).canonical()); + assertEquals(TRUE, new ConstantFolding().rule(lessThanOrEqualOf(TWO, THREE)).canonical()); + assertEquals(TRUE, new ConstantFolding().rule(lessThanOf(TWO, THREE)).canonical()); + } + + public void testConstantFoldingBinaryLogic() { + assertEquals(FALSE, new ConstantFolding().rule(new And(EMPTY, greaterThanOf(TWO, THREE), TRUE)).canonical()); + assertEquals(TRUE, new ConstantFolding().rule(new Or(EMPTY, greaterThanOrEqualOf(TWO, THREE), TRUE)).canonical()); + } + + public void testConstantFoldingBinaryLogic_WithNullHandling() { + assertEquals(Nullability.TRUE, new ConstantFolding().rule(new And(EMPTY, NULL, TRUE)).canonical().nullable()); + assertEquals(Nullability.TRUE, new ConstantFolding().rule(new And(EMPTY, TRUE, NULL)).canonical().nullable()); + assertEquals(FALSE, new ConstantFolding().rule(new And(EMPTY, NULL, FALSE)).canonical()); + assertEquals(FALSE, new ConstantFolding().rule(new And(EMPTY, FALSE, NULL)).canonical()); + assertEquals(Nullability.TRUE, new ConstantFolding().rule(new And(EMPTY, NULL, NULL)).canonical().nullable()); + + assertEquals(TRUE, new ConstantFolding().rule(new Or(EMPTY, NULL, TRUE)).canonical()); + assertEquals(TRUE, new ConstantFolding().rule(new Or(EMPTY, TRUE, NULL)).canonical()); + assertEquals(Nullability.TRUE, new ConstantFolding().rule(new Or(EMPTY, NULL, FALSE)).canonical().nullable()); + assertEquals(Nullability.TRUE, new ConstantFolding().rule(new Or(EMPTY, FALSE, NULL)).canonical().nullable()); + assertEquals(Nullability.TRUE, new ConstantFolding().rule(new Or(EMPTY, NULL, NULL)).canonical().nullable()); + } + + public void testConstantFoldingRange() { + assertEquals(true, new ConstantFolding().rule(rangeOf(FIVE, FIVE, true, new Literal(EMPTY, 10, DataType.INTEGER), false)).fold()); + assertEquals(false, new ConstantFolding().rule(rangeOf(FIVE, FIVE, false, new Literal(EMPTY, 10, DataType.INTEGER), false)).fold()); + } + + public void testConstantNot() { + assertEquals(FALSE, new ConstantFolding().rule(new Not(EMPTY, TRUE))); + assertEquals(TRUE, new ConstantFolding().rule(new Not(EMPTY, FALSE))); + } + + public void testConstantFoldingLikes() { + assertEquals(TRUE, new ConstantFolding().rule(new Like(EMPTY, of("test_emp"), new LikePattern("test%", (char) 0))).canonical()); + assertEquals(TRUE, new ConstantFolding().rule(new WildcardLike(EMPTY, of("test_emp"), new WildcardPattern("test*"))).canonical()); + assertEquals(TRUE, new ConstantFolding().rule(new RLike(EMPTY, of("test_emp"), new RLikePattern("test.emp"))).canonical()); + } + + public void testArithmeticFolding() { + assertEquals(10, foldOperator(new Add(EMPTY, new Literal(EMPTY, 7, DataType.INTEGER), THREE))); + assertEquals(4, foldOperator(new Sub(EMPTY, new Literal(EMPTY, 7, DataType.INTEGER), THREE))); + assertEquals(21, foldOperator(new Mul(EMPTY, new Literal(EMPTY, 7, DataType.INTEGER), THREE))); + assertEquals(2, foldOperator(new Div(EMPTY, new Literal(EMPTY, 7, DataType.INTEGER), THREE))); + assertEquals(1, foldOperator(new Mod(EMPTY, new Literal(EMPTY, 7, DataType.INTEGER), THREE))); + } + + private static Object foldOperator(BinaryOperator b) { + return ((Literal) new ConstantFolding().rule(b)).value(); + } + +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/FoldNullTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/FoldNullTests.java new file mode 
100644 index 0000000000000..db5d42f8bb810 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/FoldNullTests.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.optimizer.rules; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; +import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNull; +import org.elasticsearch.xpack.esql.core.expression.predicate.regex.RLike; +import org.elasticsearch.xpack.esql.core.expression.predicate.regex.RLikePattern; +import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; + +import static org.elasticsearch.xpack.esql.EsqlTestUtils.getFieldAttribute; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.greaterThanOf; +import static org.elasticsearch.xpack.esql.core.expression.Literal.NULL; +import static org.elasticsearch.xpack.esql.core.expression.Literal.TRUE; +import static org.elasticsearch.xpack.esql.core.tree.Source.EMPTY; + +public class FoldNullTests extends ESTestCase { + + public void testNullFoldingIsNull() { + OptimizerRules.FoldNull foldNull = new OptimizerRules.FoldNull(); + assertEquals(true, foldNull.rule(new IsNull(EMPTY, NULL)).fold()); + assertEquals(false, foldNull.rule(new IsNull(EMPTY, TRUE)).fold()); + } + + public void testGenericNullableExpression() { + OptimizerRules.FoldNull rule = new OptimizerRules.FoldNull(); + // arithmetic + assertNullLiteral(rule.rule(new Add(EMPTY, getFieldAttribute(), NULL))); + // comparison + assertNullLiteral(rule.rule(greaterThanOf(getFieldAttribute(), NULL))); + // regex + assertNullLiteral(rule.rule(new RLike(EMPTY, NULL, new RLikePattern("123")))); + } + + public void testNullFoldingDoesNotApplyOnLogicalExpressions() { + OptimizerRules.FoldNull rule = new OptimizerRules.FoldNull(); + + Or or = new Or(EMPTY, NULL, TRUE); + assertEquals(or, rule.rule(or)); + or = new Or(EMPTY, NULL, NULL); + assertEquals(or, rule.rule(or)); + + And and = new And(EMPTY, NULL, TRUE); + assertEquals(and, rule.rule(and)); + and = new And(EMPTY, NULL, NULL); + assertEquals(and, rule.rule(and)); + } + + private void assertNullLiteral(Expression expression) { + assertEquals(Literal.class, expression.getClass()); + assertNull(expression.fold()); + } + +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/LiteralsOnTheRightTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/LiteralsOnTheRightTests.java new file mode 100644 index 0000000000000..a884080504db8 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/LiteralsOnTheRightTests.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.optimizer.rules; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.core.expression.Alias; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; + +import static org.elasticsearch.xpack.esql.EsqlTestUtils.FIVE; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.equalsOf; +import static org.elasticsearch.xpack.esql.core.tree.Source.EMPTY; +import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; + +public class LiteralsOnTheRightTests extends ESTestCase { + + public void testLiteralsOnTheRight() { + Alias a = new Alias(EMPTY, "a", new Literal(EMPTY, 10, INTEGER)); + Expression result = new LiteralsOnTheRight().rule(equalsOf(FIVE, a)); + assertTrue(result instanceof Equals); + Equals eq = (Equals) result; + assertEquals(a, eq.left()); + assertEquals(FIVE, eq.right()); + + // Note: Null Equals test removed here + } + +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEqualsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEqualsTests.java new file mode 100644 index 0000000000000..99632fa127a3b --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateEqualsTests.java @@ -0,0 +1,335 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.optimizer.rules; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; +import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.expression.predicate.Predicates; +import org.elasticsearch.xpack.esql.core.expression.predicate.Range; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThanOrEqual; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals; + +import static java.util.Arrays.asList; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.FIVE; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.FOUR; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.ONE; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.THREE; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.TWO; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.equalsOf; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.getFieldAttribute; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.greaterThanOf; +import 
static org.elasticsearch.xpack.esql.EsqlTestUtils.greaterThanOrEqualOf; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.lessThanOf; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.lessThanOrEqualOf; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.notEqualsOf; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.rangeOf; +import static org.elasticsearch.xpack.esql.core.expression.Literal.FALSE; +import static org.elasticsearch.xpack.esql.core.expression.Literal.TRUE; +import static org.elasticsearch.xpack.esql.core.tree.Source.EMPTY; + +public class PropagateEqualsTests extends ESTestCase { + + // a == 1 AND a == 2 -> FALSE + public void testDualEqualsConjunction() { + FieldAttribute fa = getFieldAttribute(); + Equals eq1 = equalsOf(fa, ONE); + Equals eq2 = equalsOf(fa, TWO); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq1, eq2)); + assertEquals(FALSE, exp); + } + + // 1 < a < 10 AND a == 10 -> FALSE + public void testEliminateRangeByEqualsOutsideInterval() { + FieldAttribute fa = getFieldAttribute(); + Equals eq1 = equalsOf(fa, new Literal(EMPTY, 10, DataType.INTEGER)); + Range r = rangeOf(fa, ONE, false, new Literal(EMPTY, 10, DataType.INTEGER), false); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq1, r)); + assertEquals(FALSE, exp); + } + + // a != 3 AND a = 3 -> FALSE + public void testPropagateEquals_VarNeq3AndVarEq3() { + FieldAttribute fa = getFieldAttribute(); + NotEquals neq = notEqualsOf(fa, THREE); + Equals eq = equalsOf(fa, THREE); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, neq, eq)); + assertEquals(FALSE, exp); + } + + // a != 4 AND a = 3 -> a = 3 + public void testPropagateEquals_VarNeq4AndVarEq3() { + FieldAttribute fa = getFieldAttribute(); + NotEquals neq = notEqualsOf(fa, FOUR); + Equals eq = equalsOf(fa, THREE); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, neq, eq)); + assertEquals(Equals.class, exp.getClass()); + assertEquals(eq, exp); + } + + // a = 2 AND a < 2 -> FALSE + public void testPropagateEquals_VarEq2AndVarLt2() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + LessThan lt = lessThanOf(fa, TWO); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq, lt)); + assertEquals(FALSE, exp); + } + + // a = 2 AND a <= 2 -> a = 2 + public void testPropagateEquals_VarEq2AndVarLte2() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + LessThanOrEqual lt = lessThanOrEqualOf(fa, TWO); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq, lt)); + assertEquals(eq, exp); + } + + // a = 2 AND a <= 1 -> FALSE + public void testPropagateEquals_VarEq2AndVarLte1() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + LessThanOrEqual lt = lessThanOrEqualOf(fa, ONE); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq, lt)); + assertEquals(FALSE, exp); + } + + // a = 2 AND a > 2 -> FALSE + public void testPropagateEquals_VarEq2AndVarGt2() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + GreaterThan gt = greaterThanOf(fa, TWO); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq, gt)); + assertEquals(FALSE, exp); + } + + // a = 2 AND a >= 2 -> a = 2 + public void 
testPropagateEquals_VarEq2AndVarGte2() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + GreaterThanOrEqual gte = greaterThanOrEqualOf(fa, TWO); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq, gte)); + assertEquals(eq, exp); + } + + // a = 2 AND a > 3 -> FALSE + public void testPropagateEquals_VarEq2AndVarLt3() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + GreaterThan gt = greaterThanOf(fa, THREE); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq, gt)); + assertEquals(FALSE, exp); + } + + // a = 2 AND a < 3 AND a > 1 AND a != 4 -> a = 2 + public void testPropagateEquals_VarEq2AndVarLt3AndVarGt1AndVarNeq4() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + LessThan lt = lessThanOf(fa, THREE); + GreaterThan gt = greaterThanOf(fa, ONE); + NotEquals neq = notEqualsOf(fa, FOUR); + + PropagateEquals rule = new PropagateEquals(); + Expression and = Predicates.combineAnd(asList(eq, lt, gt, neq)); + Expression exp = rule.rule((And) and); + assertEquals(eq, exp); + } + + // a = 2 AND 1 < a < 3 AND a > 0 AND a != 4 -> a = 2 + public void testPropagateEquals_VarEq2AndVarRangeGt1Lt3AndVarGt0AndVarNeq4() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + Range range = rangeOf(fa, ONE, false, THREE, false); + GreaterThan gt = greaterThanOf(fa, new Literal(EMPTY, 0, DataType.INTEGER)); + NotEquals neq = notEqualsOf(fa, FOUR); + + PropagateEquals rule = new PropagateEquals(); + Expression and = Predicates.combineAnd(asList(eq, range, gt, neq)); + Expression exp = rule.rule((And) and); + assertEquals(eq, exp); + } + + // a = 2 OR a > 1 -> a > 1 + public void testPropagateEquals_VarEq2OrVarGt1() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + GreaterThan gt = greaterThanOf(fa, ONE); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, gt)); + assertEquals(gt, exp); + } + + // a = 2 OR a > 2 -> a >= 2 + public void testPropagateEquals_VarEq2OrVarGte2() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + GreaterThan gt = greaterThanOf(fa, TWO); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, gt)); + assertEquals(GreaterThanOrEqual.class, exp.getClass()); + GreaterThanOrEqual gte = (GreaterThanOrEqual) exp; + assertEquals(TWO, gte.right()); + } + + // a = 2 OR a < 3 -> a < 3 + public void testPropagateEquals_VarEq2OrVarLt3() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + LessThan lt = lessThanOf(fa, THREE); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, lt)); + assertEquals(lt, exp); + } + + // a = 3 OR a < 3 -> a <= 3 + public void testPropagateEquals_VarEq3OrVarLt3() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, THREE); + LessThan lt = lessThanOf(fa, THREE); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, lt)); + assertEquals(LessThanOrEqual.class, exp.getClass()); + LessThanOrEqual lte = (LessThanOrEqual) exp; + assertEquals(THREE, lte.right()); + } + + // a = 2 OR 1 < a < 3 -> 1 < a < 3 + public void testPropagateEquals_VarEq2OrVarRangeGt1Lt3() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + Range range = rangeOf(fa, ONE, false, THREE, false); + + 
PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, range)); + assertEquals(range, exp); + } + + // a = 2 OR 2 < a < 3 -> 2 <= a < 3 + public void testPropagateEquals_VarEq2OrVarRangeGt2Lt3() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + Range range = rangeOf(fa, TWO, false, THREE, false); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, range)); + assertEquals(Range.class, exp.getClass()); + Range r = (Range) exp; + assertEquals(TWO, r.lower()); + assertTrue(r.includeLower()); + assertEquals(THREE, r.upper()); + assertFalse(r.includeUpper()); + } + + // a = 3 OR 2 < a < 3 -> 2 < a <= 3 + public void testPropagateEquals_VarEq3OrVarRangeGt2Lt3() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, THREE); + Range range = rangeOf(fa, TWO, false, THREE, false); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, range)); + assertEquals(Range.class, exp.getClass()); + Range r = (Range) exp; + assertEquals(TWO, r.lower()); + assertFalse(r.includeLower()); + assertEquals(THREE, r.upper()); + assertTrue(r.includeUpper()); + } + + // a = 2 OR a != 2 -> TRUE + public void testPropagateEquals_VarEq2OrVarNeq2() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + NotEquals neq = notEqualsOf(fa, TWO); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, neq)); + assertEquals(TRUE, exp); + } + + // a = 2 OR a != 5 -> a != 5 + public void testPropagateEquals_VarEq2OrVarNeq5() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + NotEquals neq = notEqualsOf(fa, FIVE); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new Or(EMPTY, eq, neq)); + assertEquals(NotEquals.class, exp.getClass()); + NotEquals ne = (NotEquals) exp; + assertEquals(FIVE, ne.right()); + } + + // a = 2 OR 3 < a < 4 OR a > 2 OR a!= 2 -> TRUE + public void testPropagateEquals_VarEq2OrVarRangeGt3Lt4OrVarGt2OrVarNe2() { + FieldAttribute fa = getFieldAttribute(); + Equals eq = equalsOf(fa, TWO); + Range range = rangeOf(fa, THREE, false, FOUR, false); + GreaterThan gt = greaterThanOf(fa, TWO); + NotEquals neq = notEqualsOf(fa, TWO); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule((Or) Predicates.combineOr(asList(eq, range, neq, gt))); + assertEquals(TRUE, exp); + } + + // a == 1 AND a == 2 -> nop for date/time fields + public void testPropagateEquals_ignoreDateTimeFields() { + FieldAttribute fa = getFieldAttribute("a", DataType.DATETIME); + Equals eq1 = equalsOf(fa, ONE); + Equals eq2 = equalsOf(fa, TWO); + And and = new And(EMPTY, eq1, eq2); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(and); + assertEquals(and, exp); + } + + // 1 <= a < 10 AND a == 1 -> a == 1 + public void testEliminateRangeByEqualsInInterval() { + FieldAttribute fa = getFieldAttribute(); + Equals eq1 = equalsOf(fa, ONE); + Range r = rangeOf(fa, ONE, true, new Literal(EMPTY, 10, DataType.INTEGER), false); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq1, r)); + assertEquals(eq1, exp); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateNullableTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateNullableTests.java new file mode 100644 index 
0000000000000..23c0886f1a7d3 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/PropagateNullableTests.java @@ -0,0 +1,134 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.optimizer.rules; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; +import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.expression.predicate.Predicates; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; +import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull; +import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNull; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; +import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Div; +import org.elasticsearch.xpack.esql.plan.logical.Filter; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; + +import static org.elasticsearch.xpack.esql.EsqlTestUtils.ONE; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.THREE; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.TWO; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.getFieldAttribute; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.greaterThanOf; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.lessThanOf; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.relation; +import static org.elasticsearch.xpack.esql.core.expression.Literal.FALSE; +import static org.elasticsearch.xpack.esql.core.tree.Source.EMPTY; +import static org.elasticsearch.xpack.esql.core.type.DataType.BOOLEAN; + +public class PropagateNullableTests extends ESTestCase { + private Literal nullOf(DataType dataType) { + return new Literal(Source.EMPTY, null, dataType); + } + + // a IS NULL AND a IS NOT NULL => false + public void testIsNullAndNotNull() { + FieldAttribute fa = getFieldAttribute(); + + And and = new And(EMPTY, new IsNull(EMPTY, fa), new IsNotNull(EMPTY, fa)); + assertEquals(FALSE, new OptimizerRules.PropagateNullable().rule(and)); + } + + // a IS NULL AND b IS NOT NULL AND c IS NULL AND d IS NOT NULL AND e IS NULL AND a IS NOT NULL => false + public void testIsNullAndNotNullMultiField() { + FieldAttribute fa = getFieldAttribute(); + + And andOne = new And(EMPTY, new IsNull(EMPTY, fa), new IsNotNull(EMPTY, getFieldAttribute())); + And andTwo = new And(EMPTY, new IsNull(EMPTY, getFieldAttribute()), new IsNotNull(EMPTY, getFieldAttribute())); + And andThree = new And(EMPTY, new IsNull(EMPTY, getFieldAttribute()), new IsNotNull(EMPTY, fa)); + + And and = new And(EMPTY, andOne, new And(EMPTY, andThree, andTwo)); + + assertEquals(FALSE, new OptimizerRules.PropagateNullable().rule(and)); + } + + // a IS NULL AND a > 1 => a IS NULL AND false + public void testIsNullAndComparison() { + FieldAttribute fa = getFieldAttribute(); + IsNull isNull = new IsNull(EMPTY, fa); + + And and = new And(EMPTY, isNull, greaterThanOf(fa, ONE)); + 
assertEquals(new And(EMPTY, isNull, nullOf(BOOLEAN)), new OptimizerRules.PropagateNullable().rule(and)); + } + + // a IS NULL AND b < 1 AND c < 1 AND a < 1 => a IS NULL AND b < 1 AND c < 1 AND null + public void testIsNullAndMultipleComparison() { + FieldAttribute fa = getFieldAttribute(); + IsNull isNull = new IsNull(EMPTY, fa); + + And nestedAnd = new And(EMPTY, lessThanOf(getFieldAttribute("b"), ONE), lessThanOf(getFieldAttribute("c"), ONE)); + And and = new And(EMPTY, isNull, nestedAnd); + And top = new And(EMPTY, and, lessThanOf(fa, ONE)); + + Expression optimized = new OptimizerRules.PropagateNullable().rule(top); + Expression expected = new And(EMPTY, and, nullOf(BOOLEAN)); + assertEquals(Predicates.splitAnd(expected), Predicates.splitAnd(optimized)); + } + + // ((a+1)/2) > 1 AND (a + 2) > 1 AND a IS NULL AND b < 3 => NULL AND NULL AND a IS NULL AND b < 3 + public void testIsNullAndDeeplyNestedExpression() { + FieldAttribute fa = getFieldAttribute(); + IsNull isNull = new IsNull(EMPTY, fa); + + Expression nullified = new And( + EMPTY, + greaterThanOf(new Div(EMPTY, new Add(EMPTY, fa, ONE), TWO), ONE), + greaterThanOf(new Add(EMPTY, fa, TWO), ONE) + ); + Expression kept = new And(EMPTY, isNull, lessThanOf(getFieldAttribute("b"), THREE)); + And and = new And(EMPTY, nullified, kept); + + Expression optimized = new OptimizerRules.PropagateNullable().rule(and); + Expression expected = new And(EMPTY, new And(EMPTY, nullOf(BOOLEAN), nullOf(BOOLEAN)), kept); + + assertEquals(Predicates.splitAnd(expected), Predicates.splitAnd(optimized)); + } + + // a IS NULL OR a IS NOT NULL => no change + // a IS NULL OR a > 1 => no change + public void testIsNullInDisjunction() { + FieldAttribute fa = getFieldAttribute(); + + Or or = new Or(EMPTY, new IsNull(EMPTY, fa), new IsNotNull(EMPTY, fa)); + Filter dummy = new Filter(EMPTY, relation(), or); + LogicalPlan transformed = new OptimizerRules.PropagateNullable().apply(dummy); + assertSame(dummy, transformed); + assertEquals(or, ((Filter) transformed).condition()); + + or = new Or(EMPTY, new IsNull(EMPTY, fa), greaterThanOf(fa, ONE)); + dummy = new Filter(EMPTY, relation(), or); + transformed = new OptimizerRules.PropagateNullable().apply(dummy); + assertSame(dummy, transformed); + assertEquals(or, ((Filter) transformed).condition()); + } + + // a + 1 AND (a IS NULL OR a > 3) => no change + public void testIsNullDisjunction() { + FieldAttribute fa = getFieldAttribute(); + IsNull isNull = new IsNull(EMPTY, fa); + + Or or = new Or(EMPTY, isNull, greaterThanOf(fa, THREE)); + And and = new And(EMPTY, new Add(EMPTY, fa, ONE), or); + + assertEquals(and, new OptimizerRules.PropagateNullable().rule(and)); + } + +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceRegexMatchTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceRegexMatchTests.java new file mode 100644 index 0000000000000..62b13e6c9cc03 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceRegexMatchTests.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.esql.optimizer.rules; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; +import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull; +import org.elasticsearch.xpack.esql.core.expression.predicate.regex.Like; +import org.elasticsearch.xpack.esql.core.expression.predicate.regex.LikePattern; +import org.elasticsearch.xpack.esql.core.expression.predicate.regex.RLike; +import org.elasticsearch.xpack.esql.core.expression.predicate.regex.RLikePattern; +import org.elasticsearch.xpack.esql.core.expression.predicate.regex.WildcardLike; +import org.elasticsearch.xpack.esql.core.expression.predicate.regex.WildcardPattern; +import org.elasticsearch.xpack.esql.core.util.StringUtils; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; + +import static java.util.Arrays.asList; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.getFieldAttribute; +import static org.elasticsearch.xpack.esql.core.tree.Source.EMPTY; + +public class ReplaceRegexMatchTests extends ESTestCase { + + public void testMatchAllLikeToExist() { + for (String s : asList("%", "%%", "%%%")) { + LikePattern pattern = new LikePattern(s, (char) 0); + FieldAttribute fa = getFieldAttribute(); + Like l = new Like(EMPTY, fa, pattern); + Expression e = new ReplaceRegexMatch().rule(l); + assertEquals(IsNotNull.class, e.getClass()); + IsNotNull inn = (IsNotNull) e; + assertEquals(fa, inn.field()); + } + } + + public void testMatchAllWildcardLikeToExist() { + for (String s : asList("*", "**", "***")) { + WildcardPattern pattern = new WildcardPattern(s); + FieldAttribute fa = getFieldAttribute(); + WildcardLike l = new WildcardLike(EMPTY, fa, pattern); + Expression e = new ReplaceRegexMatch().rule(l); + assertEquals(IsNotNull.class, e.getClass()); + IsNotNull inn = (IsNotNull) e; + assertEquals(fa, inn.field()); + } + } + + public void testMatchAllRLikeToExist() { + RLikePattern pattern = new RLikePattern(".*"); + FieldAttribute fa = getFieldAttribute(); + RLike l = new RLike(EMPTY, fa, pattern); + Expression e = new ReplaceRegexMatch().rule(l); + assertEquals(IsNotNull.class, e.getClass()); + IsNotNull inn = (IsNotNull) e; + assertEquals(fa, inn.field()); + } + + public void testExactMatchLike() { + for (String s : asList("ab", "ab0%", "ab0_c")) { + LikePattern pattern = new LikePattern(s, '0'); + FieldAttribute fa = getFieldAttribute(); + Like l = new Like(EMPTY, fa, pattern); + Expression e = new ReplaceRegexMatch().rule(l); + assertEquals(Equals.class, e.getClass()); + Equals eq = (Equals) e; + assertEquals(fa, eq.left()); + assertEquals(s.replace("0", StringUtils.EMPTY), eq.right().fold()); + } + } + + public void testExactMatchWildcardLike() { + String s = "ab"; + WildcardPattern pattern = new WildcardPattern(s); + FieldAttribute fa = getFieldAttribute(); + WildcardLike l = new WildcardLike(EMPTY, fa, pattern); + Expression e = new ReplaceRegexMatch().rule(l); + assertEquals(Equals.class, e.getClass()); + Equals eq = (Equals) e; + assertEquals(fa, eq.left()); + assertEquals(s, eq.right().fold()); + } + + public void testExactMatchRLike() { + RLikePattern pattern = new RLikePattern("abc"); + FieldAttribute fa = getFieldAttribute(); + RLike l = new RLike(EMPTY, fa, pattern); + Expression e = new ReplaceRegexMatch().rule(l); + assertEquals(Equals.class, e.getClass()); + Equals eq = (Equals) e; + assertEquals(fa, 
eq.left()); + assertEquals("abc", eq.right().fold()); + } + +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/AbstractStatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/AbstractStatementParserTests.java index 97fb145d4c2e4..d575ba1fcb55a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/AbstractStatementParserTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/AbstractStatementParserTests.java @@ -12,8 +12,8 @@ import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttribute; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import java.math.BigInteger; import java.util.ArrayList; @@ -40,7 +40,7 @@ void assertStatement(String statement, LogicalPlan expected) { } LogicalPlan statement(String e) { - return statement(e, QueryParams.EMPTY); + return statement(e, new QueryParams()); } LogicalPlan statement(String e, QueryParams params) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java index b24d9e6083b69..80a2d49d0d94a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java @@ -13,14 +13,12 @@ import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttribute; import org.elasticsearch.xpack.esql.core.expression.UnresolvedStar; -import org.elasticsearch.xpack.esql.core.expression.function.UnresolvedFunction; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Not; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; -import org.elasticsearch.xpack.esql.core.plan.logical.Filter; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.UnresolvedNamePattern; +import org.elasticsearch.xpack.esql.expression.function.UnresolvedFunction; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Div; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mul; @@ -31,6 +29,8 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThanOrEqual; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.plan.logical.Drop; +import org.elasticsearch.xpack.esql.plan.logical.Filter; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.Rename; @@ -42,7 +42,6 @@ import java.util.stream.IntStream; import static org.elasticsearch.xpack.esql.EsqlTestUtils.as; -import static org.elasticsearch.xpack.esql.core.expression.function.FunctionResolutionStrategy.DEFAULT; import static 
org.elasticsearch.xpack.esql.core.tree.Source.EMPTY; import static org.elasticsearch.xpack.esql.core.type.DataType.DATE_PERIOD; import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; @@ -50,6 +49,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; import static org.elasticsearch.xpack.esql.core.type.DataType.TIME_DURATION; +import static org.elasticsearch.xpack.esql.expression.function.FunctionResolutionStrategy.DEFAULT; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java index 5251d7ed03d81..2a4c5eeeeaaea 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java @@ -12,20 +12,19 @@ import org.elasticsearch.index.IndexMode; import org.elasticsearch.xpack.esql.core.capabilities.UnresolvedException; import org.elasticsearch.xpack.esql.core.expression.Alias; +import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.EmptyAttribute; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.expression.Order; import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttribute; -import org.elasticsearch.xpack.esql.core.expression.function.UnresolvedFunction; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Not; import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparison; import org.elasticsearch.xpack.esql.core.plan.TableIdentifier; -import org.elasticsearch.xpack.esql.core.plan.logical.Filter; -import org.elasticsearch.xpack.esql.core.plan.logical.Limit; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.OrderBy; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.UnresolvedFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.string.RLike; import org.elasticsearch.xpack.esql.expression.function.scalar.string.WildcardLike; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; @@ -34,18 +33,22 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThanOrEqual; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; +import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.logical.Enrich; -import org.elasticsearch.xpack.esql.plan.logical.EsqlAggregate; -import org.elasticsearch.xpack.esql.plan.logical.EsqlUnresolvedRelation; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.Explain; +import 
org.elasticsearch.xpack.esql.plan.logical.Filter; import org.elasticsearch.xpack.esql.plan.logical.Grok; import org.elasticsearch.xpack.esql.plan.logical.InlineStats; +import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Lookup; import org.elasticsearch.xpack.esql.plan.logical.MvExpand; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.Row; +import org.elasticsearch.xpack.esql.plan.logical.UnresolvedRelation; import java.util.List; import java.util.Map; @@ -54,10 +57,10 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.as; import static org.elasticsearch.xpack.esql.core.expression.Literal.FALSE; import static org.elasticsearch.xpack.esql.core.expression.Literal.TRUE; -import static org.elasticsearch.xpack.esql.core.expression.function.FunctionResolutionStrategy.DEFAULT; import static org.elasticsearch.xpack.esql.core.tree.Source.EMPTY; import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; +import static org.elasticsearch.xpack.esql.expression.function.FunctionResolutionStrategy.DEFAULT; import static org.elasticsearch.xpack.esql.parser.ExpressionBuilder.breakIntoFragments; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.contains; @@ -70,8 +73,6 @@ //@TestLogging(value = "org.elasticsearch.xpack.esql:TRACE", reason = "debug") public class StatementParserTests extends AbstractStatementParserTests { - private static String FROM = "from test"; - public void testRowCommand() { assertEquals( new Row(EMPTY, List.of(new Alias(EMPTY, "a", integer(1)), new Alias(EMPTY, "b", integer(2)))), @@ -104,6 +105,17 @@ public void testRowCommandHugeInt() { ); } + public void testRowCommandHugeNegativeInt() { + assertEquals( + new Row(EMPTY, List.of(new Alias(EMPTY, "c", literalDouble(-92233720368547758080d)))), + statement("row c = -92233720368547758080") + ); + assertEquals( + new Row(EMPTY, List.of(new Alias(EMPTY, "c", literalDouble(-18446744073709551616d)))), + statement("row c = -18446744073709551616") + ); + } + public void testRowCommandDouble() { assertEquals(new Row(EMPTY, List.of(new Alias(EMPTY, "c", literalDouble(1.0)))), statement("row c = 1.0")); } @@ -234,9 +246,10 @@ public void testEvalImplicitNames() { public void testStatsWithGroups() { assertEquals( - new EsqlAggregate( + new Aggregate( EMPTY, PROCESSING_CMD_INPUT, + Aggregate.AggregateType.STANDARD, List.of(attribute("c"), attribute("d.e")), List.of( new Alias(EMPTY, "b", new UnresolvedFunction(EMPTY, "min", DEFAULT, List.of(attribute("a")))), @@ -250,9 +263,10 @@ public void testStatsWithGroups() { public void testStatsWithoutGroups() { assertEquals( - new EsqlAggregate( + new Aggregate( EMPTY, PROCESSING_CMD_INPUT, + Aggregate.AggregateType.STANDARD, List.of(), List.of( new Alias(EMPTY, "min(a)", new UnresolvedFunction(EMPTY, "min", DEFAULT, List.of(attribute("a")))), @@ -265,7 +279,7 @@ public void testStatsWithoutGroups() { public void testStatsWithoutAggs() throws Exception { assertEquals( - new EsqlAggregate(EMPTY, PROCESSING_CMD_INPUT, List.of(attribute("a")), List.of(attribute("a"))), + new Aggregate(EMPTY, PROCESSING_CMD_INPUT, Aggregate.AggregateType.STANDARD, List.of(attribute("a")), List.of(attribute("a"))), processingCommand("stats by a") ); } @@ -326,19 +340,128 @@ public 
void testInlineStatsWithoutGroups() { ); } - public void testIdentifiersAsIndexPattern() { - // assertIdentifierAsIndexPattern("foo", "from `foo`"); - // assertIdentifierAsIndexPattern("foo,test-*", "from `foo`,`test-*`"); - assertIdentifierAsIndexPattern("foo,test-*", "from foo,test-*"); - assertIdentifierAsIndexPattern("123-test@foo_bar+baz1", "from 123-test@foo_bar+baz1"); - // assertIdentifierAsIndexPattern("foo,test-*,abc", "from `foo`,`test-*`,abc"); - // assertIdentifierAsIndexPattern("foo, test-*, abc, xyz", "from `foo, test-*, abc, xyz`"); - // assertIdentifierAsIndexPattern("foo, test-*, abc, xyz,test123", "from `foo, test-*, abc, xyz`, test123"); - assertIdentifierAsIndexPattern("foo,test,xyz", "from foo, test,xyz"); - assertIdentifierAsIndexPattern( - "", // , - "from " // , `` + public void testStringAsIndexPattern() { + for (String command : List.of("FROM", "METRICS")) { + assertStringAsIndexPattern("foo", command + " \"foo\""); + assertStringAsIndexPattern("foo,test-*", command + """ + "foo","test-*" + """); + assertStringAsIndexPattern("foo,test-*", command + " foo,test-*"); + assertStringAsIndexPattern("123-test@foo_bar+baz1", command + " 123-test@foo_bar+baz1"); + assertStringAsIndexPattern("foo,test-*,abc", command + """ + "foo","test-*",abc + """); + assertStringAsIndexPattern("foo, test-*, abc, xyz", command + """ + "foo, test-*, abc, xyz" + """); + assertStringAsIndexPattern("foo, test-*, abc, xyz,test123", command + """ + "foo, test-*, abc, xyz", test123 + """); + assertStringAsIndexPattern("foo,test,xyz", command + " foo, test,xyz"); + assertStringAsIndexPattern( + ",", + command + " , \"\"" + ); + + assertStringAsIndexPattern("foo,test,xyz", command + " \"\"\"foo\"\"\", test,\"xyz\""); + + assertStringAsIndexPattern("`backtick`,``multiple`back``ticks```", command + " `backtick`, ``multiple`back``ticks```"); + + assertStringAsIndexPattern("test,metadata,metaata,.metadata", command + " test,\"metadata\", metaata, .metadata"); + + assertStringAsIndexPattern(".dot", command + " .dot"); + + assertStringAsIndexPattern("cluster:index", command + " cluster:index"); + assertStringAsIndexPattern("cluster:index|pattern", command + " cluster:\"index|pattern\""); + assertStringAsIndexPattern("cluster:.index", command + " cluster:.index"); + assertStringAsIndexPattern("cluster*:index*", command + " cluster*:index*"); + assertStringAsIndexPattern("cluster*:*", command + " cluster*:*"); + assertStringAsIndexPattern("*:index*", command + " *:index*"); + assertStringAsIndexPattern("*:index|pattern", command + " *:\"index|pattern\""); + assertStringAsIndexPattern("*:*", command + " *:*"); + assertStringAsIndexPattern("*:*,cluster*:index|pattern,i|p", command + " *:*, cluster*:\"index|pattern\", \"i|p\""); + } + } + + public void testStringAsLookupIndexPattern() { + assertStringAsLookupIndexPattern("foo", "ROW x = 1 | LOOKUP \"foo\" ON j"); + assertStringAsLookupIndexPattern("test-*", """ + ROW x = 1 | LOOKUP "test-*" ON j + """); + assertStringAsLookupIndexPattern("test-*", "ROW x = 1 | LOOKUP test-* ON j"); + assertStringAsLookupIndexPattern("123-test@foo_bar+baz1", "ROW x = 1 | LOOKUP 123-test@foo_bar+baz1 ON j"); + assertStringAsLookupIndexPattern("foo, test-*, abc, xyz", """ + ROW x = 1 | LOOKUP "foo, test-*, abc, xyz" ON j + """); + assertStringAsLookupIndexPattern("", "ROW x = 1 | LOOKUP ON j"); + assertStringAsLookupIndexPattern( + "", + "ROW x = 1 | LOOKUP \"\" ON j" ); + + assertStringAsLookupIndexPattern("foo", "ROW x = 1 | LOOKUP \"\"\"foo\"\"\" ON j"); + + 
assertStringAsLookupIndexPattern("`backtick`", "ROW x = 1 | LOOKUP `backtick` ON j"); + assertStringAsLookupIndexPattern("``multiple`back``ticks```", "ROW x = 1 | LOOKUP ``multiple`back``ticks``` ON j"); + + assertStringAsLookupIndexPattern(".dot", "ROW x = 1 | LOOKUP .dot ON j"); + + assertStringAsLookupIndexPattern("cluster:index", "ROW x = 1 | LOOKUP cluster:index ON j"); + assertStringAsLookupIndexPattern("cluster:.index", "ROW x = 1 | LOOKUP cluster:.index ON j"); + assertStringAsLookupIndexPattern("cluster*:index*", "ROW x = 1 | LOOKUP cluster*:index* ON j"); + assertStringAsLookupIndexPattern("cluster*:*", "ROW x = 1 | LOOKUP cluster*:* ON j"); + assertStringAsLookupIndexPattern("*:index*", "ROW x = 1 | LOOKUP *:index* ON j"); + assertStringAsLookupIndexPattern("*:*", "ROW x = 1 | LOOKUP *:* ON j"); + + } + + public void testInvalidQuotingAsFromIndexPattern() { + expectError("FROM \"foo", ": token recognition error at: '\"foo'"); + expectError("FROM \"foo | LIMIT 1", ": token recognition error at: '\"foo | LIMIT 1'"); + expectError("FROM \"\"\"foo", ": token recognition error at: '\"foo'"); + + expectError("FROM foo\"", ": token recognition error at: '\"'"); + expectError("FROM foo\" | LIMIT 2", ": token recognition error at: '\" | LIMIT 2'"); + expectError("FROM foo\"\"\"", ": token recognition error at: '\"'"); + + expectError("FROM \"foo\"bar\"", ": token recognition error at: '\"'"); + expectError("FROM \"foo\"\"bar\"", ": extraneous input '\"bar\"' expecting <EOF>"); + + expectError("FROM \"\"\"foo\"\"\"bar\"\"\"", ": mismatched input 'bar' expecting {<EOF>, '|', ',', OPENING_BRACKET, 'metadata'}"); + expectError( + "FROM \"\"\"foo\"\"\"\"\"\"bar\"\"\"", + ": mismatched input '\"bar\"' expecting {<EOF>, '|', ',', OPENING_BRACKET, 'metadata'}" + ); + } + + public void testInvalidQuotingAsMetricsIndexPattern() { + expectError("METRICS \"foo", ": token recognition error at: '\"foo'"); + expectError("METRICS \"foo | LIMIT 1", ": token recognition error at: '\"foo | LIMIT 1'"); + expectError("METRICS \"\"\"foo", ": token recognition error at: '\"'"); + + expectError("METRICS foo\"", ": token recognition error at: '\"'"); + expectError("METRICS foo\" | LIMIT 2", ": token recognition error at: '\"'"); + expectError("METRICS foo\"\"\"", ": token recognition error at: '\"'"); + + expectError("METRICS \"foo\"bar\"", ": token recognition error at: '\"'"); + expectError("METRICS \"foo\"\"bar\"", ": token recognition error at: '\"'"); + + expectError("METRICS \"\"\"foo\"\"\"bar\"\"\"", ": token recognition error at: '\"'"); + expectError("METRICS \"\"\"foo\"\"\"\"\"\"bar\"\"\"", ": token recognition error at: '\"'"); + } + + public void testInvalidQuotingAsLookupIndexPattern() { + expectError("ROW x = 1 | LOOKUP \"foo ON j", ": token recognition error at: '\"foo ON j'"); + expectError("ROW x = 1 | LOOKUP \"\"\"foo ON j", ": token recognition error at: '\"foo ON j'"); + + expectError("ROW x = 1 | LOOKUP foo\" ON j", ": token recognition error at: '\" ON j'"); + expectError("ROW x = 1 | LOOKUP foo\"\"\" ON j", ": token recognition error at: '\" ON j'"); + + expectError("ROW x = 1 | LOOKUP \"foo\"bar\" ON j", ": token recognition error at: '\" ON j'"); + expectError("ROW x = 1 | LOOKUP \"foo\"\"bar\" ON j", ": extraneous input '\"bar\"' expecting 'on'"); + + expectError("ROW x = 1 | LOOKUP \"\"\"foo\"\"\"bar\"\"\" ON j", ": mismatched input 'bar' expecting 'on'"); + expectError("ROW x = 1 | LOOKUP \"\"\"foo\"\"\"\"\"\"bar\"\"\" ON j", "line 1:31: mismatched input '\"bar\"' expecting 'on'"); } public void 
testIdentifierAsFieldName() { @@ -393,7 +516,7 @@ public void testBasicLimitCommand() { assertThat(limit.children().size(), equalTo(1)); assertThat(limit.children().get(0), instanceOf(Filter.class)); assertThat(limit.children().get(0).children().size(), equalTo(1)); - assertThat(limit.children().get(0).children().get(0), instanceOf(EsqlUnresolvedRelation.class)); + assertThat(limit.children().get(0).children().get(0), instanceOf(UnresolvedRelation.class)); } public void testLimitConstraints() { @@ -443,7 +566,7 @@ public void testBasicSortCommand() { assertThat(orderBy.children().size(), equalTo(1)); assertThat(orderBy.children().get(0), instanceOf(Filter.class)); assertThat(orderBy.children().get(0).children().size(), equalTo(1)); - assertThat(orderBy.children().get(0).children().get(0), instanceOf(EsqlUnresolvedRelation.class)); + assertThat(orderBy.children().get(0).children().get(0), instanceOf(UnresolvedRelation.class)); } public void testSubquery() { @@ -642,15 +765,27 @@ public void testDissectPattern() { public void testGrokPattern() { LogicalPlan cmd = processingCommand("grok a \"%{WORD:foo}\""); assertEquals(Grok.class, cmd.getClass()); - Grok dissect = (Grok) cmd; - assertEquals("%{WORD:foo}", dissect.parser().pattern()); - assertEquals(List.of(referenceAttribute("foo", KEYWORD)), dissect.extractedFields()); + Grok grok = (Grok) cmd; + assertEquals("%{WORD:foo}", grok.parser().pattern()); + assertEquals(List.of(referenceAttribute("foo", KEYWORD)), grok.extractedFields()); ParsingException pe = expectThrows(ParsingException.class, () -> statement("row a = \"foo bar\" | grok a \"%{_invalid_:x}\"")); assertThat( pe.getMessage(), containsString("Invalid pattern [%{_invalid_:x}] for grok: Unable to find pattern [_invalid_] in Grok's pattern dictionary") ); + + cmd = processingCommand("grok a \"%{WORD:foo} %{WORD:foo}\""); + assertEquals(Grok.class, cmd.getClass()); + grok = (Grok) cmd; + assertEquals("%{WORD:foo} %{WORD:foo}", grok.parser().pattern()); + assertEquals(List.of(referenceAttribute("foo", KEYWORD)), grok.extractedFields()); + + expectError( + "row a = \"foo bar\" | GROK a \"%{NUMBER:foo} %{WORD:foo}\"", + "line 1:22: Invalid GROK pattern [%{NUMBER:foo} %{WORD:foo}]:" + + " the attribute [foo] is defined multiple times with different types" + ); } public void testLikeRLike() { @@ -860,7 +995,7 @@ public void testInvalidNamedParams() { expectError( "from test | where x < ?n1 | eval y = ?n2", List.of(new QueryParam("n1", 5, INTEGER), new QueryParam("n3", 5, INTEGER)), - "Unknown query parameter [n2], did you mean any of [n1, n3]?" + "Unknown query parameter [n2], did you mean any of [n3, n1]?" 
); expectError("from test | where x < ?_1", List.of(new QueryParam("_1", 5, INTEGER)), "extraneous input '_1' expecting "); @@ -937,7 +1072,7 @@ public void testParamInWhere() { Filter w = (Filter) limit.children().get(0); assertThat(((Literal) w.condition().children().get(1)).value(), equalTo(5)); assertThat(limit.children().get(0).children().size(), equalTo(1)); - assertThat(limit.children().get(0).children().get(0), instanceOf(EsqlUnresolvedRelation.class)); + assertThat(limit.children().get(0).children().get(0), instanceOf(UnresolvedRelation.class)); plan = statement("from test | where x < ?n1 | limit 10", new QueryParams(List.of(new QueryParam("n1", 5, INTEGER)))); assertThat(plan, instanceOf(Limit.class)); @@ -949,7 +1084,7 @@ public void testParamInWhere() { w = (Filter) limit.children().get(0); assertThat(((Literal) w.condition().children().get(1)).value(), equalTo(5)); assertThat(limit.children().get(0).children().size(), equalTo(1)); - assertThat(limit.children().get(0).children().get(0), instanceOf(EsqlUnresolvedRelation.class)); + assertThat(limit.children().get(0).children().get(0), instanceOf(UnresolvedRelation.class)); plan = statement("from test | where x < ?1 | limit 10", new QueryParams(List.of(new QueryParam(null, 5, INTEGER)))); assertThat(plan, instanceOf(Limit.class)); @@ -961,7 +1096,7 @@ public void testParamInWhere() { w = (Filter) limit.children().get(0); assertThat(((Literal) w.condition().children().get(1)).value(), equalTo(5)); assertThat(limit.children().get(0).children().size(), equalTo(1)); - assertThat(limit.children().get(0).children().get(0), instanceOf(EsqlUnresolvedRelation.class)); + assertThat(limit.children().get(0).children().get(0), instanceOf(UnresolvedRelation.class)); } public void testParamInEval() { @@ -983,7 +1118,7 @@ public void testParamInEval() { Filter f = (Filter) eval.children().get(0); assertThat(((Literal) f.condition().children().get(1)).value(), equalTo(5)); assertThat(f.children().size(), equalTo(1)); - assertThat(f.children().get(0), instanceOf(EsqlUnresolvedRelation.class)); + assertThat(f.children().get(0), instanceOf(UnresolvedRelation.class)); plan = statement( "from test | where x < ?n1 | eval y = ?n2 + ?n3 | limit 10", @@ -1003,7 +1138,7 @@ public void testParamInEval() { f = (Filter) eval.children().get(0); assertThat(((Literal) f.condition().children().get(1)).value(), equalTo(5)); assertThat(f.children().size(), equalTo(1)); - assertThat(f.children().get(0), instanceOf(EsqlUnresolvedRelation.class)); + assertThat(f.children().get(0), instanceOf(UnresolvedRelation.class)); plan = statement( "from test | where x < ?1 | eval y = ?2 + ?1 | limit 10", @@ -1021,7 +1156,7 @@ public void testParamInEval() { f = (Filter) eval.children().get(0); assertThat(((Literal) f.condition().children().get(1)).value(), equalTo(5)); assertThat(f.children().size(), equalTo(1)); - assertThat(f.children().get(0), instanceOf(EsqlUnresolvedRelation.class)); + assertThat(f.children().get(0), instanceOf(UnresolvedRelation.class)); } public void testParamInAggFunction() { @@ -1036,8 +1171,8 @@ public void testParamInAggFunction() { ) ) ); - assertThat(plan, instanceOf(EsqlAggregate.class)); - EsqlAggregate agg = (EsqlAggregate) plan; + assertThat(plan, instanceOf(Aggregate.class)); + Aggregate agg = (Aggregate) plan; assertThat(((Literal) agg.aggregates().get(0).children().get(0).children().get(0)).value(), equalTo("*")); assertThat(agg.child(), instanceOf(Eval.class)); assertThat(agg.children().size(), equalTo(1)); @@ -1048,7 +1183,7 @@ public void 
testParamInAggFunction() { Filter f = (Filter) eval.children().get(0); assertThat(((Literal) f.condition().children().get(1)).value(), equalTo(5)); assertThat(f.children().size(), equalTo(1)); - assertThat(f.children().get(0), instanceOf(EsqlUnresolvedRelation.class)); + assertThat(f.children().get(0), instanceOf(UnresolvedRelation.class)); plan = statement( "from test | where x < ?n1 | eval y = ?n2 + ?n3 | stats count(?n4) by z", @@ -1061,8 +1196,8 @@ public void testParamInAggFunction() { ) ) ); - assertThat(plan, instanceOf(EsqlAggregate.class)); - agg = (EsqlAggregate) plan; + assertThat(plan, instanceOf(Aggregate.class)); + agg = (Aggregate) plan; assertThat(((Literal) agg.aggregates().get(0).children().get(0).children().get(0)).value(), equalTo("*")); assertThat(agg.child(), instanceOf(Eval.class)); assertThat(agg.children().size(), equalTo(1)); @@ -1073,7 +1208,7 @@ public void testParamInAggFunction() { f = (Filter) eval.children().get(0); assertThat(((Literal) f.condition().children().get(1)).value(), equalTo(5)); assertThat(f.children().size(), equalTo(1)); - assertThat(f.children().get(0), instanceOf(EsqlUnresolvedRelation.class)); + assertThat(f.children().get(0), instanceOf(UnresolvedRelation.class)); plan = statement( "from test | where x < ?1 | eval y = ?2 + ?1 | stats count(?3) by z", @@ -1081,8 +1216,8 @@ public void testParamInAggFunction() { List.of(new QueryParam(null, 5, INTEGER), new QueryParam(null, -1, INTEGER), new QueryParam(null, "*", KEYWORD)) ) ); - assertThat(plan, instanceOf(EsqlAggregate.class)); - agg = (EsqlAggregate) plan; + assertThat(plan, instanceOf(Aggregate.class)); + agg = (Aggregate) plan; assertThat(((Literal) agg.aggregates().get(0).children().get(0).children().get(0)).value(), equalTo("*")); assertThat(agg.child(), instanceOf(Eval.class)); assertThat(agg.children().size(), equalTo(1)); @@ -1093,7 +1228,7 @@ public void testParamInAggFunction() { f = (Filter) eval.children().get(0); assertThat(((Literal) f.condition().children().get(1)).value(), equalTo(5)); assertThat(f.children().size(), equalTo(1)); - assertThat(f.children().get(0), instanceOf(EsqlUnresolvedRelation.class)); + assertThat(f.children().get(0), instanceOf(UnresolvedRelation.class)); } public void testParamMixed() { @@ -1157,11 +1292,28 @@ public void testQuotedName() { assertThat(Expressions.names(project.projections()), contains("count(`my-field`)")); } - private void assertIdentifierAsIndexPattern(String identifier, String statement) { + private void assertStringAsIndexPattern(String string, String statement) { + if (Build.current().isProductionRelease() && statement.contains("METRIC")) { + var e = expectThrows(IllegalArgumentException.class, () -> statement(statement)); + assertThat(e.getMessage(), containsString("METRICS command currently requires a snapshot build")); + return; + } LogicalPlan from = statement(statement); - assertThat(from, instanceOf(EsqlUnresolvedRelation.class)); - EsqlUnresolvedRelation table = (EsqlUnresolvedRelation) from; - assertThat(table.table().index(), is(identifier)); + assertThat(from, instanceOf(UnresolvedRelation.class)); + UnresolvedRelation table = (UnresolvedRelation) from; + assertThat(table.table().index(), is(string)); + } + + private void assertStringAsLookupIndexPattern(String string, String statement) { + if (Build.current().isProductionRelease()) { + var e = expectThrows(ParsingException.class, () -> statement(statement)); + assertThat(e.getMessage(), containsString("line 1:14: LOOKUP is in preview and only available in SNAPSHOT 
build")); + return; + } + var plan = statement(statement); + var lookup = as(plan, Lookup.class); + var tableName = as(lookup.tableName(), Literal.class); + assertThat(tableName.fold(), equalTo(string)); } public void testIdPatternUnquoted() throws Exception { @@ -1220,7 +1372,13 @@ public void testInlineConvertWithNonexistentType() { } public void testLookup() { - var plan = statement("ROW a = 1 | LOOKUP t ON j"); + String query = "ROW a = 1 | LOOKUP t ON j"; + if (Build.current().isProductionRelease()) { + var e = expectThrows(ParsingException.class, () -> statement(query)); + assertThat(e.getMessage(), containsString("line 1:14: LOOKUP is in preview and only available in SNAPSHOT build")); + return; + } + var plan = statement(query); var lookup = as(plan, Lookup.class); var tableName = as(lookup.tableName(), Literal.class); assertThat(tableName.fold(), equalTo("t")); @@ -1236,45 +1394,23 @@ public void testInlineConvertUnsupportedType() { public void testMetricsWithoutStats() { assumeTrue("requires snapshot build", Build.current().isSnapshot()); - assertStatement( - "METRICS foo", - new EsqlUnresolvedRelation(EMPTY, new TableIdentifier(EMPTY, null, "foo"), List.of(), IndexMode.TIME_SERIES) - ); - assertStatement( - "METRICS foo,bar", - new EsqlUnresolvedRelation(EMPTY, new TableIdentifier(EMPTY, null, "foo,bar"), List.of(), IndexMode.TIME_SERIES) - ); - assertStatement( - "METRICS foo*,bar", - new EsqlUnresolvedRelation(EMPTY, new TableIdentifier(EMPTY, null, "foo*,bar"), List.of(), IndexMode.TIME_SERIES) - ); - assertStatement( - "METRICS foo-*,bar", - new EsqlUnresolvedRelation(EMPTY, new TableIdentifier(EMPTY, null, "foo-*,bar"), List.of(), IndexMode.TIME_SERIES) - ); - assertStatement( - "METRICS foo-*,bar+*", - new EsqlUnresolvedRelation(EMPTY, new TableIdentifier(EMPTY, null, "foo-*,bar+*"), List.of(), IndexMode.TIME_SERIES) - ); + assertStatement("METRICS foo", unresolvedRelation("foo")); + assertStatement("METRICS foo,bar", unresolvedRelation("foo,bar")); + assertStatement("METRICS foo*,bar", unresolvedRelation("foo*,bar")); + assertStatement("METRICS foo-*,bar", unresolvedRelation("foo-*,bar")); + assertStatement("METRICS foo-*,bar+*", unresolvedRelation("foo-*,bar+*")); } public void testMetricsIdentifiers() { assumeTrue("requires snapshot build", Build.current().isSnapshot()); - Map patterns = Map.of( - "metrics foo,test-*", - "foo,test-*", - "metrics 123-test@foo_bar+baz1", - "123-test@foo_bar+baz1", - "metrics foo, test,xyz", - "foo,test,xyz", - "metrics >", - ">" + Map patterns = Map.ofEntries( + Map.entry("metrics foo,test-*", "foo,test-*"), + Map.entry("metrics 123-test@foo_bar+baz1", "123-test@foo_bar+baz1"), + Map.entry("metrics foo, test,xyz", "foo,test,xyz"), + Map.entry("metrics >", ">") ); for (Map.Entry e : patterns.entrySet()) { - assertStatement( - e.getKey(), - new EsqlUnresolvedRelation(EMPTY, new TableIdentifier(EMPTY, null, e.getValue()), List.of(), IndexMode.TIME_SERIES) - ); + assertStatement(e.getKey(), unresolvedRelation(e.getValue())); } } @@ -1282,27 +1418,30 @@ public void testSimpleMetricsWithStats() { assumeTrue("requires snapshot build", Build.current().isSnapshot()); assertStatement( "METRICS foo load=avg(cpu) BY ts", - new EsqlAggregate( + new Aggregate( EMPTY, - new EsqlUnresolvedRelation(EMPTY, new TableIdentifier(EMPTY, null, "foo"), List.of(), IndexMode.TIME_SERIES), + unresolvedTSRelation("foo"), + Aggregate.AggregateType.METRICS, List.of(attribute("ts")), List.of(new Alias(EMPTY, "load", new UnresolvedFunction(EMPTY, "avg", DEFAULT, 
List.of(attribute("cpu")))), attribute("ts")) ) ); assertStatement( "METRICS foo,bar load=avg(cpu) BY ts", - new EsqlAggregate( + new Aggregate( EMPTY, - new EsqlUnresolvedRelation(EMPTY, new TableIdentifier(EMPTY, null, "foo,bar"), List.of(), IndexMode.TIME_SERIES), + unresolvedTSRelation("foo,bar"), + Aggregate.AggregateType.METRICS, List.of(attribute("ts")), List.of(new Alias(EMPTY, "load", new UnresolvedFunction(EMPTY, "avg", DEFAULT, List.of(attribute("cpu")))), attribute("ts")) ) ); assertStatement( "METRICS foo,bar load=avg(cpu),max(rate(requests)) BY ts", - new EsqlAggregate( + new Aggregate( EMPTY, - new EsqlUnresolvedRelation(EMPTY, new TableIdentifier(EMPTY, null, "foo,bar"), List.of(), IndexMode.TIME_SERIES), + unresolvedTSRelation("foo,bar"), + Aggregate.AggregateType.METRICS, List.of(attribute("ts")), List.of( new Alias(EMPTY, "load", new UnresolvedFunction(EMPTY, "avg", DEFAULT, List.of(attribute("cpu")))), @@ -1322,45 +1461,50 @@ public void testSimpleMetricsWithStats() { ); assertStatement( "METRICS foo* count(errors)", - new EsqlAggregate( + new Aggregate( EMPTY, - new EsqlUnresolvedRelation(EMPTY, new TableIdentifier(EMPTY, null, "foo*"), List.of(), IndexMode.TIME_SERIES), + unresolvedTSRelation("foo*"), + Aggregate.AggregateType.METRICS, List.of(), List.of(new Alias(EMPTY, "count(errors)", new UnresolvedFunction(EMPTY, "count", DEFAULT, List.of(attribute("errors"))))) ) ); assertStatement( "METRICS foo* a(b)", - new EsqlAggregate( + new Aggregate( EMPTY, - new EsqlUnresolvedRelation(EMPTY, new TableIdentifier(EMPTY, null, "foo*"), List.of(), IndexMode.TIME_SERIES), + unresolvedTSRelation("foo*"), + Aggregate.AggregateType.METRICS, List.of(), List.of(new Alias(EMPTY, "a(b)", new UnresolvedFunction(EMPTY, "a", DEFAULT, List.of(attribute("b"))))) ) ); assertStatement( "METRICS foo* a(b)", - new EsqlAggregate( + new Aggregate( EMPTY, - new EsqlUnresolvedRelation(EMPTY, new TableIdentifier(EMPTY, null, "foo*"), List.of(), IndexMode.TIME_SERIES), + unresolvedTSRelation("foo*"), + Aggregate.AggregateType.METRICS, List.of(), List.of(new Alias(EMPTY, "a(b)", new UnresolvedFunction(EMPTY, "a", DEFAULT, List.of(attribute("b"))))) ) ); assertStatement( "METRICS foo* a1(b2)", - new EsqlAggregate( + new Aggregate( EMPTY, - new EsqlUnresolvedRelation(EMPTY, new TableIdentifier(EMPTY, null, "foo*"), List.of(), IndexMode.TIME_SERIES), + unresolvedTSRelation("foo*"), + Aggregate.AggregateType.METRICS, List.of(), List.of(new Alias(EMPTY, "a1(b2)", new UnresolvedFunction(EMPTY, "a1", DEFAULT, List.of(attribute("b2"))))) ) ); assertStatement( "METRICS foo*,bar* b = min(a) by c, d.e", - new EsqlAggregate( + new Aggregate( EMPTY, - new EsqlUnresolvedRelation(EMPTY, new TableIdentifier(EMPTY, null, "foo*,bar*"), List.of(), IndexMode.TIME_SERIES), + unresolvedTSRelation("foo*,bar*"), + Aggregate.AggregateType.METRICS, List.of(attribute("c"), attribute("d.e")), List.of( new Alias(EMPTY, "b", new UnresolvedFunction(EMPTY, "min", DEFAULT, List.of(attribute("a")))), @@ -1371,6 +1515,15 @@ public void testSimpleMetricsWithStats() { ); } + private LogicalPlan unresolvedRelation(String index) { + return new UnresolvedRelation(EMPTY, new TableIdentifier(EMPTY, null, index), false, List.of(), IndexMode.STANDARD, null); + } + + private LogicalPlan unresolvedTSRelation(String index) { + List metadata = List.of(new MetadataAttribute(EMPTY, MetadataAttribute.TSID_FIELD, DataType.KEYWORD, false)); + return new UnresolvedRelation(EMPTY, new TableIdentifier(EMPTY, null, index), false, metadata, 
IndexMode.TIME_SERIES, null); + } + public void testMetricWithGroupKeyAsAgg() { assumeTrue("requires snapshot build", Build.current().isSnapshot()); var queries = List.of("METRICS foo a BY a"); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/QueryPlanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/QueryPlanTests.java index a62a515ee551b..a254207865ad5 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/QueryPlanTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/QueryPlanTests.java @@ -13,11 +13,11 @@ import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.plan.logical.Filter; -import org.elasticsearch.xpack.esql.core.plan.logical.Limit; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.core.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; +import org.elasticsearch.xpack.esql.plan.logical.Filter; +import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; import org.elasticsearch.xpack.esql.plan.logical.Project; import java.util.ArrayList; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/JoinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/JoinTests.java new file mode 100644 index 0000000000000..91f25e6f83579 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/JoinTests.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.plan.logical; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.core.expression.Alias; +import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.plan.logical.join.Join; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinConfig; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinType; + +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +public class JoinTests extends ESTestCase { + public void testExpressionsAndReferences() { + int numMatchFields = between(1, 10); + + List matchFields = new ArrayList<>(numMatchFields); + List leftFields = new ArrayList<>(numMatchFields); + List leftAttributes = new ArrayList<>(numMatchFields); + List rightFields = new ArrayList<>(numMatchFields); + List rightAttributes = new ArrayList<>(numMatchFields); + + for (int i = 0; i < numMatchFields; i++) { + Alias left = aliasForLiteral("left" + i); + Alias right = aliasForLiteral("right" + i); + + leftFields.add(left); + leftAttributes.add(left.toAttribute()); + rightFields.add(right); + rightAttributes.add(right.toAttribute()); + matchFields.add(randomBoolean() ? left.toAttribute() : right.toAttribute()); + } + + Row left = new Row(Source.EMPTY, leftFields); + Row right = new Row(Source.EMPTY, rightFields); + + JoinConfig joinConfig = new JoinConfig(JoinType.LEFT, matchFields, leftAttributes, rightAttributes); + Join join = new Join(Source.EMPTY, left, right, joinConfig); + + // matchfields are a subset of the left and right fields, so they don't contribute to the size of the references set. + assertEquals(2 * numMatchFields, join.references().size()); + + AttributeSet refs = join.references(); + assertTrue(refs.containsAll(matchFields)); + assertTrue(refs.containsAll(leftAttributes)); + assertTrue(refs.containsAll(rightAttributes)); + + Set exprs = Set.copyOf(join.expressions()); + assertTrue(exprs.containsAll(matchFields)); + assertTrue(exprs.containsAll(leftAttributes)); + assertTrue(exprs.containsAll(rightAttributes)); + } + + public void testTransformExprs() { + int numMatchFields = between(1, 10); + + List matchFields = new ArrayList<>(numMatchFields); + List leftFields = new ArrayList<>(numMatchFields); + List leftAttributes = new ArrayList<>(numMatchFields); + List rightFields = new ArrayList<>(numMatchFields); + List rightAttributes = new ArrayList<>(numMatchFields); + + for (int i = 0; i < numMatchFields; i++) { + Alias left = aliasForLiteral("left" + i); + Alias right = aliasForLiteral("right" + i); + + leftFields.add(left); + leftAttributes.add(left.toAttribute()); + rightFields.add(right); + rightAttributes.add(right.toAttribute()); + matchFields.add(randomBoolean() ? 
left.toAttribute() : right.toAttribute()); + } + + Row left = new Row(Source.EMPTY, leftFields); + Row right = new Row(Source.EMPTY, rightFields); + + JoinConfig joinConfig = new JoinConfig(JoinType.LEFT, matchFields, leftAttributes, rightAttributes); + Join join = new Join(Source.EMPTY, left, right, joinConfig); + assertTrue(join.config().matchFields().stream().allMatch(ref -> ref.dataType().equals(DataType.INTEGER))); + + Join transformedJoin = (Join) join.transformExpressionsOnly(Attribute.class, attr -> attr.withDataType(DataType.BOOLEAN)); + assertTrue(transformedJoin.config().matchFields().stream().allMatch(ref -> ref.dataType().equals(DataType.BOOLEAN))); + } + + private static Alias aliasForLiteral(String name) { + return new Alias(Source.EMPTY, name, new Literal(Source.EMPTY, 1, DataType.INTEGER)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java new file mode 100644 index 0000000000000..c93f3b9e0e350 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java @@ -0,0 +1,246 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.plugin; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.node.VersionInformation; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.compute.operator.DriverProfile; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.TaskCancellationService; +import org.elasticsearch.tasks.TaskCancelledException; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.TransportVersionUtils; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.junit.After; +import org.junit.Before; +import org.mockito.Mockito; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.elasticsearch.test.tasks.MockTaskManager.SPY_TASK_MANAGER_SETTING; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThan; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; + +public class ComputeListenerTests extends ESTestCase { + private ThreadPool threadPool; + private 
TransportService transportService; + + @Before + public void setUpTransportService() { + threadPool = new TestThreadPool(getTestName()); + transportService = MockTransportService.createNewService( + Settings.builder().put(SPY_TASK_MANAGER_SETTING.getKey(), true).build(), + VersionInformation.CURRENT, + TransportVersionUtils.randomVersion(), + threadPool + ); + transportService.start(); + TaskCancellationService cancellationService = new TaskCancellationService(transportService); + transportService.getTaskManager().setTaskCancellationService(cancellationService); + Mockito.clearInvocations(transportService.getTaskManager()); + } + + @After + public void shutdownTransportService() { + transportService.close(); + terminate(threadPool); + } + + private CancellableTask newTask() { + return new CancellableTask( + randomIntBetween(1, 100), + "test-type", + "test-action", + "test-description", + TaskId.EMPTY_TASK_ID, + Map.of() + ); + } + + private ComputeResponse randomResponse() { + int numProfiles = randomIntBetween(0, 2); + List profiles = new ArrayList<>(numProfiles); + for (int i = 0; i < numProfiles; i++) { + profiles.add(new DriverProfile(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), List.of())); + } + return new ComputeResponse(profiles); + } + + public void testEmpty() { + PlainActionFuture results = new PlainActionFuture<>(); + try (ComputeListener ignored = new ComputeListener(transportService, newTask(), results)) { + assertFalse(results.isDone()); + } + assertTrue(results.isDone()); + assertThat(results.actionGet(10, TimeUnit.SECONDS).getProfiles(), empty()); + } + + public void testCollectComputeResults() { + PlainActionFuture future = new PlainActionFuture<>(); + List allProfiles = new ArrayList<>(); + try (ComputeListener computeListener = new ComputeListener(transportService, newTask(), future)) { + int tasks = randomIntBetween(1, 100); + for (int t = 0; t < tasks; t++) { + if (randomBoolean()) { + ActionListener subListener = computeListener.acquireAvoid(); + threadPool.schedule( + ActionRunnable.wrap(subListener, l -> l.onResponse(null)), + TimeValue.timeValueNanos(between(0, 100)), + threadPool.generic() + ); + } else { + ComputeResponse resp = randomResponse(); + allProfiles.addAll(resp.getProfiles()); + ActionListener subListener = computeListener.acquireCompute(); + threadPool.schedule( + ActionRunnable.wrap(subListener, l -> l.onResponse(resp)), + TimeValue.timeValueNanos(between(0, 100)), + threadPool.generic() + ); + } + } + } + ComputeResponse result = future.actionGet(10, TimeUnit.SECONDS); + assertThat( + result.getProfiles().stream().collect(Collectors.toMap(p -> p, p -> 1, Integer::sum)), + equalTo(allProfiles.stream().collect(Collectors.toMap(p -> p, p -> 1, Integer::sum))) + ); + Mockito.verifyNoInteractions(transportService.getTaskManager()); + } + + public void testCancelOnFailure() throws Exception { + Queue rootCauseExceptions = ConcurrentCollections.newQueue(); + IntStream.range(0, between(1, 100)) + .forEach( + n -> rootCauseExceptions.add(new CircuitBreakingException("breaking exception " + n, CircuitBreaker.Durability.TRANSIENT)) + ); + int successTasks = between(1, 50); + int failedTasks = between(1, 100); + PlainActionFuture rootListener = new PlainActionFuture<>(); + CancellableTask rootTask = newTask(); + try (ComputeListener computeListener = new ComputeListener(transportService, rootTask, rootListener)) { + for (int i = 0; i < successTasks; i++) { + ActionListener subListener = computeListener.acquireCompute(); + 
threadPool.schedule( + ActionRunnable.wrap(subListener, l -> l.onResponse(randomResponse())), + TimeValue.timeValueNanos(between(0, 100)), + threadPool.generic() + ); + } + for (int i = 0; i < failedTasks; i++) { + ActionListener subListener = randomBoolean() ? computeListener.acquireAvoid() : computeListener.acquireCompute(); + threadPool.schedule(ActionRunnable.wrap(subListener, l -> { + Exception ex = rootCauseExceptions.poll(); + if (ex == null) { + ex = new TaskCancelledException("task was cancelled"); + } + l.onFailure(ex); + }), TimeValue.timeValueNanos(between(0, 100)), threadPool.generic()); + } + } + assertBusy(rootListener::isDone); + ExecutionException failure = expectThrows(ExecutionException.class, () -> rootListener.get(1, TimeUnit.SECONDS)); + Throwable cause = failure.getCause(); + assertNotNull(failure); + assertThat(cause, instanceOf(CircuitBreakingException.class)); + assertThat(failure.getSuppressed().length, lessThan(10)); + Mockito.verify(transportService.getTaskManager(), Mockito.times(1)) + .cancelTaskAndDescendants(eq(rootTask), eq("cancelled on failure"), eq(false), any()); + } + + public void testCollectWarnings() throws Exception { + List allProfiles = new ArrayList<>(); + Map> allWarnings = new HashMap<>(); + ActionListener rootListener = new ActionListener<>() { + @Override + public void onResponse(ComputeResponse result) { + assertThat( + result.getProfiles().stream().collect(Collectors.toMap(p -> p, p -> 1, Integer::sum)), + equalTo(allProfiles.stream().collect(Collectors.toMap(p -> p, p -> 1, Integer::sum))) + ); + Map> responseHeaders = threadPool.getThreadContext() + .getResponseHeaders() + .entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> new HashSet<>(e.getValue()))); + assertThat(responseHeaders, equalTo(allWarnings)); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + }; + CountDownLatch latch = new CountDownLatch(1); + try ( + ComputeListener computeListener = new ComputeListener( + transportService, + newTask(), + ActionListener.runAfter(rootListener, latch::countDown) + ) + ) { + int tasks = randomIntBetween(1, 100); + for (int t = 0; t < tasks; t++) { + if (randomBoolean()) { + ActionListener subListener = computeListener.acquireAvoid(); + threadPool.schedule( + ActionRunnable.wrap(subListener, l -> l.onResponse(null)), + TimeValue.timeValueNanos(between(0, 100)), + threadPool.generic() + ); + } else { + ComputeResponse resp = randomResponse(); + allProfiles.addAll(resp.getProfiles()); + int numWarnings = randomIntBetween(1, 5); + Map warnings = new HashMap<>(); + for (int i = 0; i < numWarnings; i++) { + warnings.put("key" + between(1, 10), "value" + between(1, 10)); + } + for (Map.Entry e : warnings.entrySet()) { + allWarnings.computeIfAbsent(e.getKey(), v -> new HashSet<>()).add(e.getValue()); + } + ActionListener subListener = computeListener.acquireCompute(); + threadPool.schedule(ActionRunnable.wrap(subListener, l -> { + for (Map.Entry e : warnings.entrySet()) { + threadPool.getThreadContext().addResponseHeader(e.getKey(), e.getValue()); + } + l.onResponse(resp); + }), TimeValue.timeValueNanos(between(0, 100)), threadPool.generic()); + } + } + } + assertTrue(latch.await(10, TimeUnit.SECONDS)); + Mockito.verifyNoInteractions(transportService.getTaskManager()); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java index 
dde39b66664de..06c6b5de3cdea 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java @@ -19,10 +19,8 @@ import org.elasticsearch.xpack.esql.EsqlTestUtils; import org.elasticsearch.xpack.esql.analysis.Analyzer; import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; -import org.elasticsearch.xpack.esql.core.expression.function.FunctionRegistry; import org.elasticsearch.xpack.esql.core.index.EsIndex; import org.elasticsearch.xpack.esql.core.index.IndexResolution; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.optimizer.LogicalOptimizerContext; @@ -30,6 +28,7 @@ import org.elasticsearch.xpack.esql.optimizer.PhysicalOptimizerContext; import org.elasticsearch.xpack.esql.optimizer.PhysicalPlanOptimizer; import org.elasticsearch.xpack.esql.parser.EsqlParser; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.planner.Mapper; import org.elasticsearch.xpack.esql.session.EsqlConfigurationSerializationTests; @@ -224,7 +223,7 @@ static LogicalPlan parse(String query) { static PhysicalPlan mapAndMaybeOptimize(LogicalPlan logicalPlan) { var physicalPlanOptimizer = new PhysicalPlanOptimizer(new PhysicalOptimizerContext(TEST_CFG)); - FunctionRegistry functionRegistry = new EsqlFunctionRegistry(); + EsqlFunctionRegistry functionRegistry = new EsqlFunctionRegistry(); var mapper = new Mapper(functionRegistry); var physical = mapper.map(logicalPlan); if (randomBoolean()) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java index 5c794d707f5f4..f26e819685789 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java @@ -230,10 +230,16 @@ private void testCase( if (rewritesToMatchNone != YesNoSometimes.SOMETIMES) { assertThat(builder.stats().noNextScorer(), equalTo(0)); } + assertEqualsAndHashcodeStable(query, rewritten.toQuery(ctx)); } } } + private void assertEqualsAndHashcodeStable(Query query1, Query query2) { + assertEquals(query1, query2); + assertEquals(query1.hashCode(), query2.hashCode()); + } + private record StandardSetup(String fieldType, boolean multivaluedField, boolean empty, int count) implements Setup { @Override public XContentBuilder mapping(XContentBuilder builder) throws IOException { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java index 17dca8096de0f..925601bded425 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java @@ -1212,6 +1212,14 @@ public void testEnrichOnDefaultFieldWithKeep() { assertThat(fieldNames, equalTo(Set.of("emp_no", "emp_no.*", "language_name", 
"language_name.*"))); } + public void testDissectOverwriteName() { + Set fieldNames = EsqlSession.fieldNames(parser.createStatement(""" + from employees + | dissect first_name "%{first_name} %{more}" + | keep emp_no, first_name, more"""), Set.of()); + assertThat(fieldNames, equalTo(Set.of("emp_no", "emp_no.*", "first_name", "first_name.*"))); + } + public void testEnrichOnDefaultField() { Set fieldNames = EsqlSession.fieldNames(parser.createStatement(""" from employees diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java index 5883d41f32125..427c30311df0b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.xpack.esql.execution.PlanExecutor; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.session.IndexResolver; +import org.elasticsearch.xpack.esql.session.Result; import org.elasticsearch.xpack.esql.type.EsqlDataTypeRegistry; import org.junit.After; import org.junit.Before; @@ -33,6 +34,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.function.BiConsumer; import static org.elasticsearch.xpack.esql.EsqlTestUtils.withDefaultLimitWarning; import static org.hamcrest.Matchers.instanceOf; @@ -100,9 +102,10 @@ public void testFailedMetric() { var request = new EsqlQueryRequest(); // test a failed query: xyz field doesn't exist request.query("from test | stats m = max(xyz)"); - planExecutor.esql(request, randomAlphaOfLength(10), EsqlTestUtils.TEST_CFG, enrichResolver, new ActionListener<>() { + BiConsumer> runPhase = (p, r) -> fail("this shouldn't happen"); + planExecutor.esql(request, randomAlphaOfLength(10), EsqlTestUtils.TEST_CFG, enrichResolver, runPhase, new ActionListener<>() { @Override - public void onResponse(PhysicalPlan physicalPlan) { + public void onResponse(Result result) { fail("this shouldn't happen"); } @@ -119,9 +122,10 @@ public void onFailure(Exception e) { // fix the failing query: foo field does exist request.query("from test | stats m = max(foo)"); - planExecutor.esql(request, randomAlphaOfLength(10), EsqlTestUtils.TEST_CFG, enrichResolver, new ActionListener<>() { + runPhase = (p, r) -> r.onResponse(null); + planExecutor.esql(request, randomAlphaOfLength(10), EsqlTestUtils.TEST_CFG, enrichResolver, runPhase, new ActionListener<>() { @Override - public void onResponse(PhysicalPlan physicalPlan) {} + public void onResponse(Result result) {} @Override public void onFailure(Exception e) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/tree/EsqlNodeSubclassTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/tree/EsqlNodeSubclassTests.java index e50ba59a31b2d..fa20cfdec0ca0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/tree/EsqlNodeSubclassTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/tree/EsqlNodeSubclassTests.java @@ -9,47 +9,125 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.common.Strings; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.PathUtils; +import org.elasticsearch.core.SuppressForbidden; import 
org.elasticsearch.dissect.DissectParser; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.enrich.EnrichPolicy; import org.elasticsearch.xpack.esql.core.capabilities.UnresolvedException; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.Literal; -import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.expression.Order; import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttribute; +import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttributeTests; import org.elasticsearch.xpack.esql.core.expression.UnresolvedNamedExpression; -import org.elasticsearch.xpack.esql.core.expression.function.UnresolvedFunction; -import org.elasticsearch.xpack.esql.core.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.core.expression.function.Function; +import org.elasticsearch.xpack.esql.core.expression.predicate.fulltext.FullTextPredicate; +import org.elasticsearch.xpack.esql.core.expression.predicate.regex.Like; +import org.elasticsearch.xpack.esql.core.expression.predicate.regex.LikePattern; +import org.elasticsearch.xpack.esql.core.tree.AbstractNodeTestCase; import org.elasticsearch.xpack.esql.core.tree.Node; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.NodeSubclassTests; +import org.elasticsearch.xpack.esql.core.tree.NodeTests; import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.tree.SourceTests; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.expression.function.UnresolvedFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.ip.CIDRMatch; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Pow; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.logical.Grok; -import org.elasticsearch.xpack.esql.plan.logical.join.JoinConfig; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.join.JoinType; import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; import org.elasticsearch.xpack.esql.plan.physical.EsStatsQueryExec.Stat; import org.elasticsearch.xpack.esql.plan.physical.EsStatsQueryExec.StatsType; import org.elasticsearch.xpack.esql.plan.physical.OutputExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; +import org.mockito.exceptions.base.MockitoException; import java.io.IOException; +import java.lang.reflect.Constructor; +import java.lang.reflect.Method; import java.lang.reflect.Modifier; import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.lang.reflect.WildcardType; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; +import java.time.ZoneId; +import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.EnumSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.List; +import java.util.Map; +import java.util.Objects; import java.util.Set; import 
java.util.function.Consumer; import java.util.function.Predicate; +import java.util.function.Supplier; +import java.util.jar.JarEntry; +import java.util.jar.JarInputStream; -public class EsqlNodeSubclassTests> extends NodeSubclassTests { +import static java.util.Collections.emptyList; +import static org.mockito.Mockito.mock; + +/** + * Looks for all subclasses of {@link Node} and verifies that they + * implement {@code Node.info} and + * {@link Node#replaceChildren(List)} sanely. It'd be better if + * each subclass had its own test case that verified those methods + * and any other interesting things that they do, but we're a + * long way from that and this gets the job done for now. + *
+ * This test attempts to use reflection to create believable nodes
+ * and manipulate them in believable ways with as little knowledge
+ * of the actual subclasses as possible. This is problematic because
+ * it is possible, for example, for nodes to stackoverflow because
+ * they can contain themselves. So this class
+ * does have some {@link Node}-subclass-specific
+ * knowledge. As little as I could get away with though.
+ * <p>
+ * When there are actual tests for a subclass of {@linkplain Node}
+ * then this class will do two things:
+ * <ul>
+ * <li>Skip running any tests for that subclass entirely.</li>
+ * <li>Delegate to that test to build nodes of that type when a
+ * node of that type is called for.</li>
+ * </ul>
      + */ +public class EsqlNodeSubclassTests> extends NodeSubclassTests { + private static final Predicate CLASSNAME_FILTER = className -> { + boolean esqlCore = className.startsWith("org.elasticsearch.xpack.esql.core") != false; + boolean esqlProper = className.startsWith("org.elasticsearch.xpack.esql") != false; + return esqlCore || esqlProper; + }; + + /** + * Scans the {@code .class} files to identify all classes and checks if + * they are subclasses of {@link Node}. + */ + @ParametersFactory(argumentFormatting = "%1s") + @SuppressWarnings("rawtypes") + public static List nodeSubclasses() throws IOException { + return subclassesOf(Node.class, CLASSNAME_FILTER).stream() + .filter(c -> testClassFor(c) == null) + .map(c -> new Object[] { c }) + .toList(); + } private static final List> CLASSES_WITH_MIN_TWO_CHILDREN = List.of(Concat.class, CIDRMatch.class); @@ -61,13 +139,276 @@ public class EsqlNodeSubclassTests> extends NodeS UnresolvedNamedExpression.class ); + private final Class subclass; + public EsqlNodeSubclassTests(Class subclass) { - super(subclass); + this.subclass = subclass; + } + + public void testInfoParameters() throws Exception { + Constructor ctor = longestCtor(subclass); + Object[] nodeCtorArgs = ctorArgs(ctor); + T node = ctor.newInstance(nodeCtorArgs); + /* + * The count should be the same size as the longest constructor + * by convention. If it isn't then we're missing something. + */ + int expectedCount = ctor.getParameterCount(); + /* + * Except the first `Location` argument of the ctor is implicit + * in the parameters and not included. + */ + expectedCount -= 1; + assertEquals(expectedCount, info(node).properties().size()); + } + + /** + * Test {@code Node.transformPropertiesOnly} + * implementation on {@link #subclass} which tests the implementation of + * {@code Node.info}. And tests the actual {@link NodeInfo} subclass + * implementations in the process. + */ + public void testTransform() throws Exception { + Constructor ctor = longestCtor(subclass); + Object[] nodeCtorArgs = ctorArgs(ctor); + T node = ctor.newInstance(nodeCtorArgs); + + Type[] argTypes = ctor.getGenericParameterTypes(); + // start at 1 because we can't change Location. + for (int changedArgOffset = 1; changedArgOffset < ctor.getParameterCount(); changedArgOffset++) { + Object originalArgValue = nodeCtorArgs[changedArgOffset]; + + Type changedArgType = argTypes[changedArgOffset]; + Object changedArgValue = randomValueOtherThanMaxTries( + nodeCtorArgs[changedArgOffset], + () -> makeArg(changedArgType), + // JoinType has only 1 permitted enum element. Limit the number of retries. + 3 + ); + + B transformed = transformNodeProps(node, Object.class, prop -> Objects.equals(prop, originalArgValue) ? changedArgValue : prop); + + if (node.children().contains(originalArgValue) || node.children().equals(originalArgValue)) { + if (node.children().equals(emptyList()) && originalArgValue.equals(emptyList())) { + /* + * If the children are an empty list and the value + * we want to change is an empty list they'll be + * equal to one another so they'll come on this branch. + * This case is rare and hard to reason about so we're + * just going to assert nothing here and hope to catch + * it when we write non-reflection hack tests. + */ + continue; + } + // Transformation shouldn't apply to children. 
+ assertSame(node, transformed); + } else { + assertTransformedOrReplacedChildren(node, transformed, ctor, nodeCtorArgs, changedArgOffset, changedArgValue); + } + } + } + + /** + * Test {@link Node#replaceChildren(List)} implementation on {@link #subclass}. + */ + public void testReplaceChildren() throws Exception { + Constructor ctor = longestCtor(subclass); + Object[] nodeCtorArgs = ctorArgs(ctor); + T node = ctor.newInstance(nodeCtorArgs); + + Type[] argTypes = ctor.getGenericParameterTypes(); + // start at 1 because we can't change Location. + for (int changedArgOffset = 1; changedArgOffset < ctor.getParameterCount(); changedArgOffset++) { + Object originalArgValue = nodeCtorArgs[changedArgOffset]; + Type changedArgType = argTypes[changedArgOffset]; + + if (originalArgValue instanceof Collection col) { + + if (col.isEmpty() || col instanceof EnumSet) { + /* + * We skip empty lists here because they'll spuriously + * pass the conditions below if statements even if they don't + * have anything to do with children. This might cause us to + * ignore the case where a parameter gets copied into the + * children and just happens to be empty but I don't really + * know another way. + */ + + continue; + } + + if (col instanceof List originalList && node.children().equals(originalList)) { + // The arg we're looking at *is* the children + @SuppressWarnings("unchecked") // we pass a reasonable type so get reasonable results + List newChildren = (List) makeListOfSameSizeOtherThan(changedArgType, originalList); + B transformed = node.replaceChildren(newChildren); + assertTransformedOrReplacedChildren(node, transformed, ctor, nodeCtorArgs, changedArgOffset, newChildren); + } else if (false == col.isEmpty() && node.children().containsAll(col)) { + // The arg we're looking at is a collection contained within the children + List originalList = (List) originalArgValue; + + // First make the new children + @SuppressWarnings("unchecked") // we pass a reasonable type so get reasonable results + List newCollection = (List) makeListOfSameSizeOtherThan(changedArgType, originalList); + + // Now merge that list of children into the original list of children + List originalChildren = node.children(); + List newChildren = new ArrayList<>(originalChildren.size()); + int originalOffset = 0; + for (int i = 0; i < originalChildren.size(); i++) { + if (originalOffset < originalList.size() && originalChildren.get(i).equals(originalList.get(originalOffset))) { + newChildren.add(newCollection.get(originalOffset)); + originalOffset++; + } else { + newChildren.add(originalChildren.get(i)); + } + } + + // Finally! We can assert..... + B transformed = node.replaceChildren(newChildren); + assertTransformedOrReplacedChildren(node, transformed, ctor, nodeCtorArgs, changedArgOffset, newCollection); + } else { + // The arg we're looking at has nothing to do with the children + } + } else { + if (node.children().contains(originalArgValue)) { + // The arg we're looking at is one of the children + List newChildren = new ArrayList<>(node.children()); + @SuppressWarnings("unchecked") // makeArg produced reasonable values + B newChild = (B) randomValueOtherThan(nodeCtorArgs[changedArgOffset], () -> makeArg(changedArgType)); + newChildren.replaceAll(e -> Objects.equals(originalArgValue, e) ? 
newChild : e); + B transformed = node.replaceChildren(newChildren); + assertTransformedOrReplacedChildren(node, transformed, ctor, nodeCtorArgs, changedArgOffset, newChild); + } else { + // The arg we're looking at has nothing to do with the children + } + } + } + } + + /** + * Build a list of arguments to use when calling + * {@code ctor} that make sense when {@code ctor} + * builds subclasses of {@link Node}. + */ + private Object[] ctorArgs(Constructor> ctor) throws Exception { + Type[] argTypes = ctor.getGenericParameterTypes(); + Object[] args = new Object[argTypes.length]; + for (int i = 0; i < argTypes.length; i++) { + final int currentArgIndex = i; + args[i] = randomValueOtherThanMany(candidate -> { + for (int a = 0; a < currentArgIndex; a++) { + if (Objects.equals(args[a], candidate)) { + return true; + } + } + return false; + }, () -> { + try { + return makeArg(ctor.getDeclaringClass(), argTypes[currentArgIndex]); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + } + return args; + } + + /** + * Make an argument to feed the {@link #subclass}'s ctor. + */ + protected Object makeArg(Type argType) { + try { + return makeArg(subclass, argType); + } catch (Exception e) { + // Wrap to make `randomValueOtherThan` happy. + throw new RuntimeException(e); + } } - @Override - protected Object pluggableMakeArg(Class> toBuildClass, Class argClass) throws Exception { - if (argClass == Dissect.Parser.class) { + /** + * Make an argument to feed to the constructor for {@code toBuildClass}. + */ + @SuppressWarnings("unchecked") + private Object makeArg(Class> toBuildClass, Type argType) throws Exception { + + if (argType instanceof ParameterizedType pt) { + if (pt.getRawType() == Map.class) { + return makeMap(toBuildClass, pt); + } + if (pt.getRawType() == List.class) { + return makeList(toBuildClass, pt); + } + if (pt.getRawType() == Set.class) { + return makeSet(toBuildClass, pt); + } + if (pt.getRawType() == EnumSet.class) { + @SuppressWarnings("rawtypes") + Enum enm = (Enum) makeArg(toBuildClass, pt.getActualTypeArguments()[0]); + return EnumSet.of(enm); + } + if (toBuildClass == OutputExec.class && pt.getRawType() == Consumer.class) { + // pageConsumer just needs a BiConsumer. But the consumer has to have reasonable + // `equals` for randomValueOtherThan, so we just ensure that a new instance is + // created each time which uses Object::equals identity. + return new Consumer() { + @Override + public void accept(Page page) { + // do nothing + } + }; + } + + throw new IllegalArgumentException("Unsupported parameterized type [" + pt + "], for " + toBuildClass.getSimpleName()); + } + if (argType instanceof WildcardType wt) { + if (wt.getLowerBounds().length > 0 || wt.getUpperBounds().length > 1) { + throw new IllegalArgumentException("Unsupported wildcard type [" + wt + "]"); + } + return makeArg(toBuildClass, wt.getUpperBounds()[0]); + } + Class argClass = (Class) argType; + + /* + * Sometimes all of the required type information isn't in the ctor + * so we have to hard code it here. + */ + if (toBuildClass == FieldAttribute.class) { + // `parent` is nullable. + if (argClass == FieldAttribute.class && randomBoolean()) { + return null; + } + } else if (toBuildClass == NodeTests.ChildrenAreAProperty.class) { + /* + * While any subclass of DummyFunction will do here we want to prevent + * stack overflow so we use the one without children. 
+ */ + if (argClass == NodeTests.Dummy.class) { + return makeNode(NodeTests.NoChildren.class); + } + } else if (FullTextPredicate.class.isAssignableFrom(toBuildClass)) { + /* + * FullTextPredicate analyzes its string arguments on + * construction so they have to be valid. + */ + if (argClass == String.class) { + int size = between(0, 5); + StringBuilder b = new StringBuilder(); + for (int i = 0; i < size; i++) { + if (i != 0) { + b.append(';'); + } + b.append(randomAlphaOfLength(5)).append('=').append(randomAlphaOfLength(5)); + } + return b.toString(); + } + } else if (toBuildClass == Like.class) { + + if (argClass == LikePattern.class) { + return new LikePattern(randomAlphaOfLength(16), randomFrom('\\', '|', '/', '`')); + } + + } else if (argClass == Dissect.Parser.class) { // Dissect.Parser is a record / final, cannot be mocked String pattern = randomDissectPattern(); String appendSeparator = randomAlphaOfLength(16); @@ -86,54 +427,206 @@ protected Object pluggableMakeArg(Class> toBuildClass, Class (NamedExpression) makeArg(NamedExpression.class)), - randomList(0, 10, () -> (Expression) makeArg(Expression.class)) - ); + } else if (argClass == JoinType.class) { + return JoinType.LEFT; + } + if (Expression.class == argClass) { + /* + * Rather than use any old subclass of expression lets + * use a simple one. Without this we're very prone to + * stackoverflow errors while building the tree. + */ + return UnresolvedAttributeTests.randomUnresolvedAttribute(); + } + if (EnrichPolicy.class == argClass) { + List enrichFields = randomSubsetOf(List.of("e1", "e2", "e3")); + return new EnrichPolicy(randomFrom("match", "range"), null, List.of(), randomFrom("m1", "m2"), enrichFields); + } + + if (Node.class.isAssignableFrom(argClass)) { + /* + * Rather than attempting to mock subclasses of node + * and emulate them we just try and instantiate an + * appropriate subclass + */ + @SuppressWarnings("unchecked") // safe because this is the lowest possible bounds for Node + Class> asNodeSubclass = (Class>) argType; + return makeNode(asNodeSubclass); + } + + if (argClass.isEnum()) { + // Can't mock enums but luckily we can just pick one + return randomFrom(argClass.getEnumConstants()); + } + if (argClass == boolean.class) { + // Can't mock primitives.... + return randomBoolean(); + } + if (argClass == int.class) { + return randomInt(); + } + if (argClass == String.class) { + // Nor strings + return randomAlphaOfLength(5); + } + if (argClass == Source.class) { + // Location is final and can't be mocked but we have a handy method to generate ones. + return SourceTests.randomSource(); + } + if (argClass == ZoneId.class) { + // ZoneId is a sealed class (cannot be mocked) starting with Java 19 + return randomZone(); + } + try { + return mock(argClass); + } catch (MockitoException e) { + throw new RuntimeException("failed to mock [" + argClass.getName() + "] for [" + toBuildClass.getName() + "]", e); } + } - return null; + private List makeList(Class> toBuildClass, ParameterizedType listType) throws Exception { + return makeList(toBuildClass, listType, randomSizeForCollection(toBuildClass)); } - @Override - protected Object pluggableMakeParameterizedArg(Class> toBuildClass, ParameterizedType pt) { - if (toBuildClass == OutputExec.class && pt.getRawType() == Consumer.class) { - // pageConsumer just needs a BiConsumer. But the consumer has to have reasonable - // `equals` for randomValueOtherThan, so we just ensure that a new instance is - // created each time which uses Object::equals identity. 
- return new Consumer() { - @Override - public void accept(Page page) { - // do nothing - } - }; + private List makeList(Class> toBuildClass, ParameterizedType listType, int size) throws Exception { + List list = new ArrayList<>(); + for (int i = 0; i < size; i++) { + list.add(makeArg(toBuildClass, listType.getActualTypeArguments()[0])); } - return null; + return list; } - @Override - protected boolean hasAtLeastTwoChildren(Class> toBuildClass) { - return CLASSES_WITH_MIN_TWO_CHILDREN.stream().anyMatch(toBuildClass::equals); + private Set makeSet(Class> toBuildClass, ParameterizedType listType) throws Exception { + return makeSet(toBuildClass, listType, randomSizeForCollection(toBuildClass)); } - static final Predicate CLASSNAME_FILTER = className -> (className.startsWith("org.elasticsearch.xpack.esql.core") != false - || className.startsWith("org.elasticsearch.xpack.esql") != false); + private Set makeSet(Class> toBuildClass, ParameterizedType listType, int size) throws Exception { + Set list = new HashSet<>(); + for (int i = 0; i < size; i++) { + list.add(makeArg(toBuildClass, listType.getActualTypeArguments()[0])); + } + return list; + } - @Override - protected Predicate pluggableClassNameFilter() { - return CLASSNAME_FILTER; + private Object makeMap(Class> toBuildClass, ParameterizedType pt) throws Exception { + Map map = new HashMap<>(); + int size = randomSizeForCollection(toBuildClass); + while (map.size() < size) { + Object key = makeArg(toBuildClass, pt.getActualTypeArguments()[0]); + Object value = makeArg(toBuildClass, pt.getActualTypeArguments()[1]); + map.put(key, value); + } + return map; } - /** Scans the {@code .class} files to identify all classes and checks if they are subclasses of {@link Node}. */ - @ParametersFactory(argumentFormatting = "%1s") - @SuppressWarnings("rawtypes") - public static List nodeSubclasses() throws IOException { - return subclassesOf(Node.class, CLASSNAME_FILTER).stream() - .filter(c -> testClassFor(c) == null) - .map(c -> new Object[] { c }) - .toList(); + private int randomSizeForCollection(Class> toBuildClass) { + int minCollectionLength = 0; + int maxCollectionLength = 10; + + if (hasAtLeastTwoChildren(toBuildClass)) { + minCollectionLength = 2; + } + return between(minCollectionLength, maxCollectionLength); + } + + private List makeListOfSameSizeOtherThan(Type listType, List original) throws Exception { + if (original.isEmpty()) { + throw new IllegalArgumentException("Can't make a different empty list"); + } + return randomValueOtherThan(original, () -> { + try { + return makeList(subclass, (ParameterizedType) listType, original.size()); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + + } + + public > T makeNode(Class nodeClass) throws Exception { + if (Modifier.isAbstract(nodeClass.getModifiers())) { + nodeClass = randomFrom(innerSubclassesOf(nodeClass)); + } + Class testSubclassFor = testClassFor(nodeClass); + if (testSubclassFor != null) { + // Delegate to the test class for a node if there is one + Method m = testSubclassFor.getMethod("random" + Strings.capitalize(nodeClass.getSimpleName())); + assert Modifier.isStatic(m.getModifiers()) : "Expected static method, got:" + m; + return nodeClass.cast(m.invoke(null)); + } + Constructor ctor = longestCtor(nodeClass); + Object[] nodeCtorArgs = ctorArgs(ctor); + return ctor.newInstance(nodeCtorArgs); + } + + private void assertTransformedOrReplacedChildren( + T node, + B transformed, + Constructor ctor, + Object[] nodeCtorArgs, + int changedArgOffset, + Object 
changedArgValue + ) throws Exception { + if (node instanceof Function) { + /* + * Functions have a weaker definition of transform then other + * things: + * + * Transforming using the way we did above should only change + * the one property of the node that we intended to transform. + */ + assertEquals(node.source(), transformed.source()); + List op = node.nodeProperties(); + List tp = transformed.nodeProperties(); + for (int p = 0; p < op.size(); p++) { + if (p == changedArgOffset - 1) { // -1 because location isn't in the list + assertEquals(changedArgValue, tp.get(p)); + } else { + assertEquals(op.get(p), tp.get(p)); + } + } + } else { + /* + * The stronger assertion for all non-Functions: transforming + * a node changes *only* the transformed value such that you + * can rebuild a copy of the node using its constructor changing + * only one argument and it'll be *equal* to the result of the + * transformation. + */ + Type[] argTypes = ctor.getGenericParameterTypes(); + Object[] args = new Object[argTypes.length]; + for (int i = 0; i < argTypes.length; i++) { + args[i] = nodeCtorArgs[i] == nodeCtorArgs[changedArgOffset] ? changedArgValue : nodeCtorArgs[i]; + } + T reflectionTransformed = ctor.newInstance(args); + assertEquals(reflectionTransformed, transformed); + } + } + + /** + * Find the longest constructor of the given class. + * By convention, for all subclasses of {@link Node}, + * this constructor should have "all" of the state of + * the node. All other constructors should all delegate + * to this constructor. + */ + static Constructor longestCtor(Class clazz) { + Constructor longest = null; + for (Constructor ctor : clazz.getConstructors()) { + if (longest == null || longest.getParameterCount() < ctor.getParameterCount()) { + @SuppressWarnings("unchecked") // Safe because the ctor has to be a ctor for T + Constructor castCtor = (Constructor) ctor; + longest = castCtor; + } + } + if (longest == null) { + throw new IllegalArgumentException("Couldn't find any constructors for [" + clazz.getName() + "]"); + } + return longest; + } + + private boolean hasAtLeastTwoChildren(Class> toBuildClass) { + return CLASSES_WITH_MIN_TWO_CHILDREN.stream().anyMatch(toBuildClass::equals); } static boolean isPlanNodeClass(Class> toBuildClass) { @@ -179,4 +672,132 @@ static EsQueryExec.FieldSort randomFieldSort() { static FieldAttribute field(String name, DataType type) { return new FieldAttribute(Source.EMPTY, name, new EsField(name, type, Collections.emptyMap(), false)); } + + public static Set> subclassesOf(Class clazz) throws IOException { + return subclassesOf(clazz, CLASSNAME_FILTER); + } + + private Set> innerSubclassesOf(Class clazz) throws IOException { + return subclassesOf(clazz, CLASSNAME_FILTER); + } + + /** + * Cache of subclasses. We use a cache because it significantly speeds up + * the test. + */ + private static final Map, Set> subclassCache = new HashMap<>(); + + /** + * Find all subclasses of a particular class. 
+ */ + public static Set> subclassesOf(Class clazz, Predicate classNameFilter) throws IOException { + @SuppressWarnings("unchecked") // The map is built this way + Set> lookup = (Set>) subclassCache.get(clazz); + if (lookup != null) { + return lookup; + } + Set> results = new LinkedHashSet<>(); + String[] paths = System.getProperty("java.class.path").split(System.getProperty("path.separator")); + for (String path : paths) { + Path root = PathUtils.get(path); + int rootLength = root.toString().length() + 1; + + // load classes from jar files + // NIO FileSystem API is not used since it trips the SecurityManager + // https://bugs.openjdk.java.net/browse/JDK-8160798 + // so iterate the jar "by hand" + if (path.endsWith(".jar") && path.contains("x-pack-ql")) { + try (JarInputStream jar = jarStream(root)) { + JarEntry je = null; + while ((je = jar.getNextJarEntry()) != null) { + String name = je.getName(); + if (name.endsWith(".class")) { + String className = name.substring(0, name.length() - ".class".length()).replace("/", "."); + maybeLoadClass(clazz, className, root + "!/" + name, classNameFilter, results); + } + } + } + } + // for folders, just use the FileSystems API + else { + Files.walkFileTree(root, new SimpleFileVisitor<>() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + if (Files.isRegularFile(file) && file.getFileName().toString().endsWith(".class")) { + String fileName = file.toString(); + // Chop off the root and file extension + String className = fileName.substring(rootLength, fileName.length() - ".class".length()); + // Go from "path" style to class style + className = className.replace(PathUtils.getDefaultFileSystem().getSeparator(), "."); + maybeLoadClass(clazz, className, fileName, classNameFilter, results); + } + return FileVisitResult.CONTINUE; + } + }); + } + } + subclassCache.put(clazz, results); + return results; + } + + @SuppressForbidden(reason = "test reads from jar") + private static JarInputStream jarStream(Path path) throws IOException { + return new JarInputStream(path.toUri().toURL().openStream()); + } + + /** + * Load classes from predefined packages (hack to limit the scope) and if they match the hierarchy, add them to the cache + */ + private static void maybeLoadClass( + Class clazz, + String className, + String location, + Predicate classNameFilter, + Set> results + ) throws IOException { + if (classNameFilter.test(className) == false) { + return; + } + + Class c; + try { + c = Class.forName(className); + } catch (ClassNotFoundException e) { + throw new IOException("Couldn't load " + location, e); + } + + if (false == Modifier.isAbstract(c.getModifiers()) && false == c.isAnonymousClass() && clazz.isAssignableFrom(c)) { + Class s = c.asSubclass(clazz); + results.add(s); + } + } + + /** + * The test class for some subclass of node or {@code null} + * if there isn't such a class or it doesn't extend + * {@link AbstractNodeTestCase}. 
+ */ + protected static Class testClassFor(Class nodeSubclass) { + String testClassName = nodeSubclass.getName() + "Tests"; + try { + Class c = Class.forName(testClassName); + if (AbstractNodeTestCase.class.isAssignableFrom(c)) { + return c; + } + return null; + } catch (ClassNotFoundException e) { + return null; + } + } + + private static T randomValueOtherThanManyMaxTries(Predicate input, Supplier randomSupplier, int maxTries) { + int[] maxTriesHolder = { maxTries }; + Predicate inputWithMaxTries = t -> input.test(t) && maxTriesHolder[0]-- > 0; + + return ESTestCase.randomValueOtherThanMany(inputWithMaxTries, randomSupplier); + } + + public static T randomValueOtherThanMaxTries(T input, Supplier randomSupplier, int maxTries) { + return randomValueOtherThanManyMaxTries(v -> Objects.equals(input, v), randomSupplier, maxTries); + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java index 86baee58ca53f..bebfcd7f7bdbc 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java @@ -95,7 +95,8 @@ protected final NamedWriteableRegistry getNamedWriteableRegistry() { List entries = new ArrayList<>(UnaryScalarFunction.getNamedWriteables()); entries.addAll(Attribute.getNamedWriteables()); entries.addAll(EsField.getNamedWriteables()); - entries.add(new NamedWriteableRegistry.Entry(MultiTypeEsField.class, "MultiTypeEsField", MultiTypeEsField::new)); + entries.add(MultiTypeEsField.ENTRY); + entries.addAll(Expression.getNamedWriteables()); return new NamedWriteableRegistry(entries); } @@ -112,7 +113,7 @@ protected final MultiTypeEsField copyInstance(MultiTypeEsField instance, Transpo (out, v) -> new PlanStreamOutput(out, new PlanNameRegistry(), config).writeNamedWriteable(v), in -> { PlanStreamInput pin = new PlanStreamInput(in, new PlanNameRegistry(), in.namedWriteableRegistry(), config); - return pin.readNamedWriteable(MultiTypeEsField.class); + return (MultiTypeEsField) pin.readNamedWriteable(EsField.class); }, version ); diff --git a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java index e378ce06611c6..6d962ec5baceb 100644 --- a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java +++ b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java @@ -30,6 +30,7 @@ import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.index.shard.IndexLongFieldRange; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.protocol.xpack.frozen.FreezeRequest; @@ -44,6 +45,7 @@ import java.time.Instant; import java.util.Collection; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING; @@ -76,8 +78,15 @@ public void testTimestampRangeRecalculatedOnStalePrimaryAllocation() throws IOEx createIndex("index", 1, 1); - 
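randomValueOtherThanManyMaxTries above bounds the retry loop by folding a decrementing counter into the rejection predicate, so an unsatisfiable predicate cannot spin forever. A hedged, ES-free sketch of the same trick using plain JDK functional types (the class and method names here are illustrative, not from the PR):

```java
// Sketch of the bounded-retry trick: the rejection predicate also spends a
// retry budget, so once maxTries rejections have happened it stops rejecting
// and the last generated value is returned.
import java.util.Random;
import java.util.function.Predicate;
import java.util.function.Supplier;

final class BoundedRetry {
    static <T> T otherThanMany(Predicate<T> reject, Supplier<T> random, int maxTries) {
        int[] triesLeft = { maxTries };
        Predicate<T> rejectWithBudget = t -> reject.test(t) && triesLeft[0]-- > 0;
        T candidate;
        do {
            candidate = random.get();
        } while (rejectWithBudget.test(candidate));
        return candidate;
    }

    public static void main(String[] args) {
        Random rng = new Random();
        // Even with an impossible-to-satisfy predicate this terminates after 5 tries.
        Integer v = otherThanMany(x -> true, () -> rng.nextInt(10), 5);
        System.out.println(v);
    }
}
```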
final DocWriteResponse indexResponse = prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, "2010-01-06T02:03:04.567Z") - .get(); + String timestampVal = "2010-01-06T02:03:04.567Z"; + String eventIngestedVal = "2010-01-06T02:03:05.567Z"; // one second later + + final DocWriteResponse indexResponse = prepareIndex("index").setSource( + DataStream.TIMESTAMP_FIELD_NAME, + timestampVal, + IndexMetadata.EVENT_INGESTED_FIELD_NAME, + eventIngestedVal + ).get(); ensureGreen("index"); @@ -112,20 +121,28 @@ public void testTimestampRangeRecalculatedOnStalePrimaryAllocation() throws IOEx ensureYellowAndNoInitializingShards("index"); - final IndexLongFieldRange timestampFieldRange = clusterAdmin().prepareState() + IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("index"); + final IndexLongFieldRange timestampFieldRange = indexMetadata.getTimestampRange(); + assertThat(timestampFieldRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); + assertThat(timestampFieldRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertTrue(timestampFieldRange.isComplete()); + assertThat(timestampFieldRange.getMin(), equalTo(Instant.parse(timestampVal).toEpochMilli())); + assertThat(timestampFieldRange.getMax(), equalTo(Instant.parse(timestampVal).toEpochMilli())); + + IndexLongFieldRange eventIngestedFieldRange = clusterAdmin().prepareState() .get() .getState() .metadata() .index("index") - .getTimestampRange(); - assertThat(timestampFieldRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); - assertThat(timestampFieldRange, not(sameInstance(IndexLongFieldRange.EMPTY))); - assertTrue(timestampFieldRange.isComplete()); - assertThat(timestampFieldRange.getMin(), equalTo(Instant.parse("2010-01-06T02:03:04.567Z").toEpochMilli())); - assertThat(timestampFieldRange.getMax(), equalTo(Instant.parse("2010-01-06T02:03:04.567Z").toEpochMilli())); + .getEventIngestedRange(); + assertThat(eventIngestedFieldRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); + assertThat(eventIngestedFieldRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertTrue(eventIngestedFieldRange.isComplete()); + assertThat(eventIngestedFieldRange.getMin(), equalTo(Instant.parse(eventIngestedVal).toEpochMilli())); + assertThat(eventIngestedFieldRange.getMax(), equalTo(Instant.parse(eventIngestedVal).toEpochMilli())); } - public void testTimestampFieldTypeExposedByAllIndicesServices() throws Exception { + public void testTimestampAndEventIngestedFieldTypeExposedByAllIndicesServices() throws Exception { internalCluster().startNodes(between(2, 4)); final String locale; @@ -155,6 +172,11 @@ public void testTimestampFieldTypeExposedByAllIndicesServices() throws Exception jsonBuilder().startObject() .startObject("_doc") .startObject("properties") + .startObject(IndexMetadata.EVENT_INGESTED_FIELD_NAME) + .field("type", "date") + .field("format", "dd LLL yyyy HH:mm:ssX") + .field("locale", locale) + .endObject() .startObject(DataStream.TIMESTAMP_FIELD_NAME) .field("type", "date") .field("format", "dd LLL yyyy HH:mm:ssX") @@ -178,11 +200,11 @@ public void testTimestampFieldTypeExposedByAllIndicesServices() throws Exception ensureGreen("index"); if (randomBoolean()) { - prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, date).get(); + prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, date, IndexMetadata.EVENT_INGESTED_FIELD_NAME, date).get(); } for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { - 
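The assertions above check that, once the index is frozen, the @timestamp and event.ingested fields each get their own recomputed IndexLongFieldRange, and that a single document collapses a range to min == max. A purely conceptual, ES-free sketch of that idea (the record and class names are invented for illustration):

```java
// Conceptual sketch (not Elasticsearch code): per time field, a [min, max]
// epoch-millis range is computed over the documents actually present; with a
// single document the minimum and maximum collapse to that document's value.
import java.time.Instant;
import java.util.List;

record LongRange(long min, long max) {
    static LongRange of(List<Instant> values) {
        long min = values.stream().mapToLong(Instant::toEpochMilli).min().orElseThrow();
        long max = values.stream().mapToLong(Instant::toEpochMilli).max().orElseThrow();
        return new LongRange(min, max);
    }
}

class RangeDemo {
    public static void main(String[] args) {
        LongRange timestamp = LongRange.of(List.of(Instant.parse("2010-01-06T02:03:04.567Z")));
        LongRange eventIngested = LongRange.of(List.of(Instant.parse("2010-01-06T02:03:05.567Z")));
        // Each field is tracked independently; with one doc, min == max.
        System.out.println(timestamp + " " + eventIngested);
    }
}
```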
assertNull(indicesService.getTimestampFieldType(index)); + assertNull(indicesService.getTimestampFieldTypeInfo(index)); } assertAcked( @@ -190,15 +212,129 @@ public void testTimestampFieldTypeExposedByAllIndicesServices() throws Exception ); ensureGreen("index"); for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { - final PlainActionFuture timestampFieldTypeFuture = new PlainActionFuture<>(); + final PlainActionFuture> future = new PlainActionFuture<>(); assertBusy(() -> { - final DateFieldMapper.DateFieldType timestampFieldType = indicesService.getTimestampFieldType(index); + DateFieldRangeInfo timestampsFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(index); + DateFieldMapper.DateFieldType timestampFieldType = timestampsFieldTypeInfo.getTimestampFieldType(); + DateFieldMapper.DateFieldType eventIngestedFieldType = timestampsFieldTypeInfo.getEventIngestedFieldType(); + assertNotNull(eventIngestedFieldType); assertNotNull(timestampFieldType); - timestampFieldTypeFuture.onResponse(timestampFieldType); + future.onResponse( + Map.of( + DataStream.TIMESTAMP_FIELD_NAME, + timestampFieldType, + IndexMetadata.EVENT_INGESTED_FIELD_NAME, + eventIngestedFieldType + ) + ); + }); + assertTrue(future.isDone()); + assertThat(future.get().get(DataStream.TIMESTAMP_FIELD_NAME).dateTimeFormatter().locale().toString(), equalTo(locale)); + assertThat(future.get().get(DataStream.TIMESTAMP_FIELD_NAME).dateTimeFormatter().parseMillis(date), equalTo(1580817683000L)); + assertThat(future.get().get(IndexMetadata.EVENT_INGESTED_FIELD_NAME).dateTimeFormatter().locale().toString(), equalTo(locale)); + assertThat( + future.get().get(IndexMetadata.EVENT_INGESTED_FIELD_NAME).dateTimeFormatter().parseMillis(date), + equalTo(1580817683000L) + ); + } + + assertAcked( + client().execute( + FreezeIndexAction.INSTANCE, + new FreezeRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "index").setFreeze(false) + ).actionGet() + ); + ensureGreen("index"); + for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { + assertNull(indicesService.getTimestampFieldTypeInfo(index)); + } + } + + public void testTimestampOrEventIngestedFieldTypeExposedByAllIndicesServices() throws Exception { + internalCluster().startNodes(between(2, 4)); + + final String locale; + final String date; + + switch (between(1, 3)) { + case 1 -> { + locale = ""; + date = "04 Feb 2020 12:01:23Z"; + } + case 2 -> { + locale = "en_GB"; + date = "04 Feb 2020 12:01:23Z"; + } + case 3 -> { + locale = "fr_FR"; + date = "04 févr. 
2020 12:01:23Z"; + } + default -> throw new AssertionError("impossible"); + } + + String timeField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); + assertAcked( + prepareCreate("index").setSettings( + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + .setMapping( + jsonBuilder().startObject() + .startObject("_doc") + .startObject("properties") + .startObject(timeField) + .field("type", "date") + .field("format", "dd LLL yyyy HH:mm:ssX") + .field("locale", locale) + .endObject() + .endObject() + .endObject() + .endObject() + ) + ); + + final Index index = clusterAdmin().prepareState() + .clear() + .setIndices("index") + .setMetadata(true) + .get() + .getState() + .metadata() + .index("index") + .getIndex(); + + ensureGreen("index"); + if (randomBoolean()) { + prepareIndex("index").setSource(timeField, date).get(); + } + + for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { + assertNull(indicesService.getTimestampFieldTypeInfo(index)); + } + + assertAcked( + client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "index")).actionGet() + ); + ensureGreen("index"); + for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { + // final PlainActionFuture timestampFieldTypeFuture = new PlainActionFuture<>(); + final PlainActionFuture> future = new PlainActionFuture<>(); + assertBusy(() -> { + DateFieldRangeInfo timestampsFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(index); + DateFieldMapper.DateFieldType timestampFieldType = timestampsFieldTypeInfo.getTimestampFieldType(); + DateFieldMapper.DateFieldType eventIngestedFieldType = timestampsFieldTypeInfo.getEventIngestedFieldType(); + if (timeField == DataStream.TIMESTAMP_FIELD_NAME) { + assertNotNull(timestampFieldType); + assertNull(eventIngestedFieldType); + future.onResponse(Map.of(timeField, timestampFieldType)); + } else { + assertNull(timestampFieldType); + assertNotNull(eventIngestedFieldType); + future.onResponse(Map.of(timeField, eventIngestedFieldType)); + } }); - assertTrue(timestampFieldTypeFuture.isDone()); - assertThat(timestampFieldTypeFuture.get().dateTimeFormatter().locale().toString(), equalTo(locale)); - assertThat(timestampFieldTypeFuture.get().dateTimeFormatter().parseMillis(date), equalTo(1580817683000L)); + assertTrue(future.isDone()); + assertThat(future.get().get(timeField).dateTimeFormatter().locale().toString(), equalTo(locale)); + assertThat(future.get().get(timeField).dateTimeFormatter().parseMillis(date), equalTo(1580817683000L)); } assertAcked( @@ -209,7 +345,7 @@ public void testTimestampFieldTypeExposedByAllIndicesServices() throws Exception ); ensureGreen("index"); for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { - assertNull(indicesService.getTimestampFieldType(index)); + assertNull(indicesService.getTimestampFieldTypeInfo(index)); } } diff --git a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java index 92d042a98b16e..ccb917c9dbda5 100644 --- a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java +++ 
b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java @@ -228,6 +228,7 @@ public void testSearchAndGetAPIsAreThrottled() throws IOException { public void testFreezeAndUnfreeze() { final IndexService originalIndexService = createIndex("index", Settings.builder().put("index.number_of_shards", 2).build()); assertThat(originalIndexService.getMetadata().getTimestampRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); + assertThat(originalIndexService.getMetadata().getEventIngestedRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); @@ -250,6 +251,7 @@ public void testFreezeAndUnfreeze() { IndexShard shard = indexService.getShard(0); assertEquals(0, shard.refreshStats().getTotal()); assertThat(indexService.getMetadata().getTimestampRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); + assertThat(indexService.getMetadata().getEventIngestedRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); } assertAcked( client().execute( @@ -268,6 +270,7 @@ public void testFreezeAndUnfreeze() { Engine engine = IndexShardTestCase.getEngine(shard); assertThat(engine, Matchers.instanceOf(InternalEngine.class)); assertThat(indexService.getMetadata().getTimestampRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); + assertThat(indexService.getMetadata().getEventIngestedRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); } prepareIndex("index").setId("4").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); } @@ -671,17 +674,15 @@ public void testComputesTimestampRangeFromMilliseconds() { client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "index")).actionGet() ); - final IndexLongFieldRange timestampFieldRange = clusterAdmin().prepareState() - .get() - .getState() - .metadata() - .index("index") - .getTimestampRange(); + IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("index"); + final IndexLongFieldRange timestampFieldRange = indexMetadata.getTimestampRange(); assertThat(timestampFieldRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); assertThat(timestampFieldRange, not(sameInstance(IndexLongFieldRange.EMPTY))); assertTrue(timestampFieldRange.isComplete()); assertThat(timestampFieldRange.getMin(), equalTo(Instant.parse("2010-01-05T01:02:03.456Z").toEpochMilli())); assertThat(timestampFieldRange.getMax(), equalTo(Instant.parse("2010-01-06T02:03:04.567Z").toEpochMilli())); + + assertThat(indexMetadata.getEventIngestedRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); } public void testComputesTimestampRangeFromNanoseconds() throws IOException { @@ -705,18 +706,98 @@ public void testComputesTimestampRangeFromNanoseconds() throws IOException { client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "index")).actionGet() ); - final IndexLongFieldRange timestampFieldRange = clusterAdmin().prepareState() - .get() - .getState() - .metadata() - .index("index") - .getTimestampRange(); + IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("index"); + final IndexLongFieldRange timestampFieldRange = indexMetadata.getTimestampRange(); assertThat(timestampFieldRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); assertThat(timestampFieldRange, 
not(sameInstance(IndexLongFieldRange.EMPTY))); assertTrue(timestampFieldRange.isComplete()); final DateFieldMapper.Resolution resolution = DateFieldMapper.Resolution.NANOSECONDS; assertThat(timestampFieldRange.getMin(), equalTo(resolution.convert(Instant.parse("2010-01-05T01:02:03.456789012Z")))); assertThat(timestampFieldRange.getMax(), equalTo(resolution.convert(Instant.parse("2010-01-06T02:03:04.567890123Z")))); + + assertThat(indexMetadata.getEventIngestedRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); } + public void testComputesEventIngestedRangeFromMilliseconds() { + final int shardCount = between(1, 3); + createIndex("index", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shardCount).build()); + prepareIndex("index").setSource(IndexMetadata.EVENT_INGESTED_FIELD_NAME, "2010-01-05T01:02:03.456Z").get(); + prepareIndex("index").setSource(IndexMetadata.EVENT_INGESTED_FIELD_NAME, "2010-01-06T02:03:04.567Z").get(); + + assertAcked( + client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "index")).actionGet() + ); + + IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("index"); + final IndexLongFieldRange eventIngestedRange = indexMetadata.getEventIngestedRange(); + assertThat(eventIngestedRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); + assertThat(eventIngestedRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertTrue(eventIngestedRange.isComplete()); + assertThat(eventIngestedRange.getMin(), equalTo(Instant.parse("2010-01-05T01:02:03.456Z").toEpochMilli())); + assertThat(eventIngestedRange.getMax(), equalTo(Instant.parse("2010-01-06T02:03:04.567Z").toEpochMilli())); + + assertThat(indexMetadata.getTimestampRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); + } + + public void testComputesEventIngestedRangeFromNanoseconds() throws IOException { + + final XContentBuilder mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject(IndexMetadata.EVENT_INGESTED_FIELD_NAME) + .field("type", "date_nanos") + .field("format", "strict_date_optional_time_nanos") + .endObject() + .endObject() + .endObject(); + + final int shardCount = between(1, 3); + createIndex("index", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shardCount).build(), mapping); + prepareIndex("index").setSource(IndexMetadata.EVENT_INGESTED_FIELD_NAME, "2010-01-05T01:02:03.456789012Z").get(); + prepareIndex("index").setSource(IndexMetadata.EVENT_INGESTED_FIELD_NAME, "2010-01-06T02:03:04.567890123Z").get(); + + assertAcked( + client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "index")).actionGet() + ); + + IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("index"); + final IndexLongFieldRange eventIngestedRange = indexMetadata.getEventIngestedRange(); + assertThat(eventIngestedRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); + assertThat(eventIngestedRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertTrue(eventIngestedRange.isComplete()); + final DateFieldMapper.Resolution resolution = DateFieldMapper.Resolution.NANOSECONDS; + assertThat(eventIngestedRange.getMin(), equalTo(resolution.convert(Instant.parse("2010-01-05T01:02:03.456789012Z")))); + assertThat(eventIngestedRange.getMax(), equalTo(resolution.convert(Instant.parse("2010-01-06T02:03:04.567890123Z")))); + + assertThat(indexMetadata.getTimestampRange(), 
sameInstance(IndexLongFieldRange.UNKNOWN)); + } + + public void testComputesEventIngestedAndTimestampRangesWhenBothPresent() { + final int shardCount = between(1, 3); + createIndex("index", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shardCount).build()); + prepareIndex("index").setSource(IndexMetadata.EVENT_INGESTED_FIELD_NAME, "2010-01-05T01:02:03.456Z").get(); + prepareIndex("index").setSource(IndexMetadata.EVENT_INGESTED_FIELD_NAME, "2010-01-06T02:03:04.567Z").get(); + prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, "2010-01-05T01:55:03.456Z").get(); + prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, "2010-01-06T02:55:04.567Z").get(); + + assertAcked( + client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "index")).actionGet() + ); + + IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("index"); + + final IndexLongFieldRange eventIngestedRange = indexMetadata.getEventIngestedRange(); + assertThat(eventIngestedRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); + assertThat(eventIngestedRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertTrue(eventIngestedRange.isComplete()); + assertThat(eventIngestedRange.getMin(), equalTo(Instant.parse("2010-01-05T01:02:03.456Z").toEpochMilli())); + assertThat(eventIngestedRange.getMax(), equalTo(Instant.parse("2010-01-06T02:03:04.567Z").toEpochMilli())); + + final IndexLongFieldRange timestampRange = indexMetadata.getTimestampRange(); + assertThat(timestampRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); + assertThat(timestampRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertTrue(timestampRange.isComplete()); + assertThat(timestampRange.getMin(), equalTo(Instant.parse("2010-01-05T01:55:03.456Z").toEpochMilli())); + assertThat(timestampRange.getMax(), equalTo(Instant.parse("2010-01-06T02:55:04.567Z").toEpochMilli())); + } } diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleStateServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleStateServiceTests.java index b37eb8f99f52c..3f3285c5c2bd7 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleStateServiceTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleStateServiceTests.java @@ -56,19 +56,23 @@ import org.elasticsearch.xpack.core.ilm.TimeseriesLifecycleType; import org.elasticsearch.xpack.core.ilm.UnfollowAction; import org.elasticsearch.xpack.core.ilm.WaitForSnapshotAction; +import org.junit.Assert; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.doAnswer; @@ -84,35 +88,34 @@ public void testDependencies() { protected NamedXContentRegistry xContentRegistry() { List entries = new 
ArrayList<>(ClusterModule.getNamedXWriteables()); - entries.addAll( - Arrays.asList( - new NamedXContentRegistry.Entry( - LifecycleType.class, - new ParseField(TimeseriesLifecycleType.TYPE), - (p) -> TimeseriesLifecycleType.INSTANCE - ), - new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(AllocateAction.NAME), AllocateAction::parse), - new NamedXContentRegistry.Entry( - LifecycleAction.class, - new ParseField(WaitForSnapshotAction.NAME), - WaitForSnapshotAction::parse - ), - new NamedXContentRegistry.Entry( - LifecycleAction.class, - new ParseField(SearchableSnapshotAction.NAME), - SearchableSnapshotAction::parse - ), - new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse), - new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ForceMergeAction.NAME), ForceMergeAction::parse), - new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ReadOnlyAction.NAME), ReadOnlyAction::parse), - new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(RolloverAction.NAME), RolloverAction::parse), - new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse), - new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(FreezeAction.NAME), FreezeAction::parse), - new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(SetPriorityAction.NAME), SetPriorityAction::parse), - new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(MigrateAction.NAME), MigrateAction::parse), - new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(UnfollowAction.NAME), UnfollowAction::parse), - new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DownsampleAction.NAME), DownsampleAction::parse) - ) + Collections.addAll( + entries, + new NamedXContentRegistry.Entry( + LifecycleType.class, + new ParseField(TimeseriesLifecycleType.TYPE), + (p) -> TimeseriesLifecycleType.INSTANCE + ), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(AllocateAction.NAME), AllocateAction::parse), + new NamedXContentRegistry.Entry( + LifecycleAction.class, + new ParseField(WaitForSnapshotAction.NAME), + WaitForSnapshotAction::parse + ), + new NamedXContentRegistry.Entry( + LifecycleAction.class, + new ParseField(SearchableSnapshotAction.NAME), + SearchableSnapshotAction::parse + ), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ForceMergeAction.NAME), ForceMergeAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ReadOnlyAction.NAME), ReadOnlyAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(RolloverAction.NAME), RolloverAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(FreezeAction.NAME), FreezeAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(SetPriorityAction.NAME), SetPriorityAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(MigrateAction.NAME), MigrateAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(UnfollowAction.NAME), UnfollowAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new 
ParseField(DownsampleAction.NAME), DownsampleAction::parse) ); return new NamedXContentRegistry(entries); } @@ -130,7 +133,7 @@ public void testValidationFails() { ClusterState state = ClusterState.builder(clusterName).build(); ReservedLifecycleAction action = new ReservedLifecycleAction(xContentRegistry(), client, mock(XPackLicenseState.class)); - TransformState prevState = new TransformState(state, Collections.emptySet()); + TransformState prevState = new TransformState(state, Set.of()); String badPolicyJSON = """ { @@ -145,9 +148,9 @@ public void testValidationFails() { } }"""; - assertEquals( - "[1:2] [lifecycle_policy] unknown field [phase] did you mean [phases]?", - expectThrows(XContentParseException.class, () -> processJSON(action, prevState, badPolicyJSON)).getMessage() + assertThat( + expectThrows(XContentParseException.class, () -> processJSON(action, prevState, badPolicyJSON)).getMessage(), + is("[1:2] [lifecycle_policy] unknown field [phase] did you mean [phases]?") ); } @@ -162,10 +165,10 @@ public void testActionAddRemove() throws Exception { String emptyJSON = ""; - TransformState prevState = new TransformState(state, Collections.emptySet()); + TransformState prevState = new TransformState(state, Set.of()); TransformState updatedState = processJSON(action, prevState, emptyJSON); - assertEquals(0, updatedState.keys().size()); + assertThat(updatedState.keys(), empty()); assertEquals(prevState.state(), updatedState.state()); String twoPoliciesJSON = """ @@ -359,9 +362,9 @@ public void testOperatorControllerFromJSONContent() throws IOException { AtomicReference x = new AtomicReference<>(); try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, testJSON)) { - controller.process("operator", parser, (e) -> x.set(e)); + controller.process("operator", parser, x::set); - assertTrue(x.get() instanceof IllegalStateException); + assertThat(x.get(), instanceOf(IllegalStateException.class)); assertThat(x.get().getMessage(), containsString("Error processing state change request for operator")); } @@ -380,11 +383,7 @@ public void testOperatorControllerFromJSONContent() throws IOException { ); try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, testJSON)) { - controller.process("operator", parser, (e) -> { - if (e != null) { - fail("Should not fail"); - } - }); + controller.process("operator", parser, Assert::assertNull); } } @@ -411,9 +410,9 @@ public void testOperatorControllerWithPluginPackage() { "my_timeseries_lifecycle", Map.of( "warm", - new Phase("warm", new TimeValue(10, TimeUnit.SECONDS), Collections.emptyMap()), + new Phase("warm", new TimeValue(10, TimeUnit.SECONDS), Map.of()), "delete", - new Phase("delete", new TimeValue(30, TimeUnit.SECONDS), Collections.emptyMap()) + new Phase("delete", new TimeValue(30, TimeUnit.SECONDS), Map.of()) ) ) ) @@ -421,9 +420,9 @@ public void testOperatorControllerWithPluginPackage() { new ReservedStateVersion(123L, Version.CURRENT) ); - controller.process("operator", pack, (e) -> x.set(e)); + controller.process("operator", pack, x::set); - assertTrue(x.get() instanceof IllegalStateException); + assertThat(x.get(), instanceOf(IllegalStateException.class)); assertThat(x.get().getMessage(), containsString("Error processing state change request for operator")); Client client = mock(Client.class); @@ -440,10 +439,6 @@ public void testOperatorControllerWithPluginPackage() { ) ); - controller.process("operator", pack, (e) -> { - if (e != null) { - 
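The cleanup above replaces assertEquals/assertTrue with Hamcrest matchers (is, empty, instanceOf) and lambdas with method references. A small illustrative sketch of the before/after assertion style, not taken from the PR:

```java
// Illustration of the assertion-style cleanup: Hamcrest matchers read as
// "actual, expectation" and give richer failure messages than boolean checks.
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;

import java.util.List;

class MatcherStyleDemo {
    void demo(Object error, List<String> keys, String message) {
        // before: assertTrue(error instanceof IllegalStateException);
        assertThat(error, instanceOf(IllegalStateException.class));
        // before: assertEquals(0, keys.size());
        assertThat(keys, empty());
        // before: assertEquals("expected", message);
        assertThat(message, is("expected"));
    }
}
```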
fail("Should not fail"); - } - }); + controller.process("operator", pack, Assert::assertNull); } } diff --git a/x-pack/plugin/inference/build.gradle b/x-pack/plugin/inference/build.gradle index f4378d8ab5b7c..beeec94f21ebf 100644 --- a/x-pack/plugin/inference/build.gradle +++ b/x-pack/plugin/inference/build.gradle @@ -27,6 +27,10 @@ base { archivesName = 'x-pack-inference' } +versions << [ + 'awsbedrockruntime': '1.12.740' +] + dependencies { implementation project(path: ':libs:elasticsearch-logging') compileOnly project(":server") @@ -38,14 +42,194 @@ dependencies { clusterPlugins project(':x-pack:plugin:inference:qa:test-service-plugin') api "com.ibm.icu:icu4j:${versions.icu4j}" + + runtimeOnly 'com.google.guava:guava:32.0.1-jre' + implementation 'com.google.code.gson:gson:2.10' + implementation "com.google.protobuf:protobuf-java-util:${versions.protobuf}" + implementation "com.google.protobuf:protobuf-java:${versions.protobuf}" + implementation 'com.google.api.grpc:proto-google-iam-v1:1.6.2' + implementation 'com.google.auth:google-auth-library-credentials:1.11.0' + implementation 'com.google.auth:google-auth-library-oauth2-http:1.11.0' + implementation "com.google.oauth-client:google-oauth-client:${versions.google_oauth_client}" + implementation 'com.google.api-client:google-api-client:2.1.1' + implementation 'com.google.http-client:google-http-client:1.42.3' + implementation 'com.google.http-client:google-http-client-gson:1.42.3' + implementation 'com.google.http-client:google-http-client-appengine:1.42.3' + implementation 'com.google.http-client:google-http-client-jackson2:1.42.3' + implementation "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" + implementation "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" + implementation "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + implementation "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" + implementation "com.fasterxml.jackson:jackson-bom:${versions.jackson}" + implementation 'com.google.api:gax-httpjson:0.105.1' + implementation 'io.grpc:grpc-context:1.49.2' + implementation 'io.opencensus:opencensus-api:0.31.1' + implementation 'io.opencensus:opencensus-contrib-http-util:0.31.1' + implementation "com.amazonaws:aws-java-sdk-bedrockruntime:${versions.awsbedrockruntime}" + implementation "com.amazonaws:aws-java-sdk-core:${versions.aws}" + implementation "com.amazonaws:jmespath-java:${versions.aws}" + implementation "joda-time:joda-time:2.10.10" + implementation 'javax.xml.bind:jaxb-api:2.2.2' } -if (BuildParams.isSnapshotBuild() == false) { - tasks.withType(Test).configureEach { - systemProperty 'es.semantic_text_feature_flag_enabled', 'true' - } +tasks.named("dependencyLicenses").configure { + mapping from: /google-auth-.*/, to: 'google-auth' + mapping from: /google-http-.*/, to: 'google-http' + mapping from: /opencensus.*/, to: 'opencensus' + mapping from: /protobuf.*/, to: 'protobuf' + mapping from: /proto-google.*/, to: 'proto-google' + mapping from: /jackson.*/, to: 'jackson' + mapping from: /aws-java-sdk-.*/, to: 'aws-java-sdk' + mapping from: /jmespath-java.*/, to: 'aws-java-sdk' + mapping from: /jaxb-.*/, to: 'jaxb' +} + +tasks.named("thirdPartyAudit").configure { + ignoreViolations( + // uses internal java api: sun.misc.Unsafe + 'com.google.protobuf.UnsafeUtil', + 'com.google.protobuf.UnsafeUtil$1', + 'com.google.protobuf.UnsafeUtil$JvmMemoryAccessor', + 'com.google.protobuf.UnsafeUtil$MemoryAccessor', + 'com.google.protobuf.MessageSchema', + 
'com.google.protobuf.UnsafeUtil$Android32MemoryAccessor', + 'com.google.protobuf.UnsafeUtil$Android64MemoryAccessor', + 'com.google.common.cache.Striped64', + 'com.google.common.cache.Striped64$1', + 'com.google.common.cache.Striped64$Cell', + 'com.google.common.hash.Striped64', + 'com.google.common.hash.Striped64$1', + 'com.google.common.hash.Striped64$Cell', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', + ) + + ignoreMissingClasses( + 'com.google.api.AnnotationsProto', + 'com.google.api.ClientProto', + 'com.google.api.FieldBehaviorProto', + 'com.google.api.ResourceProto', + 'com.google.api.core.AbstractApiFuture', + 'com.google.api.core.ApiFunction', + 'com.google.api.core.ApiFuture', + 'com.google.api.core.ApiFutureCallback', + 'com.google.api.core.ApiFutures', + 'com.google.api.gax.core.BackgroundResource', + 'com.google.api.gax.core.ExecutorProvider', + 'com.google.api.gax.core.GaxProperties', + 'com.google.api.gax.core.GoogleCredentialsProvider', + 'com.google.api.gax.core.GoogleCredentialsProvider$Builder', + 'com.google.api.gax.core.InstantiatingExecutorProvider', + 'com.google.api.gax.core.InstantiatingExecutorProvider$Builder', + 'com.google.api.gax.longrunning.OperationSnapshot', + 'com.google.api.gax.paging.AbstractFixedSizeCollection', + 'com.google.api.gax.paging.AbstractPage', + 'com.google.api.gax.paging.AbstractPagedListResponse', + 'com.google.api.gax.retrying.RetrySettings', + 'com.google.api.gax.retrying.RetrySettings$Builder', + 'com.google.api.gax.rpc.ApiCallContext', + 'com.google.api.gax.rpc.ApiCallContext$Key', + 'com.google.api.gax.rpc.ApiClientHeaderProvider', + 'com.google.api.gax.rpc.ApiClientHeaderProvider$Builder', + 'com.google.api.gax.rpc.ApiException', + 'com.google.api.gax.rpc.ApiExceptionFactory', + 'com.google.api.gax.rpc.BatchingCallSettings', + 'com.google.api.gax.rpc.Callables', + 'com.google.api.gax.rpc.ClientContext', + 'com.google.api.gax.rpc.ClientSettings', + 'com.google.api.gax.rpc.ClientSettings$Builder', + 'com.google.api.gax.rpc.FixedHeaderProvider', + 'com.google.api.gax.rpc.HeaderProvider', + 'com.google.api.gax.rpc.LongRunningClient', + 'com.google.api.gax.rpc.OperationCallSettings', + 'com.google.api.gax.rpc.OperationCallable', + 'com.google.api.gax.rpc.PageContext', + 'com.google.api.gax.rpc.PagedCallSettings', + 'com.google.api.gax.rpc.PagedCallSettings$Builder', + 'com.google.api.gax.rpc.PagedListDescriptor', + 'com.google.api.gax.rpc.PagedListResponseFactory', + 'com.google.api.gax.rpc.ResponseObserver', + 'com.google.api.gax.rpc.ServerStreamingCallSettings', + 'com.google.api.gax.rpc.ServerStreamingCallable', + 'com.google.api.gax.rpc.StateCheckingResponseObserver', + 'com.google.api.gax.rpc.StatusCode', + 'com.google.api.gax.rpc.StatusCode$Code', + 'com.google.api.gax.rpc.StreamController', + 'com.google.api.gax.rpc.StubSettings', + 'com.google.api.gax.rpc.StubSettings$Builder', + 'com.google.api.gax.rpc.TranslatingUnaryCallable', + 'com.google.api.gax.rpc.TransportChannel', + 
'com.google.api.gax.rpc.TransportChannelProvider', + 'com.google.api.gax.rpc.UnaryCallSettings', + 'com.google.api.gax.rpc.UnaryCallSettings$Builder', + 'com.google.api.gax.rpc.UnaryCallable', + 'com.google.api.gax.rpc.internal.ApiCallContextOptions', + 'com.google.api.gax.rpc.internal.Headers', + 'com.google.api.gax.rpc.mtls.MtlsProvider', + 'com.google.api.gax.tracing.ApiTracer', + 'com.google.api.gax.tracing.BaseApiTracer', + 'com.google.api.gax.tracing.SpanName', + 'com.google.api.pathtemplate.PathTemplate', + 'com.google.common.util.concurrent.internal.InternalFutureFailureAccess', + 'com.google.common.util.concurrent.internal.InternalFutures', + 'com.google.longrunning.CancelOperationRequest', + 'com.google.longrunning.CancelOperationRequest$Builder', + 'com.google.longrunning.DeleteOperationRequest', + 'com.google.longrunning.DeleteOperationRequest$Builder', + 'com.google.longrunning.GetOperationRequest', + 'com.google.longrunning.GetOperationRequest$Builder', + 'com.google.longrunning.ListOperationsRequest', + 'com.google.longrunning.ListOperationsRequest$Builder', + 'com.google.longrunning.ListOperationsResponse', + 'com.google.longrunning.Operation', + 'com.google.rpc.Code', + 'com.google.rpc.Status', + 'com.google.type.Expr', + 'com.google.type.Expr$Builder', + 'com.google.type.ExprOrBuilder', + 'com.google.type.ExprProto', + 'org.threeten.bp.Duration', + 'org.threeten.bp.Instant', + 'com.google.api.client.http.apache.v2.ApacheHttpTransport', + 'com.google.appengine.api.datastore.Blob', + 'com.google.appengine.api.datastore.DatastoreService', + 'com.google.appengine.api.datastore.DatastoreServiceFactory', + 'com.google.appengine.api.datastore.Entity', + 'com.google.appengine.api.datastore.Key', + 'com.google.appengine.api.datastore.KeyFactory', + 'com.google.appengine.api.datastore.PreparedQuery', + 'com.google.appengine.api.datastore.Query', + 'com.google.appengine.api.memcache.Expiration', + 'com.google.appengine.api.memcache.MemcacheService', + 'com.google.appengine.api.memcache.MemcacheServiceFactory', + 'com.google.appengine.api.urlfetch.FetchOptions$Builder', + 'com.google.appengine.api.urlfetch.FetchOptions', + 'com.google.appengine.api.urlfetch.HTTPHeader', + 'com.google.appengine.api.urlfetch.HTTPMethod', + 'com.google.appengine.api.urlfetch.HTTPRequest', + 'com.google.appengine.api.urlfetch.HTTPResponse', + 'com.google.appengine.api.urlfetch.URLFetchService', + 'com.google.appengine.api.urlfetch.URLFetchServiceFactory', + 'software.amazon.ion.IonReader', + 'software.amazon.ion.IonSystem', + 'software.amazon.ion.IonType', + 'software.amazon.ion.IonWriter', + 'software.amazon.ion.Timestamp', + 'software.amazon.ion.system.IonBinaryWriterBuilder', + 'software.amazon.ion.system.IonSystemBuilder', + 'software.amazon.ion.system.IonTextWriterBuilder', + 'software.amazon.ion.system.IonWriterBuilder', + 'javax.activation.DataHandler' + ) } tasks.named('yamlRestTest') { usesDefaultDistribution() } + diff --git a/x-pack/plugin/inference/licenses/aws-java-sdk-LICENSE.txt b/x-pack/plugin/inference/licenses/aws-java-sdk-LICENSE.txt new file mode 100644 index 0000000000000..98d1f9319f374 --- /dev/null +++ b/x-pack/plugin/inference/licenses/aws-java-sdk-LICENSE.txt @@ -0,0 +1,63 @@ +Apache License +Version 2.0, January 2004 + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. 
+ +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + + 1. You must give any other recipients of the Work or Derivative Works a copy of this License; and + 2. You must cause any modified files to carry prominent notices stating that You changed the files; and + 3. You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + 4. If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. + +You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +Note: Other license terms may apply to certain, identified software files contained within or distributed with the accompanying software if such terms are included in the directory containing the accompanying software. Such other license terms will then apply in lieu of the terms of the software license above. + +JSON processing code subject to the JSON License from JSON.org: + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +The Software shall be used for Good, not Evil. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/x-pack/plugin/inference/licenses/aws-java-sdk-NOTICE.txt b/x-pack/plugin/inference/licenses/aws-java-sdk-NOTICE.txt new file mode 100644 index 0000000000000..565bd6085c71a --- /dev/null +++ b/x-pack/plugin/inference/licenses/aws-java-sdk-NOTICE.txt @@ -0,0 +1,15 @@ +AWS SDK for Java +Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +This product includes software developed by +Amazon Technologies, Inc (http://www.amazon.com/). + +********************** +THIRD PARTY COMPONENTS +********************** +This software includes third party software subject to the following copyrights: +- XML parsing and utility functions from JetS3t - Copyright 2006-2009 James Murty. +- JSON parsing and utility functions from JSON.org - Copyright 2002 JSON.org. +- PKCS#1 PEM encoded private key parsing and utility functions from oauth.googlecode.com - Copyright 1998-2010 AOL Inc. + +The licenses for these third party components are included in LICENSE.txt diff --git a/x-pack/plugin/inference/licenses/gax-httpjson-LICENSE.txt b/x-pack/plugin/inference/licenses/gax-httpjson-LICENSE.txt new file mode 100644 index 0000000000000..267561bb386de --- /dev/null +++ b/x-pack/plugin/inference/licenses/gax-httpjson-LICENSE.txt @@ -0,0 +1,27 @@ +Copyright 2016, Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/x-pack/plugin/inference/licenses/gax-httpjson-NOTICE.txt b/x-pack/plugin/inference/licenses/gax-httpjson-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/inference/licenses/google-api-client-LICENSE.txt b/x-pack/plugin/inference/licenses/google-api-client-LICENSE.txt new file mode 100644 index 0000000000000..4eedc0116add7 --- /dev/null +++ b/x-pack/plugin/inference/licenses/google-api-client-LICENSE.txt @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/x-pack/plugin/inference/licenses/google-api-client-NOTICE.txt b/x-pack/plugin/inference/licenses/google-api-client-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/inference/licenses/google-auth-LICENSE.txt b/x-pack/plugin/inference/licenses/google-auth-LICENSE.txt new file mode 100644 index 0000000000000..12edf23c6711f --- /dev/null +++ b/x-pack/plugin/inference/licenses/google-auth-LICENSE.txt @@ -0,0 +1,28 @@ +Copyright 2014, Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/x-pack/plugin/inference/licenses/google-auth-NOTICE.txt b/x-pack/plugin/inference/licenses/google-auth-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/inference/licenses/google-http-LICENSE.txt b/x-pack/plugin/inference/licenses/google-http-LICENSE.txt new file mode 100644 index 0000000000000..980a15ac24eeb --- /dev/null +++ b/x-pack/plugin/inference/licenses/google-http-LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/x-pack/plugin/inference/licenses/google-http-NOTICE.txt b/x-pack/plugin/inference/licenses/google-http-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/inference/licenses/google-oauth-client-LICENSE.txt b/x-pack/plugin/inference/licenses/google-oauth-client-LICENSE.txt new file mode 100644 index 0000000000000..12edf23c6711f --- /dev/null +++ b/x-pack/plugin/inference/licenses/google-oauth-client-LICENSE.txt @@ -0,0 +1,28 @@ +Copyright 2014, Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/x-pack/plugin/inference/licenses/google-oauth-client-NOTICE.txt b/x-pack/plugin/inference/licenses/google-oauth-client-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/inference/licenses/grpc-context-LICENSE.txt b/x-pack/plugin/inference/licenses/grpc-context-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/x-pack/plugin/inference/licenses/grpc-context-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/x-pack/plugin/inference/licenses/grpc-context-NOTICE.txt b/x-pack/plugin/inference/licenses/grpc-context-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/inference/licenses/gson-LICENSE.txt b/x-pack/plugin/inference/licenses/gson-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/x-pack/plugin/inference/licenses/gson-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/x-pack/plugin/inference/licenses/gson-NOTICE.txt b/x-pack/plugin/inference/licenses/gson-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/inference/licenses/guava-LICENSE.txt b/x-pack/plugin/inference/licenses/guava-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/x-pack/plugin/inference/licenses/guava-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/x-pack/plugin/inference/licenses/guava-NOTICE.txt b/x-pack/plugin/inference/licenses/guava-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/inference/licenses/jackson-LICENSE.txt b/x-pack/plugin/inference/licenses/jackson-LICENSE.txt new file mode 100644 index 0000000000000..f5f45d26a49d6 --- /dev/null +++ b/x-pack/plugin/inference/licenses/jackson-LICENSE.txt @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor streaming parser/generator is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. 
+ +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/x-pack/plugin/inference/licenses/jackson-NOTICE.txt b/x-pack/plugin/inference/licenses/jackson-NOTICE.txt new file mode 100644 index 0000000000000..4c976b7b4cc58 --- /dev/null +++ b/x-pack/plugin/inference/licenses/jackson-NOTICE.txt @@ -0,0 +1,20 @@ +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. diff --git a/x-pack/plugin/inference/licenses/jaxb-LICENSE.txt b/x-pack/plugin/inference/licenses/jaxb-LICENSE.txt new file mode 100644 index 0000000000000..833a843cfeee1 --- /dev/null +++ b/x-pack/plugin/inference/licenses/jaxb-LICENSE.txt @@ -0,0 +1,274 @@ +COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL)Version 1.1 + +1. Definitions. + + 1.1. "Contributor" means each individual or entity that creates or contributes to the creation of Modifications. + + 1.2. "Contributor Version" means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor. + + 1.3. "Covered Software" means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof. + + 1.4. "Executable" means the Covered Software in any form other than Source Code. + + 1.5. "Initial Developer" means the individual or entity that first makes Original Software available under this License. + + 1.6. "Larger Work" means a work which combines Covered Software or portions thereof with code not governed by the terms of this License. + + 1.7. "License" means this document. + + 1.8. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein. + + 1.9. "Modifications" means the Source Code and Executable form of any of the following: + + A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications; + + B. Any new file that contains any part of the Original Software or previous Modification; or + + C. Any new file that is contributed or otherwise made available under the terms of this License. + + 1.10. "Original Software" means the Source Code and Executable form of computer software code that is originally released under this License. + + 1.11. "Patent Claims" means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor. + + 1.12. 
"Source Code" means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code. + + 1.13. "You" (or "Your") means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. + +2. License Grants. + + 2.1. The Initial Developer Grant. + + Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof). + + (c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License. + + (d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices. + + 2.2. Contributor Grant. + + Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination). + + (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party. 
+ + (d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor. + +3. Distribution Obligations. + + 3.1. Availability of Source Code. + + Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange. + + 3.2. Modifications. + + The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License. + + 3.3. Required Notices. + + You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer. + + 3.4. Application of Additional Terms. + + You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients' rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer. + + 3.5. Distribution of Executable Versions. + + You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient's rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. 
You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer. + + 3.6. Larger Works. + + You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software. + +4. Versions of the License. + + 4.1. New Versions. + + Oracle is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License. + + 4.2. Effect of New Versions. + + You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward. + + 4.3. Modified Versions. + + When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License. + +5. DISCLAIMER OF WARRANTY. + + COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. + +6. TERMINATION. + + 6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive. + + 6.2. 
If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as "Participant") alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant. + + 6.3. If You assert a patent infringement claim against Participant alleging that the Participant Software directly or indirectly infringes any patent where such claim is resolved (such as by license or settlement) prior to the initiation of patent infringement litigation, then the reasonable value of the licenses granted by such Participant under Sections 2.1 or 2.2 shall be taken into account in determining the amount or value of any payment or license. + + 6.4. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination. + +7. LIMITATION OF LIABILITY. + + UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. + +8. U.S. GOVERNMENT END USERS. + + The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer software" (as that term is defined at 48 C.F.R. ? 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License. + +9. MISCELLANEOUS. + + This License represents the complete agreement concerning subject matter hereof. 
If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction's conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software. + +10. RESPONSIBILITY FOR CLAIMS. + + As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability. + +---------- +NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) +The code released under the CDDL shall be governed by the laws of the State of California (excluding conflict-of-law provisions). Any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California and the state courts of the State of California, with venue lying in Santa Clara County, California. + + + + +The GNU General Public License (GPL) Version 2, June 1991 + + +Copyright (C) 1989, 1991 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. + +Preamble + +The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. + +When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. 
+ +To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. + +For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. + +We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. + +Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. + +Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. + +The precise terms and conditions for copying, distribution and modification follow. + + +TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + +0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. + +1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. + +You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. + +2. 
You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. + + c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. + +3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for making modifications to it. 
For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. + +If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. + +4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. + +5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. + +6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. + +7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. 
+ +It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. + +This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. + +8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. + +9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. + +10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. + +NO WARRANTY + +11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + +12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +END OF TERMS AND CONDITIONS + + +How to Apply These Terms to Your New Programs + +If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. + +To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. + + One line to give the program's name and a brief idea of what it does. + + Copyright (C) + + This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. + + signature of Ty Coon, 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. 
+ + +"CLASSPATH" EXCEPTION TO THE GPL VERSION 2 + +Certain source files distributed by Oracle are subject to the following clarification and special exception to the GPL Version 2, but only where Oracle has expressly included in the particular source file's header the words "Oracle designates this particular file as subject to the "Classpath" exception as provided by Oracle in the License file that accompanied this code." + +Linking this library statically or dynamically with other modules is making a combined work based on this library. Thus, the terms and conditions of the GNU General Public License Version 2 cover the whole combination. + +As a special exception, the copyright holders of this library give you permission to link this library with independent modules to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify this library, you may extend this exception to your version of the library, but you are not obligated to do so. If you do not wish to do so, delete this exception statement from your version. diff --git a/libs/preallocate/licenses/jna-NOTICE.txt b/x-pack/plugin/inference/licenses/jaxb-NOTICE.txt similarity index 100% rename from libs/preallocate/licenses/jna-NOTICE.txt rename to x-pack/plugin/inference/licenses/jaxb-NOTICE.txt diff --git a/x-pack/plugin/inference/licenses/joda-time-LICENSE.txt b/x-pack/plugin/inference/licenses/joda-time-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/x-pack/plugin/inference/licenses/joda-time-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/x-pack/plugin/inference/licenses/joda-time-NOTICE.txt b/x-pack/plugin/inference/licenses/joda-time-NOTICE.txt new file mode 100644 index 0000000000000..dffbcf31cacf6 --- /dev/null +++ b/x-pack/plugin/inference/licenses/joda-time-NOTICE.txt @@ -0,0 +1,5 @@ +============================================================================= += NOTICE file corresponding to section 4d of the Apache License Version 2.0 = +============================================================================= +This product includes software developed by +Joda.org (http://www.joda.org/). 
diff --git a/x-pack/plugin/inference/licenses/opencensus-LICENSE.txt b/x-pack/plugin/inference/licenses/opencensus-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/x-pack/plugin/inference/licenses/opencensus-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/x-pack/plugin/inference/licenses/opencensus-NOTICE.txt b/x-pack/plugin/inference/licenses/opencensus-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/inference/licenses/proto-google-LICENSE.txt b/x-pack/plugin/inference/licenses/proto-google-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/x-pack/plugin/inference/licenses/proto-google-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/x-pack/plugin/inference/licenses/proto-google-NOTICE.txt b/x-pack/plugin/inference/licenses/proto-google-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/inference/licenses/protobuf-LICENSE.txt b/x-pack/plugin/inference/licenses/protobuf-LICENSE.txt new file mode 100644 index 0000000000000..19b305b00060a --- /dev/null +++ b/x-pack/plugin/inference/licenses/protobuf-LICENSE.txt @@ -0,0 +1,32 @@ +Copyright 2008 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. 
diff --git a/x-pack/plugin/inference/licenses/protobuf-NOTICE.txt b/x-pack/plugin/inference/licenses/protobuf-NOTICE.txt new file mode 100644 index 0000000000000..19b305b00060a --- /dev/null +++ b/x-pack/plugin/inference/licenses/protobuf-NOTICE.txt @@ -0,0 +1,32 @@ +Copyright 2008 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. 
diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java index 419869c0c4a5e..f30f2e8fe201a 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java @@ -126,6 +126,25 @@ protected void deleteModel(String modelId, TaskType taskType) throws IOException assertOkOrCreated(response); } + protected void putSemanticText(String endpointId, String indexName) throws IOException { + var request = new Request("PUT", Strings.format("%s", indexName)); + String body = Strings.format(""" + { + "mappings": { + "properties": { + "inference_field": { + "type": "semantic_text", + "inference_id": "%s" + } + } + } + } + """, endpointId); + request.setJsonEntity(body); + var response = client().performRequest(request); + assertOkOrCreated(response); + } + protected Map putModel(String modelId, String modelConfig, TaskType taskType) throws IOException { String endpoint = Strings.format("_inference/%s/%s", taskType, modelId); return putRequest(endpoint, modelConfig); diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java index 75e392b6d155f..242f786e95364 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java @@ -16,6 +16,7 @@ import java.io.IOException; import java.util.List; +import java.util.Set; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasSize; @@ -124,14 +125,15 @@ public void testDeleteEndpointWhileReferencedByPipeline() throws IOException { putPipeline(pipelineId, endpointId); { + var errorString = new StringBuilder().append("Inference endpoint ") + .append(endpointId) + .append(" is referenced by pipelines: ") + .append(Set.of(pipelineId)) + .append(". ") + .append("Ensure that no pipelines are using this inference endpoint, ") + .append("or use force to ignore this warning and delete the inference endpoint."); var e = expectThrows(ResponseException.class, () -> deleteModel(endpointId)); - assertThat( - e.getMessage(), - containsString( - "Inference endpoint endpoint_referenced_by_pipeline is referenced by pipelines and cannot be deleted. " - + "Use `force` to delete it anyway, or use `dry_run` to list the pipelines that reference it." 
- ) - ); + assertThat(e.getMessage(), containsString(errorString.toString())); } { var response = deleteModel(endpointId, "dry_run=true"); @@ -146,4 +148,78 @@ public void testDeleteEndpointWhileReferencedByPipeline() throws IOException { } deletePipeline(pipelineId); } + + public void testDeleteEndpointWhileReferencedBySemanticText() throws IOException { + String endpointId = "endpoint_referenced_by_semantic_text"; + putModel(endpointId, mockSparseServiceModelConfig(), TaskType.SPARSE_EMBEDDING); + String indexName = randomAlphaOfLength(10).toLowerCase(); + putSemanticText(endpointId, indexName); + { + + var errorString = new StringBuilder().append(" Inference endpoint ") + .append(endpointId) + .append(" is being used in the mapping for indexes: ") + .append(Set.of(indexName)) + .append(". ") + .append("Ensure that no index mappings are using this inference endpoint, ") + .append("or use force to ignore this warning and delete the inference endpoint."); + var e = expectThrows(ResponseException.class, () -> deleteModel(endpointId)); + assertThat(e.getMessage(), containsString(errorString.toString())); + } + { + var response = deleteModel(endpointId, "dry_run=true"); + var entityString = EntityUtils.toString(response.getEntity()); + assertThat(entityString, containsString("\"acknowledged\":false")); + assertThat(entityString, containsString(indexName)); + } + { + var response = deleteModel(endpointId, "force=true"); + var entityString = EntityUtils.toString(response.getEntity()); + assertThat(entityString, containsString("\"acknowledged\":true")); + } + deleteIndex(indexName); + } + + public void testDeleteEndpointWhileReferencedBySemanticTextAndPipeline() throws IOException { + String endpointId = "endpoint_referenced_by_semantic_text"; + putModel(endpointId, mockSparseServiceModelConfig(), TaskType.SPARSE_EMBEDDING); + String indexName = randomAlphaOfLength(10).toLowerCase(); + putSemanticText(endpointId, indexName); + var pipelineId = "pipeline_referencing_model"; + putPipeline(pipelineId, endpointId); + { + + var errorString = new StringBuilder().append("Inference endpoint ") + .append(endpointId) + .append(" is referenced by pipelines: ") + .append(Set.of(pipelineId)) + .append(". ") + .append("Ensure that no pipelines are using this inference endpoint, ") + .append("or use force to ignore this warning and delete the inference endpoint.") + .append(" Inference endpoint ") + .append(endpointId) + .append(" is being used in the mapping for indexes: ") + .append(Set.of(indexName)) + .append(". 
") + .append("Ensure that no index mappings are using this inference endpoint, ") + .append("or use force to ignore this warning and delete the inference endpoint."); + + var e = expectThrows(ResponseException.class, () -> deleteModel(endpointId)); + assertThat(e.getMessage(), containsString(errorString.toString())); + } + { + var response = deleteModel(endpointId, "dry_run=true"); + var entityString = EntityUtils.toString(response.getEntity()); + assertThat(entityString, containsString("\"acknowledged\":false")); + assertThat(entityString, containsString(indexName)); + assertThat(entityString, containsString(pipelineId)); + } + { + var response = deleteModel(endpointId, "force=true"); + var entityString = EntityUtils.toString(response.getEntity()); + assertThat(entityString, containsString("\"acknowledged\":true")); + } + deletePipeline(pipelineId); + deleteIndex(indexName); + } } diff --git a/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/AzureOpenAiServiceUpgradeIT.java b/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/AzureOpenAiServiceUpgradeIT.java index d475fd099d4ac..f0196834b9175 100644 --- a/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/AzureOpenAiServiceUpgradeIT.java +++ b/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/AzureOpenAiServiceUpgradeIT.java @@ -49,35 +49,39 @@ public static void shutdown() { @AwaitsFix(bugUrl = "Cannot set the URL in the tests") public void testOpenAiEmbeddings() throws IOException { var openAiEmbeddingsSupported = getOldClusterTestVersion().onOrAfter(OPEN_AI_AZURE_EMBEDDINGS_ADDED); + // `gte_v` indicates that the cluster version is Greater Than or Equal to MODELS_RENAMED_TO_ENDPOINTS + String oldClusterEndpointIdentifier = oldClusterHasFeature("gte_v" + MODELS_RENAMED_TO_ENDPOINTS) ? 
"endpoints" : "models"; assumeTrue("Azure OpenAI embedding service added in " + OPEN_AI_AZURE_EMBEDDINGS_ADDED, openAiEmbeddingsSupported); final String oldClusterId = "old-cluster-embeddings"; final String upgradedClusterId = "upgraded-cluster-embeddings"; + var testTaskType = TaskType.TEXT_EMBEDDING; + if (isOldCluster()) { // queue a response as PUT will call the service openAiEmbeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(OpenAiServiceUpgradeIT.embeddingResponse())); - put(oldClusterId, embeddingConfig(getUrl(openAiEmbeddingsServer)), TaskType.TEXT_EMBEDDING); + put(oldClusterId, embeddingConfig(getUrl(openAiEmbeddingsServer)), testTaskType); - var configs = (List>) get(TaskType.TEXT_EMBEDDING, oldClusterId).get("endpoints"); + var configs = (List>) get(testTaskType, oldClusterId).get(oldClusterEndpointIdentifier); assertThat(configs, hasSize(1)); } else if (isMixedCluster()) { - var configs = (List>) get(TaskType.TEXT_EMBEDDING, oldClusterId).get("endpoints"); + var configs = getConfigsWithBreakingChangeHandling(testTaskType, oldClusterId); assertEquals("azureopenai", configs.get(0).get("service")); assertEmbeddingInference(oldClusterId); } else if (isUpgradedCluster()) { // check old cluster model - var configs = (List>) get(TaskType.TEXT_EMBEDDING, oldClusterId).get("endpoints"); + var configs = (List>) get(testTaskType, oldClusterId).get("endpoints"); var serviceSettings = (Map) configs.get(0).get("service_settings"); // Inference on old cluster model assertEmbeddingInference(oldClusterId); openAiEmbeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(OpenAiServiceUpgradeIT.embeddingResponse())); - put(upgradedClusterId, embeddingConfig(getUrl(openAiEmbeddingsServer)), TaskType.TEXT_EMBEDDING); + put(upgradedClusterId, embeddingConfig(getUrl(openAiEmbeddingsServer)), testTaskType); - configs = (List>) get(TaskType.TEXT_EMBEDDING, upgradedClusterId).get("endpoints"); + configs = (List>) get(testTaskType, upgradedClusterId).get("endpoints"); assertThat(configs, hasSize(1)); // Inference on the new config diff --git a/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/CohereServiceUpgradeIT.java b/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/CohereServiceUpgradeIT.java index c889d8f9b312a..c7d95f1f512b2 100644 --- a/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/CohereServiceUpgradeIT.java +++ b/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/CohereServiceUpgradeIT.java @@ -15,6 +15,8 @@ import org.elasticsearch.test.http.MockWebServer; import org.elasticsearch.xpack.inference.services.cohere.embeddings.CohereEmbeddingType; import org.hamcrest.Matchers; +import org.junit.AfterClass; +import org.junit.BeforeClass; import java.io.IOException; import java.util.List; @@ -39,7 +41,7 @@ public CohereServiceUpgradeIT(@Name("upgradedNodes") int upgradedNodes) { super(upgradedNodes); } - // @BeforeClass + @BeforeClass public static void startWebServer() throws IOException { cohereEmbeddingsServer = new MockWebServer(); cohereEmbeddingsServer.start(); @@ -48,58 +50,74 @@ public static void startWebServer() throws IOException { cohereRerankServer.start(); } - // @AfterClass // for the awaitsfix + @AfterClass public static void shutdown() { cohereEmbeddingsServer.close(); cohereRerankServer.close(); } @SuppressWarnings("unchecked") - 
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/107887") public void testCohereEmbeddings() throws IOException { var embeddingsSupported = getOldClusterTestVersion().onOrAfter(COHERE_EMBEDDINGS_ADDED); + // `gte_v` indicates that the cluster version is Greater Than or Equal to MODELS_RENAMED_TO_ENDPOINTS + String oldClusterEndpointIdentifier = oldClusterHasFeature("gte_v" + MODELS_RENAMED_TO_ENDPOINTS) ? "endpoints" : "models"; assumeTrue("Cohere embedding service added in " + COHERE_EMBEDDINGS_ADDED, embeddingsSupported); final String oldClusterIdInt8 = "old-cluster-embeddings-int8"; final String oldClusterIdFloat = "old-cluster-embeddings-float"; + var testTaskType = TaskType.TEXT_EMBEDDING; + if (isOldCluster()) { // queue a response as PUT will call the service cohereEmbeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(embeddingResponseByte())); - put(oldClusterIdInt8, embeddingConfigInt8(getUrl(cohereEmbeddingsServer)), TaskType.TEXT_EMBEDDING); + put(oldClusterIdInt8, embeddingConfigInt8(getUrl(cohereEmbeddingsServer)), testTaskType); // float model cohereEmbeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(embeddingResponseFloat())); - put(oldClusterIdFloat, embeddingConfigFloat(getUrl(cohereEmbeddingsServer)), TaskType.TEXT_EMBEDDING); + put(oldClusterIdFloat, embeddingConfigFloat(getUrl(cohereEmbeddingsServer)), testTaskType); - var configs = (List>) get(TaskType.TEXT_EMBEDDING, oldClusterIdInt8).get("endpoints"); - assertThat(configs, hasSize(1)); - assertEquals("cohere", configs.get(0).get("service")); - var serviceSettings = (Map) configs.get(0).get("service_settings"); - assertThat(serviceSettings, hasEntry("model_id", "embed-english-light-v3.0")); - var embeddingType = serviceSettings.get("embedding_type"); - // An upgraded node will report the embedding type as byte, the old node int8 - assertThat(embeddingType, Matchers.is(oneOf("int8", "byte"))); - - assertEmbeddingInference(oldClusterIdInt8, CohereEmbeddingType.BYTE); - assertEmbeddingInference(oldClusterIdFloat, CohereEmbeddingType.FLOAT); + { + var configs = (List>) get(testTaskType, oldClusterIdInt8).get(oldClusterEndpointIdentifier); + assertThat(configs, hasSize(1)); + assertEquals("cohere", configs.get(0).get("service")); + var serviceSettings = (Map) configs.get(0).get("service_settings"); + assertThat(serviceSettings, hasEntry("model_id", "embed-english-light-v3.0")); + var embeddingType = serviceSettings.get("embedding_type"); + // An upgraded node will report the embedding type as byte, the old node int8 + assertThat(embeddingType, Matchers.is(oneOf("int8", "byte"))); + assertEmbeddingInference(oldClusterIdInt8, CohereEmbeddingType.BYTE); + } + { + var configs = (List>) get(testTaskType, oldClusterIdFloat).get(oldClusterEndpointIdentifier); + assertThat(configs, hasSize(1)); + assertEquals("cohere", configs.get(0).get("service")); + var serviceSettings = (Map) configs.get(0).get("service_settings"); + assertThat(serviceSettings, hasEntry("model_id", "embed-english-light-v3.0")); + assertThat(serviceSettings, hasEntry("embedding_type", "float")); + assertEmbeddingInference(oldClusterIdFloat, CohereEmbeddingType.FLOAT); + } } else if (isMixedCluster()) { - var configs = (List>) get(TaskType.TEXT_EMBEDDING, oldClusterIdInt8).get("endpoints"); - assertEquals("cohere", configs.get(0).get("service")); - var serviceSettings = (Map) configs.get(0).get("service_settings"); - assertThat(serviceSettings, hasEntry("model_id", "embed-english-light-v3.0")); - var 
embeddingType = serviceSettings.get("embedding_type"); - // An upgraded node will report the embedding type as byte, an old node int8 - assertThat(embeddingType, Matchers.is(oneOf("int8", "byte"))); - - configs = (List>) get(TaskType.TEXT_EMBEDDING, oldClusterIdFloat).get("endpoints"); - serviceSettings = (Map) configs.get(0).get("service_settings"); - assertThat(serviceSettings, hasEntry("embedding_type", "float")); - - assertEmbeddingInference(oldClusterIdInt8, CohereEmbeddingType.BYTE); - assertEmbeddingInference(oldClusterIdFloat, CohereEmbeddingType.FLOAT); + { + var configs = getConfigsWithBreakingChangeHandling(testTaskType, oldClusterIdInt8); + assertEquals("cohere", configs.get(0).get("service")); + var serviceSettings = (Map) configs.get(0).get("service_settings"); + assertThat(serviceSettings, hasEntry("model_id", "embed-english-light-v3.0")); + var embeddingType = serviceSettings.get("embedding_type"); + // An upgraded node will report the embedding type as byte, an old node int8 + assertThat(embeddingType, Matchers.is(oneOf("int8", "byte"))); + assertEmbeddingInference(oldClusterIdInt8, CohereEmbeddingType.BYTE); + } + { + var configs = getConfigsWithBreakingChangeHandling(testTaskType, oldClusterIdFloat); + assertEquals("cohere", configs.get(0).get("service")); + var serviceSettings = (Map) configs.get(0).get("service_settings"); + assertThat(serviceSettings, hasEntry("model_id", "embed-english-light-v3.0")); + assertThat(serviceSettings, hasEntry("embedding_type", "float")); + assertEmbeddingInference(oldClusterIdFloat, CohereEmbeddingType.FLOAT); + } } else if (isUpgradedCluster()) { // check old cluster model - var configs = (List>) get(TaskType.TEXT_EMBEDDING, oldClusterIdInt8).get("endpoints"); + var configs = (List>) get(testTaskType, oldClusterIdInt8).get("endpoints"); var serviceSettings = (Map) configs.get(0).get("service_settings"); assertThat(serviceSettings, hasEntry("model_id", "embed-english-light-v3.0")); assertThat(serviceSettings, hasEntry("embedding_type", "byte")); @@ -114,9 +132,9 @@ public void testCohereEmbeddings() throws IOException { final String upgradedClusterIdByte = "upgraded-cluster-embeddings-byte"; cohereEmbeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(embeddingResponseByte())); - put(upgradedClusterIdByte, embeddingConfigByte(getUrl(cohereEmbeddingsServer)), TaskType.TEXT_EMBEDDING); + put(upgradedClusterIdByte, embeddingConfigByte(getUrl(cohereEmbeddingsServer)), testTaskType); - configs = (List>) get(TaskType.TEXT_EMBEDDING, upgradedClusterIdByte).get("endpoints"); + configs = (List>) get(testTaskType, upgradedClusterIdByte).get("endpoints"); serviceSettings = (Map) configs.get(0).get("service_settings"); assertThat(serviceSettings, hasEntry("embedding_type", "byte")); @@ -127,9 +145,9 @@ public void testCohereEmbeddings() throws IOException { final String upgradedClusterIdInt8 = "upgraded-cluster-embeddings-int8"; cohereEmbeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(embeddingResponseByte())); - put(upgradedClusterIdInt8, embeddingConfigInt8(getUrl(cohereEmbeddingsServer)), TaskType.TEXT_EMBEDDING); + put(upgradedClusterIdInt8, embeddingConfigInt8(getUrl(cohereEmbeddingsServer)), testTaskType); - configs = (List>) get(TaskType.TEXT_EMBEDDING, upgradedClusterIdInt8).get("endpoints"); + configs = (List>) get(testTaskType, upgradedClusterIdInt8).get("endpoints"); serviceSettings = (Map) configs.get(0).get("service_settings"); assertThat(serviceSettings, hasEntry("embedding_type", "byte")); // 
int8 rewritten to byte @@ -139,9 +157,9 @@ public void testCohereEmbeddings() throws IOException { { final String upgradedClusterIdFloat = "upgraded-cluster-embeddings-float"; cohereEmbeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(embeddingResponseFloat())); - put(upgradedClusterIdFloat, embeddingConfigFloat(getUrl(cohereEmbeddingsServer)), TaskType.TEXT_EMBEDDING); + put(upgradedClusterIdFloat, embeddingConfigFloat(getUrl(cohereEmbeddingsServer)), testTaskType); - configs = (List>) get(TaskType.TEXT_EMBEDDING, upgradedClusterIdFloat).get("endpoints"); + configs = (List>) get(testTaskType, upgradedClusterIdFloat).get("endpoints"); serviceSettings = (Map) configs.get(0).get("service_settings"); assertThat(serviceSettings, hasEntry("embedding_type", "float")); @@ -169,22 +187,25 @@ void assertEmbeddingInference(String inferenceId, CohereEmbeddingType type) thro } @SuppressWarnings("unchecked") - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/107887") public void testRerank() throws IOException { var rerankSupported = getOldClusterTestVersion().onOrAfter(COHERE_RERANK_ADDED); + String old_cluster_endpoint_identifier = oldClusterHasFeature("gte_v" + MODELS_RENAMED_TO_ENDPOINTS) ? "endpoints" : "models"; assumeTrue("Cohere rerank service added in " + COHERE_RERANK_ADDED, rerankSupported); final String oldClusterId = "old-cluster-rerank"; final String upgradedClusterId = "upgraded-cluster-rerank"; + var testTaskType = TaskType.RERANK; + if (isOldCluster()) { - put(oldClusterId, rerankConfig(getUrl(cohereRerankServer)), TaskType.RERANK); - var configs = (List>) get(TaskType.RERANK, oldClusterId).get("endpoints"); + put(oldClusterId, rerankConfig(getUrl(cohereRerankServer)), testTaskType); + var configs = (List>) get(testTaskType, oldClusterId).get(old_cluster_endpoint_identifier); assertThat(configs, hasSize(1)); assertRerank(oldClusterId); } else if (isMixedCluster()) { - var configs = (List>) get(TaskType.RERANK, oldClusterId).get("endpoints"); + var configs = getConfigsWithBreakingChangeHandling(testTaskType, oldClusterId); + assertEquals("cohere", configs.get(0).get("service")); var serviceSettings = (Map) configs.get(0).get("service_settings"); assertThat(serviceSettings, hasEntry("model_id", "rerank-english-v3.0")); @@ -195,7 +216,7 @@ public void testRerank() throws IOException { } else if (isUpgradedCluster()) { // check old cluster model - var configs = (List>) get(TaskType.RERANK, oldClusterId).get("endpoints"); + var configs = (List>) get(testTaskType, oldClusterId).get("endpoints"); assertEquals("cohere", configs.get(0).get("service")); var serviceSettings = (Map) configs.get(0).get("service_settings"); assertThat(serviceSettings, hasEntry("model_id", "rerank-english-v3.0")); @@ -205,7 +226,7 @@ public void testRerank() throws IOException { assertRerank(oldClusterId); // New endpoint - put(upgradedClusterId, rerankConfig(getUrl(cohereRerankServer)), TaskType.RERANK); + put(upgradedClusterId, rerankConfig(getUrl(cohereRerankServer)), testTaskType); configs = (List>) get(upgradedClusterId).get("endpoints"); assertThat(configs, hasSize(1)); diff --git a/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/HuggingFaceServiceUpgradeIT.java b/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/HuggingFaceServiceUpgradeIT.java index 899a02776195d..36ee472cc0a13 100644 --- 
a/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/HuggingFaceServiceUpgradeIT.java +++ b/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/HuggingFaceServiceUpgradeIT.java @@ -13,6 +13,8 @@ import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.http.MockResponse; import org.elasticsearch.test.http.MockWebServer; +import org.junit.AfterClass; +import org.junit.BeforeClass; import java.io.IOException; import java.util.List; @@ -34,7 +36,7 @@ public HuggingFaceServiceUpgradeIT(@Name("upgradedNodes") int upgradedNodes) { super(upgradedNodes); } - // @BeforeClass + @BeforeClass public static void startWebServer() throws IOException { embeddingsServer = new MockWebServer(); embeddingsServer.start(); @@ -43,47 +45,51 @@ public static void startWebServer() throws IOException { elserServer.start(); } - // @AfterClass for the awaits fix + @AfterClass public static void shutdown() { embeddingsServer.close(); elserServer.close(); } @SuppressWarnings("unchecked") - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/107887") public void testHFEmbeddings() throws IOException { var embeddingsSupported = getOldClusterTestVersion().onOrAfter(HF_EMBEDDINGS_ADDED); + // `gte_v` indicates that the cluster version is Greater Than or Equal to MODELS_RENAMED_TO_ENDPOINTS + String oldClusterEndpointIdentifier = oldClusterHasFeature("gte_v" + MODELS_RENAMED_TO_ENDPOINTS) ? "endpoints" : "models"; assumeTrue("Hugging Face embedding service added in " + HF_EMBEDDINGS_ADDED, embeddingsSupported); final String oldClusterId = "old-cluster-embeddings"; final String upgradedClusterId = "upgraded-cluster-embeddings"; + var testTaskType = TaskType.TEXT_EMBEDDING; + if (isOldCluster()) { // queue a response as PUT will call the service embeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(embeddingResponse())); - put(oldClusterId, embeddingConfig(getUrl(embeddingsServer)), TaskType.TEXT_EMBEDDING); + put(oldClusterId, embeddingConfig(getUrl(embeddingsServer)), testTaskType); - var configs = (List>) get(TaskType.TEXT_EMBEDDING, oldClusterId).get("endpoints"); + var configs = (List>) get(testTaskType, oldClusterId).get(oldClusterEndpointIdentifier); assertThat(configs, hasSize(1)); assertEmbeddingInference(oldClusterId); } else if (isMixedCluster()) { - var configs = (List>) get(TaskType.TEXT_EMBEDDING, oldClusterId).get("endpoints"); + var configs = getConfigsWithBreakingChangeHandling(testTaskType, oldClusterId); + assertEquals("hugging_face", configs.get(0).get("service")); assertEmbeddingInference(oldClusterId); } else if (isUpgradedCluster()) { // check old cluster model - var configs = (List>) get(TaskType.TEXT_EMBEDDING, oldClusterId).get("endpoints"); + var configs = (List>) get(testTaskType, oldClusterId).get("endpoints"); assertEquals("hugging_face", configs.get(0).get("service")); // Inference on old cluster model assertEmbeddingInference(oldClusterId); embeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(embeddingResponse())); - put(upgradedClusterId, embeddingConfig(getUrl(embeddingsServer)), TaskType.TEXT_EMBEDDING); + put(upgradedClusterId, embeddingConfig(getUrl(embeddingsServer)), testTaskType); - configs = (List>) get(TaskType.TEXT_EMBEDDING, upgradedClusterId).get("endpoints"); + configs = (List>) get(testTaskType, upgradedClusterId).get("endpoints"); assertThat(configs, hasSize(1)); assertEmbeddingInference(upgradedClusterId); @@ 
-100,27 +106,29 @@ void assertEmbeddingInference(String inferenceId) throws IOException { } @SuppressWarnings("unchecked") - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/107887") public void testElser() throws IOException { var supported = getOldClusterTestVersion().onOrAfter(HF_ELSER_ADDED); + String old_cluster_endpoint_identifier = oldClusterHasFeature("gte_v" + MODELS_RENAMED_TO_ENDPOINTS) ? "endpoints" : "models"; assumeTrue("HF elser service added in " + HF_ELSER_ADDED, supported); final String oldClusterId = "old-cluster-elser"; final String upgradedClusterId = "upgraded-cluster-elser"; + var testTaskType = TaskType.SPARSE_EMBEDDING; + if (isOldCluster()) { - put(oldClusterId, elserConfig(getUrl(elserServer)), TaskType.SPARSE_EMBEDDING); - var configs = (List>) get(TaskType.SPARSE_EMBEDDING, oldClusterId).get("endpoints"); + put(oldClusterId, elserConfig(getUrl(elserServer)), testTaskType); + var configs = (List>) get(testTaskType, oldClusterId).get(old_cluster_endpoint_identifier); assertThat(configs, hasSize(1)); assertElser(oldClusterId); } else if (isMixedCluster()) { - var configs = (List>) get(TaskType.SPARSE_EMBEDDING, oldClusterId).get("endpoints"); + var configs = getConfigsWithBreakingChangeHandling(testTaskType, oldClusterId); assertEquals("hugging_face", configs.get(0).get("service")); assertElser(oldClusterId); } else if (isUpgradedCluster()) { // check old cluster model - var configs = (List>) get(TaskType.SPARSE_EMBEDDING, oldClusterId).get("endpoints"); + var configs = (List>) get(testTaskType, oldClusterId).get("endpoints"); assertEquals("hugging_face", configs.get(0).get("service")); var taskSettings = (Map) configs.get(0).get("task_settings"); assertThat(taskSettings.keySet(), empty()); @@ -128,7 +136,7 @@ public void testElser() throws IOException { assertElser(oldClusterId); // New endpoint - put(upgradedClusterId, elserConfig(getUrl(elserServer)), TaskType.SPARSE_EMBEDDING); + put(upgradedClusterId, elserConfig(getUrl(elserServer)), testTaskType); configs = (List>) get(upgradedClusterId).get("endpoints"); assertThat(configs, hasSize(1)); diff --git a/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/InferenceUpgradeTestCase.java b/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/InferenceUpgradeTestCase.java index ecfec2304c8a1..58335eb53b366 100644 --- a/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/InferenceUpgradeTestCase.java +++ b/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/InferenceUpgradeTestCase.java @@ -16,13 +16,17 @@ import org.elasticsearch.upgrades.AbstractRollingUpgradeTestCase; import java.io.IOException; +import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Objects; import static org.elasticsearch.core.Strings.format; public class InferenceUpgradeTestCase extends AbstractRollingUpgradeTestCase { + static final String MODELS_RENAMED_TO_ENDPOINTS = "8.15.0"; + public InferenceUpgradeTestCase(@Name("upgradedNodes") int upgradedNodes) { super(upgradedNodes); } @@ -104,4 +108,17 @@ protected void put(String inferenceId, String modelConfig, TaskType taskType) th var response = client().performRequest(request); assertOKAndConsume(response); } + + @SuppressWarnings("unchecked") + // in version 8.15, there was a breaking change where "models" was renamed to "endpoints" + 
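// this helper reads both the new "endpoints" key and the legacy "models" key and merges whatever is present +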
LinkedList> getConfigsWithBreakingChangeHandling(TaskType testTaskType, String oldClusterId) throws IOException { + + LinkedList> configs; + configs = new LinkedList<>( + (List>) Objects.requireNonNullElse((get(testTaskType, oldClusterId).get("endpoints")), List.of()) + ); + configs.addAll(Objects.requireNonNullElse((List>) get(testTaskType, oldClusterId).get("models"), List.of())); + + return configs; + } } diff --git a/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/OpenAiServiceUpgradeIT.java b/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/OpenAiServiceUpgradeIT.java index bfdcb0e0d5ed4..df995c6f5e620 100644 --- a/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/OpenAiServiceUpgradeIT.java +++ b/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/OpenAiServiceUpgradeIT.java @@ -12,6 +12,8 @@ import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.http.MockResponse; import org.elasticsearch.test.http.MockWebServer; +import org.junit.AfterClass; +import org.junit.BeforeClass; import java.io.IOException; import java.util.List; @@ -35,7 +37,7 @@ public OpenAiServiceUpgradeIT(@Name("upgradedNodes") int upgradedNodes) { super(upgradedNodes); } - // @BeforeClass + @BeforeClass public static void startWebServer() throws IOException { openAiEmbeddingsServer = new MockWebServer(); openAiEmbeddingsServer.start(); @@ -44,33 +46,37 @@ public static void startWebServer() throws IOException { openAiChatCompletionsServer.start(); } - // @AfterClass for the awaits fix + @AfterClass public static void shutdown() { openAiEmbeddingsServer.close(); openAiChatCompletionsServer.close(); } @SuppressWarnings("unchecked") - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/107887") public void testOpenAiEmbeddings() throws IOException { var openAiEmbeddingsSupported = getOldClusterTestVersion().onOrAfter(OPEN_AI_EMBEDDINGS_ADDED); + // `gte_v` indicates that the cluster version is Greater Than or Equal to MODELS_RENAMED_TO_ENDPOINTS + String oldClusterEndpointIdentifier = oldClusterHasFeature("gte_v" + MODELS_RENAMED_TO_ENDPOINTS) ? 
"endpoints" : "models"; assumeTrue("OpenAI embedding service added in " + OPEN_AI_EMBEDDINGS_ADDED, openAiEmbeddingsSupported); final String oldClusterId = "old-cluster-embeddings"; final String upgradedClusterId = "upgraded-cluster-embeddings"; + var testTaskType = TaskType.TEXT_EMBEDDING; + if (isOldCluster()) { String inferenceConfig = oldClusterVersionCompatibleEmbeddingConfig(); // queue a response as PUT will call the service openAiEmbeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(embeddingResponse())); - put(oldClusterId, inferenceConfig, TaskType.TEXT_EMBEDDING); + put(oldClusterId, inferenceConfig, testTaskType); - var configs = (List>) get(TaskType.TEXT_EMBEDDING, oldClusterId).get("endpoints"); + var configs = (List>) get(testTaskType, oldClusterId).get(oldClusterEndpointIdentifier); assertThat(configs, hasSize(1)); assertEmbeddingInference(oldClusterId); } else if (isMixedCluster()) { - var configs = (List>) get(TaskType.TEXT_EMBEDDING, oldClusterId).get("endpoints"); + var configs = getConfigsWithBreakingChangeHandling(testTaskType, oldClusterId); + assertEquals("openai", configs.get(0).get("service")); var serviceSettings = (Map) configs.get(0).get("service_settings"); var taskSettings = (Map) configs.get(0).get("task_settings"); @@ -80,7 +86,7 @@ public void testOpenAiEmbeddings() throws IOException { assertEmbeddingInference(oldClusterId); } else if (isUpgradedCluster()) { // check old cluster model - var configs = (List>) get(TaskType.TEXT_EMBEDDING, oldClusterId).get("endpoints"); + var configs = (List>) get(testTaskType, oldClusterId).get("endpoints"); var serviceSettings = (Map) configs.get(0).get("service_settings"); // model id is moved to service settings assertThat(serviceSettings, hasEntry("model_id", "text-embedding-ada-002")); @@ -92,9 +98,9 @@ public void testOpenAiEmbeddings() throws IOException { String inferenceConfig = embeddingConfigWithModelInServiceSettings(getUrl(openAiEmbeddingsServer)); openAiEmbeddingsServer.enqueue(new MockResponse().setResponseCode(200).setBody(embeddingResponse())); - put(upgradedClusterId, inferenceConfig, TaskType.TEXT_EMBEDDING); + put(upgradedClusterId, inferenceConfig, testTaskType); - configs = (List>) get(TaskType.TEXT_EMBEDDING, upgradedClusterId).get("endpoints"); + configs = (List>) get(testTaskType, upgradedClusterId).get("endpoints"); assertThat(configs, hasSize(1)); assertEmbeddingInference(upgradedClusterId); @@ -111,23 +117,29 @@ void assertEmbeddingInference(String inferenceId) throws IOException { } @SuppressWarnings("unchecked") - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/107887") public void testOpenAiCompletions() throws IOException { var openAiEmbeddingsSupported = getOldClusterTestVersion().onOrAfter(OPEN_AI_COMPLETIONS_ADDED); + String old_cluster_endpoint_identifier = oldClusterHasFeature("gte_v" + MODELS_RENAMED_TO_ENDPOINTS) ? 
"endpoints" : "models"; assumeTrue("OpenAI completions service added in " + OPEN_AI_COMPLETIONS_ADDED, openAiEmbeddingsSupported); final String oldClusterId = "old-cluster-completions"; final String upgradedClusterId = "upgraded-cluster-completions"; + var testTaskType = TaskType.COMPLETION; + if (isOldCluster()) { - put(oldClusterId, chatCompletionsConfig(getUrl(openAiChatCompletionsServer)), TaskType.COMPLETION); + put(oldClusterId, chatCompletionsConfig(getUrl(openAiChatCompletionsServer)), testTaskType); - var configs = (List>) get(TaskType.COMPLETION, oldClusterId).get("endpoints"); + var configs = (List>) get(testTaskType, oldClusterId).get(old_cluster_endpoint_identifier); assertThat(configs, hasSize(1)); assertCompletionInference(oldClusterId); } else if (isMixedCluster()) { - var configs = (List>) get(TaskType.COMPLETION, oldClusterId).get("endpoints"); + var configs = (List>) get(testTaskType, oldClusterId).get("endpoints"); + if (oldClusterHasFeature("gte_v" + MODELS_RENAMED_TO_ENDPOINTS) == false) { + configs.addAll((List>) get(testTaskType, oldClusterId).get(old_cluster_endpoint_identifier)); + // in version 8.15, there was a breaking change where "models" was renamed to "endpoints" + } assertEquals("openai", configs.get(0).get("service")); var serviceSettings = (Map) configs.get(0).get("service_settings"); assertThat(serviceSettings, hasEntry("model_id", "gpt-4")); @@ -137,7 +149,7 @@ public void testOpenAiCompletions() throws IOException { assertCompletionInference(oldClusterId); } else if (isUpgradedCluster()) { // check old cluster model - var configs = (List>) get(TaskType.COMPLETION, oldClusterId).get("endpoints"); + var configs = (List>) get(testTaskType, oldClusterId).get("endpoints"); var serviceSettings = (Map) configs.get(0).get("service_settings"); assertThat(serviceSettings, hasEntry("model_id", "gpt-4")); var taskSettings = (Map) configs.get(0).get("task_settings"); @@ -145,8 +157,8 @@ public void testOpenAiCompletions() throws IOException { assertCompletionInference(oldClusterId); - put(upgradedClusterId, chatCompletionsConfig(getUrl(openAiChatCompletionsServer)), TaskType.COMPLETION); - configs = (List>) get(TaskType.COMPLETION, upgradedClusterId).get("endpoints"); + put(upgradedClusterId, chatCompletionsConfig(getUrl(openAiChatCompletionsServer)), testTaskType); + configs = (List>) get(testTaskType, upgradedClusterId).get("endpoints"); assertThat(configs, hasSize(1)); // Inference on the new config diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestDenseInferenceServiceExtension.java b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestDenseInferenceServiceExtension.java index cddcff9692a70..c225f94694c01 100644 --- a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestDenseInferenceServiceExtension.java +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestDenseInferenceServiceExtension.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.InferenceServiceExtension; @@ -33,6 +34,7 @@ import 
org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -138,44 +140,87 @@ public void chunkedInfer( private InferenceTextEmbeddingFloatResults makeResults(List input, int dimensions) { List embeddings = new ArrayList<>(); - for (int i = 0; i < input.size(); i++) { - float[] doubleEmbeddings = generateEmbedding(input.get(i), dimensions); - List floatEmbeddings = new ArrayList<>(dimensions); - for (int j = 0; j < dimensions; j++) { - floatEmbeddings.add(doubleEmbeddings[j]); - } + for (String inputString : input) { + List floatEmbeddings = generateEmbedding(inputString, dimensions); embeddings.add(InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding.of(floatEmbeddings)); } return new InferenceTextEmbeddingFloatResults(embeddings); } private List makeChunkedResults(List input, int dimensions) { - var chunks = new ArrayList(); - for (int i = 0; i < input.size(); i++) { - float[] embedding = generateEmbedding(input.get(i), dimensions); - chunks.add(new InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding(embedding)); - } - - return InferenceChunkedTextEmbeddingFloatResults.listOf(input, new InferenceTextEmbeddingFloatResults(chunks)); + InferenceTextEmbeddingFloatResults nonChunkedResults = makeResults(input, dimensions); + return InferenceChunkedTextEmbeddingFloatResults.listOf(input, nonChunkedResults); } protected ServiceSettings getServiceSettingsFromMap(Map serviceSettingsMap) { return TestServiceSettings.fromMap(serviceSettingsMap); } - private static float[] generateEmbedding(String input, int dimensions) { - float[] embedding = new float[dimensions]; - for (int j = 0; j < dimensions; j++) { - embedding[j] = input.hashCode() + 1 + j; + /** + * Generate a test embedding for the provided input. + *

      + * The goal of this method is to generate an embedding with the following properties:
      + * </p>
      + * <ul>
      + *     <li>Unique to the input</li>
      + *     <li>Reproducible (i.e given the same input, the same embedding should be generated)</li>
      + *     <li>Valid as both a float and byte embedding</li>
      + * </ul>
      + * <p>
      + * The embedding is generated by:
      + * </p>
      + * <ul>
      + *     <li>getting the hash code of the input</li>
      + *     <li>converting the hash code value to a string</li>
      + *     <li>converting the string to a UTF-8 encoded byte array</li>
      + *     <li>repeatedly appending the byte array to the embedding until the desired number of dimensions are populated</li>
      + * </ul>
      + * <p>
      + * Since the hash code value, when interpreted as a string, is guaranteed to only contain digits and the "-" character, the UTF-8
      + * encoded byte array is guaranteed to only contain values in the standard ASCII table.
      + * </p>
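      + * For example, an input whose hash code is -123 is converted to the string "-123", whose UTF-8 bytes are 45, 49, 50 and 51;
      + * those values are then repeated (and truncated) as needed to fill the requested number of dimensions.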
      + * + * @param input The input string + * @param dimensions The embedding dimension count + * @return An embedding + */ + private static List generateEmbedding(String input, int dimensions) { + List embedding = new ArrayList<>(dimensions); + + byte[] byteArray = Integer.toString(input.hashCode()).getBytes(StandardCharsets.UTF_8); + List embeddingValues = new ArrayList<>(byteArray.length); + for (byte value : byteArray) { + embeddingValues.add((float) value); + } + + int remainingDimensions = dimensions; + while (remainingDimensions >= embeddingValues.size()) { + embedding.addAll(embeddingValues); + remainingDimensions -= embeddingValues.size(); + } + if (remainingDimensions > 0) { + embedding.addAll(embeddingValues.subList(0, remainingDimensions)); } + return embedding; } } - public record TestServiceSettings(String model, Integer dimensions, SimilarityMeasure similarity) implements ServiceSettings { + public record TestServiceSettings( + String model, + Integer dimensions, + SimilarityMeasure similarity, + DenseVectorFieldMapper.ElementType elementType + ) implements ServiceSettings { static final String NAME = "test_text_embedding_service_settings"; + public TestServiceSettings { + if (elementType == DenseVectorFieldMapper.ElementType.BIT) { + throw new IllegalArgumentException("Test dense inference service does not yet support element type BIT"); + } + } + public static TestServiceSettings fromMap(Map map) { ValidationException validationException = new ValidationException(); @@ -195,11 +240,26 @@ public static TestServiceSettings fromMap(Map map) { similarity = SimilarityMeasure.fromString(similarityStr); } - return new TestServiceSettings(model, dimensions, similarity); + DenseVectorFieldMapper.ElementType elementType = null; + String elementTypeStr = (String) map.remove("element_type"); + if (elementTypeStr != null) { + elementType = DenseVectorFieldMapper.ElementType.fromString(elementTypeStr); + } + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new TestServiceSettings(model, dimensions, similarity, elementType); } public TestServiceSettings(StreamInput in) throws IOException { - this(in.readString(), in.readOptionalInt(), in.readOptionalEnum(SimilarityMeasure.class)); + this( + in.readString(), + in.readOptionalInt(), + in.readOptionalEnum(SimilarityMeasure.class), + in.readOptionalEnum(DenseVectorFieldMapper.ElementType.class) + ); } @Override @@ -210,6 +270,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (similarity != null) { builder.field("similarity", similarity); } + if (elementType != null) { + builder.field("element_type", elementType); + } builder.endObject(); return builder; } @@ -229,22 +292,23 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(model); out.writeInt(dimensions); out.writeOptionalEnum(similarity); + out.writeOptionalEnum(elementType); } @Override public ToXContentObject getFilteredXContentObject() { - return (builder, params) -> { - builder.startObject(); - builder.field("model", model); - builder.field("dimensions", dimensions); - if (similarity != null) { - builder.field("similarity", similarity); - } - builder.endObject(); - return builder; - }; + return this; } + @Override + public SimilarityMeasure similarity() { + return similarity != null ? similarity : SimilarityMeasure.COSINE; + } + + @Override + public DenseVectorFieldMapper.ElementType elementType() { + return elementType != null ? 
elementType : DenseVectorFieldMapper.ElementType.FLOAT; + } } } diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestInferenceServicePlugin.java b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestInferenceServicePlugin.java index 6460b06f13800..752472b90374b 100644 --- a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestInferenceServicePlugin.java +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestInferenceServicePlugin.java @@ -39,6 +39,11 @@ public List getNamedWriteables() { ServiceSettings.class, TestSparseInferenceServiceExtension.TestServiceSettings.NAME, TestSparseInferenceServiceExtension.TestServiceSettings::new + ), + new NamedWriteableRegistry.Entry( + ServiceSettings.class, + TestRerankingServiceExtension.TestServiceSettings.NAME, + TestRerankingServiceExtension.TestServiceSettings::new ) ); } diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestRerankingServiceExtension.java b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestRerankingServiceExtension.java new file mode 100644 index 0000000000000..b2f3b6f774a6f --- /dev/null +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestRerankingServiceExtension.java @@ -0,0 +1,195 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.mock; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.ChunkedInferenceServiceResults; +import org.elasticsearch.inference.ChunkingOptions; +import org.elasticsearch.inference.InferenceServiceExtension; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.inference.InputType; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.inference.results.RankedDocsResults; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class TestRerankingServiceExtension implements InferenceServiceExtension { + @Override + public List getInferenceServiceFactories() { + return List.of(TestInferenceService::new); + } + + public static class TestRerankingModel extends Model { + public TestRerankingModel(String inferenceEntityId, TestServiceSettings serviceSettings) { + super( + new ModelConfigurations(inferenceEntityId, TaskType.RERANK, TestInferenceService.NAME, serviceSettings), + new ModelSecrets(new AbstractTestInferenceService.TestSecretSettings("api_key")) + ); + } + } + + public static class TestInferenceService extends AbstractTestInferenceService { + public static final String NAME = "test_reranking_service"; + + public TestInferenceService(InferenceServiceFactoryContext context) {} + + @Override + public String name() { + return NAME; + } + + @Override + @SuppressWarnings("unchecked") + public void parseRequestConfig( + String modelId, + TaskType taskType, + Map config, + Set platformArchitectures, + ActionListener parsedModelListener + ) { + var serviceSettingsMap = (Map) config.remove(ModelConfigurations.SERVICE_SETTINGS); + var serviceSettings = TestServiceSettings.fromMap(serviceSettingsMap); + var secretSettings = TestSecretSettings.fromMap(serviceSettingsMap); + + var taskSettingsMap = getTaskSettingsMap(config); + var taskSettings = TestTaskSettings.fromMap(taskSettingsMap); + + parsedModelListener.onResponse(new TestServiceModel(modelId, taskType, name(), serviceSettings, taskSettings, secretSettings)); + } + + @Override + public void infer( + Model model, + @Nullable String query, + List input, + Map taskSettings, + InputType inputType, + TimeValue timeout, + ActionListener listener + ) { + switch (model.getConfigurations().getTaskType()) { + case ANY, RERANK -> listener.onResponse(makeResults(input)); + default -> listener.onFailure( + new ElasticsearchStatusException( + TaskType.unsupportedTaskTypeErrorMsg(model.getConfigurations().getTaskType(), name()), + RestStatus.BAD_REQUEST + ) + ); + } + } + + @Override + public void chunkedInfer( + Model model, + @Nullable String query, + List input, + Map taskSettings, + InputType inputType, + ChunkingOptions chunkingOptions, + 
TimeValue timeout, + ActionListener> listener + ) { + listener.onFailure( + new ElasticsearchStatusException( + TaskType.unsupportedTaskTypeErrorMsg(model.getConfigurations().getTaskType(), name()), + RestStatus.BAD_REQUEST + ) + ); + } + + private RankedDocsResults makeResults(List input) { + List results = new ArrayList<>(); + int totalResults = input.size(); + float resultDiff = 0.2f; + for (int i = 0; i < input.size(); i++) { + results.add(new RankedDocsResults.RankedDoc(totalResults - 1 - i, resultDiff * (totalResults - i), input.get(i))); + } + return new RankedDocsResults(results); + } + + protected ServiceSettings getServiceSettingsFromMap(Map serviceSettingsMap) { + return TestServiceSettings.fromMap(serviceSettingsMap); + } + } + + public record TestServiceSettings(String model_id) implements ServiceSettings { + + static final String NAME = "test_reranking_service_settings"; + + public static TestServiceSettings fromMap(Map map) { + ValidationException validationException = new ValidationException(); + + String model = (String) map.remove("model_id"); + + if (model == null) { + validationException.addValidationError("missing model"); + } + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new TestServiceSettings(model); + } + + public TestServiceSettings(StreamInput in) throws IOException { + this(in.readString()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("model_id", model_id); + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.current(); // fine for these tests but will not work for cluster upgrade tests + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(model_id); + } + + @Override + public ToXContentObject getFilteredXContentObject() { + return (builder, params) -> { + builder.startObject(); + builder.field("model_id", model_id); + builder.endObject(); + return builder; + }; + } + } +} diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/resources/META-INF/services/org.elasticsearch.inference.InferenceServiceExtension b/x-pack/plugin/inference/qa/test-service-plugin/src/main/resources/META-INF/services/org.elasticsearch.inference.InferenceServiceExtension index c1908dc788251..690168b538fb9 100644 --- a/x-pack/plugin/inference/qa/test-service-plugin/src/main/resources/META-INF/services/org.elasticsearch.inference.InferenceServiceExtension +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/resources/META-INF/services/org.elasticsearch.inference.InferenceServiceExtension @@ -1,2 +1,3 @@ org.elasticsearch.xpack.inference.mock.TestSparseInferenceServiceExtension org.elasticsearch.xpack.inference.mock.TestDenseInferenceServiceExtension +org.elasticsearch.xpack.inference.mock.TestRerankingServiceExtension diff --git a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterIT.java b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterIT.java index 300c0d2c471dc..8da1aaabd517a 100644 --- a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterIT.java +++ 
b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterIT.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -45,7 +46,9 @@ public void setup() throws Exception { client(), randomIntBetween(1, 100), // dot product means that we need normalized vectors; it's not worth doing that in this test - randomValueOtherThan(SimilarityMeasure.DOT_PRODUCT, () -> randomFrom(SimilarityMeasure.values())) + randomValueOtherThan(SimilarityMeasure.DOT_PRODUCT, () -> randomFrom(SimilarityMeasure.values())), + // TODO: Allow element type BIT once TestDenseInferenceServiceExtension supports it + randomValueOtherThan(DenseVectorFieldMapper.ElementType.BIT, () -> randomFrom(DenseVectorFieldMapper.ElementType.values())) ); } diff --git a/x-pack/plugin/inference/src/main/java/module-info.java b/x-pack/plugin/inference/src/main/java/module-info.java index c67c6f29d69c5..a7e5718a0920e 100644 --- a/x-pack/plugin/inference/src/main/java/module-info.java +++ b/x-pack/plugin/inference/src/main/java/module-info.java @@ -19,10 +19,20 @@ requires org.apache.lucene.core; requires org.apache.lucene.join; requires com.ibm.icu; + requires com.google.auth.oauth2; + requires com.google.auth; + requires com.google.api.client; + requires com.google.gson; + requires aws.java.sdk.bedrockruntime; + requires aws.java.sdk.core; + requires com.fasterxml.jackson.databind; + requires org.joda.time; exports org.elasticsearch.xpack.inference.action; exports org.elasticsearch.xpack.inference.registry; exports org.elasticsearch.xpack.inference.rest; exports org.elasticsearch.xpack.inference.services; exports org.elasticsearch.xpack.inference; + + provides org.elasticsearch.features.FeatureSpecification with org.elasticsearch.xpack.inference.InferenceFeatures; } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java new file mode 100644 index 0000000000000..4cc7f5b502ba9 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference; + +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankRetrieverBuilder; + +import java.util.Set; + +/** + * Provides inference features. 
+ */ +public class InferenceFeatures implements FeatureSpecification { + + @Override + public Set getFeatures() { + return Set.of(TextSimilarityRankRetrieverBuilder.TEXT_SIMILARITY_RERANKER_RETRIEVER_SUPPORTED); + } + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java index b3dbd97d495a9..f8ce9df1fb194 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java @@ -24,6 +24,12 @@ import org.elasticsearch.xpack.core.inference.results.LegacyTextEmbeddingResults; import org.elasticsearch.xpack.core.inference.results.RankedDocsResults; import org.elasticsearch.xpack.core.inference.results.SparseEmbeddingResults; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockSecretSettings; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionServiceSettings; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionTaskSettings; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsServiceSettings; +import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionServiceSettings; +import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionTaskSettings; import org.elasticsearch.xpack.inference.services.azureaistudio.completion.AzureAiStudioChatCompletionServiceSettings; import org.elasticsearch.xpack.inference.services.azureaistudio.completion.AzureAiStudioChatCompletionTaskSettings; import org.elasticsearch.xpack.inference.services.azureaistudio.embeddings.AzureAiStudioEmbeddingsServiceSettings; @@ -39,12 +45,20 @@ import org.elasticsearch.xpack.inference.services.cohere.embeddings.CohereEmbeddingsTaskSettings; import org.elasticsearch.xpack.inference.services.cohere.rerank.CohereRerankServiceSettings; import org.elasticsearch.xpack.inference.services.cohere.rerank.CohereRerankTaskSettings; +import org.elasticsearch.xpack.inference.services.elasticsearch.CustomElandInternalServiceSettings; +import org.elasticsearch.xpack.inference.services.elasticsearch.CustomElandInternalTextEmbeddingServiceSettings; +import org.elasticsearch.xpack.inference.services.elasticsearch.CustomElandRerankTaskSettings; import org.elasticsearch.xpack.inference.services.elasticsearch.ElasticsearchInternalServiceSettings; import org.elasticsearch.xpack.inference.services.elasticsearch.MultilingualE5SmallInternalServiceSettings; import org.elasticsearch.xpack.inference.services.elser.ElserInternalServiceSettings; import org.elasticsearch.xpack.inference.services.elser.ElserMlNodeTaskSettings; import org.elasticsearch.xpack.inference.services.googleaistudio.completion.GoogleAiStudioCompletionServiceSettings; import org.elasticsearch.xpack.inference.services.googleaistudio.embeddings.GoogleAiStudioEmbeddingsServiceSettings; +import org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiSecretSettings; +import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsServiceSettings; +import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings; 
+import org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankServiceSettings; +import org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankTaskSettings; import org.elasticsearch.xpack.inference.services.huggingface.HuggingFaceServiceSettings; import org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserServiceSettings; import org.elasticsearch.xpack.inference.services.mistral.embeddings.MistralEmbeddingsServiceSettings; @@ -108,11 +122,50 @@ public static List getNamedWriteables() { addAzureOpenAiNamedWriteables(namedWriteables); addAzureAiStudioNamedWriteables(namedWriteables); addGoogleAiStudioNamedWritables(namedWriteables); + addGoogleVertexAiNamedWriteables(namedWriteables); addMistralNamedWriteables(namedWriteables); + addCustomElandWriteables(namedWriteables); + addAnthropicNamedWritables(namedWriteables); + addAmazonBedrockNamedWriteables(namedWriteables); return namedWriteables; } + private static void addAmazonBedrockNamedWriteables(List namedWriteables) { + namedWriteables.add( + new NamedWriteableRegistry.Entry( + AmazonBedrockSecretSettings.class, + AmazonBedrockSecretSettings.NAME, + AmazonBedrockSecretSettings::new + ) + ); + + namedWriteables.add( + new NamedWriteableRegistry.Entry( + ServiceSettings.class, + AmazonBedrockEmbeddingsServiceSettings.NAME, + AmazonBedrockEmbeddingsServiceSettings::new + ) + ); + + // no task settings for Amazon Bedrock Embeddings + + namedWriteables.add( + new NamedWriteableRegistry.Entry( + ServiceSettings.class, + AmazonBedrockChatCompletionServiceSettings.NAME, + AmazonBedrockChatCompletionServiceSettings::new + ) + ); + namedWriteables.add( + new NamedWriteableRegistry.Entry( + TaskSettings.class, + AmazonBedrockChatCompletionTaskSettings.NAME, + AmazonBedrockChatCompletionTaskSettings::new + ) + ); + } + private static void addMistralNamedWriteables(List namedWriteables) { namedWriteables.add( new NamedWriteableRegistry.Entry( @@ -283,6 +336,44 @@ private static void addGoogleAiStudioNamedWritables(List namedWriteables) { + namedWriteables.add( + new NamedWriteableRegistry.Entry(SecretSettings.class, GoogleVertexAiSecretSettings.NAME, GoogleVertexAiSecretSettings::new) + ); + + namedWriteables.add( + new NamedWriteableRegistry.Entry( + ServiceSettings.class, + GoogleVertexAiEmbeddingsServiceSettings.NAME, + GoogleVertexAiEmbeddingsServiceSettings::new + ) + ); + + namedWriteables.add( + new NamedWriteableRegistry.Entry( + TaskSettings.class, + GoogleVertexAiEmbeddingsTaskSettings.NAME, + GoogleVertexAiEmbeddingsTaskSettings::new + ) + ); + + namedWriteables.add( + new NamedWriteableRegistry.Entry( + ServiceSettings.class, + GoogleVertexAiRerankServiceSettings.NAME, + GoogleVertexAiRerankServiceSettings::new + ) + ); + + namedWriteables.add( + new NamedWriteableRegistry.Entry( + TaskSettings.class, + GoogleVertexAiRerankTaskSettings.NAME, + GoogleVertexAiRerankTaskSettings::new + ) + ); + } + private static void addInternalElserNamedWriteables(List namedWriteables) { namedWriteables.add( new NamedWriteableRegistry.Entry(ServiceSettings.class, ElserInternalServiceSettings.NAME, ElserInternalServiceSettings::new) @@ -349,4 +440,40 @@ private static void addInferenceResultsNamedWriteables(List namedWriteables) { + namedWriteables.add( + new NamedWriteableRegistry.Entry( + ServiceSettings.class, + CustomElandInternalServiceSettings.NAME, + CustomElandInternalServiceSettings::new + ) + ); + namedWriteables.add( + new NamedWriteableRegistry.Entry( + 
ServiceSettings.class, + CustomElandInternalTextEmbeddingServiceSettings.NAME, + CustomElandInternalTextEmbeddingServiceSettings::new + ) + ); + namedWriteables.add( + new NamedWriteableRegistry.Entry(TaskSettings.class, CustomElandRerankTaskSettings.NAME, CustomElandRerankTaskSettings::new) + ); + } + + private static void addAnthropicNamedWritables(List namedWriteables) { + namedWriteables.add( + new NamedWriteableRegistry.Entry( + ServiceSettings.class, + AnthropicChatCompletionServiceSettings.NAME, + AnthropicChatCompletionServiceSettings::new + ) + ); + namedWriteables.add( + new NamedWriteableRegistry.Entry( + TaskSettings.class, + AnthropicChatCompletionTaskSettings.NAME, + AnthropicChatCompletionTaskSettings::new + ) + ); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java index 1e0f715e3f3e9..1c388f7399260 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java @@ -34,8 +34,10 @@ import org.elasticsearch.plugins.SystemIndexPlugin; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.search.rank.RankBuilder; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ScalingExecutorBuilder; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; import org.elasticsearch.xpack.core.inference.action.DeleteInferenceEndpointAction; @@ -51,6 +53,7 @@ import org.elasticsearch.xpack.inference.action.TransportPutInferenceModelAction; import org.elasticsearch.xpack.inference.action.filter.ShardBulkInferenceActionFilter; import org.elasticsearch.xpack.inference.common.Truncator; +import org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockRequestSender; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; import org.elasticsearch.xpack.inference.external.http.HttpSettings; import org.elasticsearch.xpack.inference.external.http.retry.RetrySettings; @@ -59,6 +62,8 @@ import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.elasticsearch.xpack.inference.mapper.SemanticTextFieldMapper; import org.elasticsearch.xpack.inference.queries.SemanticQueryBuilder; +import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankBuilder; +import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankRetrieverBuilder; import org.elasticsearch.xpack.inference.registry.ModelRegistry; import org.elasticsearch.xpack.inference.rest.RestDeleteInferenceEndpointAction; import org.elasticsearch.xpack.inference.rest.RestGetInferenceDiagnosticsAction; @@ -66,12 +71,15 @@ import org.elasticsearch.xpack.inference.rest.RestInferenceAction; import org.elasticsearch.xpack.inference.rest.RestPutInferenceModelAction; import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockService; +import org.elasticsearch.xpack.inference.services.anthropic.AnthropicService; import org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioService; import org.elasticsearch.xpack.inference.services.azureopenai.AzureOpenAiService; import 
org.elasticsearch.xpack.inference.services.cohere.CohereService; import org.elasticsearch.xpack.inference.services.elasticsearch.ElasticsearchInternalService; import org.elasticsearch.xpack.inference.services.elser.ElserInternalService; import org.elasticsearch.xpack.inference.services.googleaistudio.GoogleAiStudioService; +import org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiService; import org.elasticsearch.xpack.inference.services.huggingface.HuggingFaceService; import org.elasticsearch.xpack.inference.services.huggingface.elser.HuggingFaceElserService; import org.elasticsearch.xpack.inference.services.mistral.MistralService; @@ -108,8 +116,10 @@ public class InferencePlugin extends Plugin implements ActionPlugin, ExtensibleP public static final String NAME = "inference"; public static final String UTILITY_THREAD_POOL_NAME = "inference_utility"; + private final Settings settings; private final SetOnce httpFactory = new SetOnce<>(); + private final SetOnce amazonBedrockFactory = new SetOnce<>(); private final SetOnce serviceComponents = new SetOnce<>(); private final SetOnce inferenceServiceRegistry = new SetOnce<>(); @@ -163,6 +173,9 @@ public Collection createComponents(PluginServices services) { var httpRequestSenderFactory = new HttpRequestSender.Factory(serviceComponents.get(), httpClientManager, services.clusterService()); httpFactory.set(httpRequestSenderFactory); + var amazonBedrockRequestSenderFactory = new AmazonBedrockRequestSender.Factory(serviceComponents.get(), services.clusterService()); + amazonBedrockFactory.set(amazonBedrockRequestSenderFactory); + ModelRegistry modelRegistry = new ModelRegistry(services.client()); if (inferenceServiceExtensions == null) { @@ -199,15 +212,18 @@ public List getInferenceServiceFactories() { context -> new AzureOpenAiService(httpFactory.get(), serviceComponents.get()), context -> new AzureAiStudioService(httpFactory.get(), serviceComponents.get()), context -> new GoogleAiStudioService(httpFactory.get(), serviceComponents.get()), + context -> new GoogleVertexAiService(httpFactory.get(), serviceComponents.get()), context -> new MistralService(httpFactory.get(), serviceComponents.get()), + context -> new AnthropicService(httpFactory.get(), serviceComponents.get()), + context -> new AmazonBedrockService(httpFactory.get(), amazonBedrockFactory.get(), serviceComponents.get()), ElasticsearchInternalService::new ); } @Override public List getNamedWriteables() { - var entries = new ArrayList(); - entries.addAll(InferenceNamedWriteablesProvider.getNamedWriteables()); + var entries = new ArrayList<>(InferenceNamedWriteablesProvider.getNamedWriteables()); + entries.add(new NamedWriteableRegistry.Entry(RankBuilder.class, TextSimilarityRankBuilder.NAME, TextSimilarityRankBuilder::new)); return entries; } @@ -285,24 +301,22 @@ public void close() { @Override public Map getMappers() { - if (SemanticTextFeature.isEnabled()) { - return Map.of(SemanticTextFieldMapper.CONTENT_TYPE, SemanticTextFieldMapper.PARSER); - } - return Map.of(); + return Map.of(SemanticTextFieldMapper.CONTENT_TYPE, SemanticTextFieldMapper.PARSER); } @Override public Collection getMappedActionFilters() { - if (SemanticTextFeature.isEnabled()) { - return singletonList(shardBulkInferenceActionFilter.get()); - } - return List.of(); + return singletonList(shardBulkInferenceActionFilter.get()); } public List> getQueries() { - if (SemanticTextFeature.isEnabled()) { - return List.of(new QuerySpec<>(SemanticQueryBuilder.NAME, SemanticQueryBuilder::new, 
SemanticQueryBuilder::fromXContent)); - } - return List.of(); + return List.of(new QuerySpec<>(SemanticQueryBuilder.NAME, SemanticQueryBuilder::new, SemanticQueryBuilder::fromXContent)); + } + + @Override + public List> getRetrievers() { + return List.of( + new RetrieverSpec<>(new ParseField(TextSimilarityRankBuilder.NAME), TextSimilarityRankRetrieverBuilder::fromXContent) + ); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportDeleteInferenceEndpointAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportDeleteInferenceEndpointAction.java index 07d5e1e618578..e59ac4e1356f0 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportDeleteInferenceEndpointAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportDeleteInferenceEndpointAction.java @@ -3,6 +3,8 @@ * or more contributor license agreements. Licensed under the Elastic License * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. + * + * this file was contributed to by a Generative AI */ package org.elasticsearch.xpack.inference.action; @@ -11,6 +13,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -18,12 +21,10 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.inference.InferenceServiceRegistry; -import org.elasticsearch.ingest.IngestMetadata; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -34,6 +35,10 @@ import org.elasticsearch.xpack.inference.registry.ModelRegistry; import java.util.Set; +import java.util.concurrent.Executor; + +import static org.elasticsearch.xpack.core.ml.utils.SemanticTextInfoExtractor.extractIndexesReferencingInferenceEndpoints; +import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; public class TransportDeleteInferenceEndpointAction extends TransportMasterNodeAction< DeleteInferenceEndpointAction.Request, @@ -42,6 +47,7 @@ public class TransportDeleteInferenceEndpointAction extends TransportMasterNodeA private final ModelRegistry modelRegistry; private final InferenceServiceRegistry serviceRegistry; private static final Logger logger = LogManager.getLogger(TransportDeleteInferenceEndpointAction.class); + private final Executor executor; @Inject public TransportDeleteInferenceEndpointAction( @@ -66,6 +72,7 @@ public TransportDeleteInferenceEndpointAction( ); this.modelRegistry = modelRegistry; this.serviceRegistry = serviceRegistry; + this.executor = threadPool.executor(UTILITY_THREAD_POOL_NAME); } @Override @@ -74,6 +81,15 @@ protected void masterOperation( DeleteInferenceEndpointAction.Request request, ClusterState state, ActionListener masterListener + ) 
{ + // workaround for https://github.com/elastic/elasticsearch/issues/97916 - TODO remove this when we can + executor.execute(ActionRunnable.wrap(masterListener, l -> doExecuteForked(request, state, l))); + } + + private void doExecuteForked( + DeleteInferenceEndpointAction.Request request, + ClusterState state, + ActionListener masterListener ) { SubscribableListener.newForked(modelConfigListener -> { // Get the model from the registry @@ -89,17 +105,15 @@ protected void masterOperation( } if (request.isDryRun()) { - masterListener.onResponse( - new DeleteInferenceEndpointAction.Response( - false, - InferenceProcessorInfoExtractor.pipelineIdsForResource(state, Set.of(request.getInferenceEndpointId())) - ) - ); + handleDryRun(request, state, masterListener); return; - } else if (request.isForceDelete() == false - && endpointIsReferencedInPipelines(state, request.getInferenceEndpointId(), listener)) { + } else if (request.isForceDelete() == false) { + var errorString = endpointIsReferencedInPipelinesOrIndexes(state, request.getInferenceEndpointId()); + if (errorString != null) { + listener.onFailure(new ElasticsearchStatusException(errorString, RestStatus.CONFLICT)); return; } + } var service = serviceRegistry.getService(unparsedModel.service()); if (service.isPresent()) { @@ -126,47 +140,83 @@ && endpointIsReferencedInPipelines(state, request.getInferenceEndpointId(), list }) .addListener( masterListener.delegateFailure( - (l3, didDeleteModel) -> masterListener.onResponse(new DeleteInferenceEndpointAction.Response(didDeleteModel, Set.of())) + (l3, didDeleteModel) -> masterListener.onResponse( + new DeleteInferenceEndpointAction.Response(didDeleteModel, Set.of(), Set.of(), null) + ) ) ); } - private static boolean endpointIsReferencedInPipelines( - final ClusterState state, - final String inferenceEndpointId, - ActionListener listener + private static void handleDryRun( + DeleteInferenceEndpointAction.Request request, + ClusterState state, + ActionListener masterListener ) { - Metadata metadata = state.getMetadata(); - if (metadata == null) { - listener.onFailure( - new ElasticsearchStatusException( - " Could not determine if the endpoint is referenced in a pipeline as cluster state metadata was unexpectedly null. 
" - + "Use `force` to delete it anyway", - RestStatus.INTERNAL_SERVER_ERROR - ) - ); - // Unsure why the ClusterState metadata would ever be null, but in this case it seems safer to assume the endpoint is referenced - return true; + Set pipelines = InferenceProcessorInfoExtractor.pipelineIdsForResource(state, Set.of(request.getInferenceEndpointId())); + + Set indexesReferencedBySemanticText = extractIndexesReferencingInferenceEndpoints( + state.getMetadata(), + Set.of(request.getInferenceEndpointId()) + ); + + masterListener.onResponse( + new DeleteInferenceEndpointAction.Response( + false, + pipelines, + indexesReferencedBySemanticText, + buildErrorString(request.getInferenceEndpointId(), pipelines, indexesReferencedBySemanticText) + ) + ); + } + + private static String endpointIsReferencedInPipelinesOrIndexes(final ClusterState state, final String inferenceEndpointId) { + + var pipelines = endpointIsReferencedInPipelines(state, inferenceEndpointId); + var indexes = endpointIsReferencedInIndex(state, inferenceEndpointId); + + if (pipelines.isEmpty() == false || indexes.isEmpty() == false) { + return buildErrorString(inferenceEndpointId, pipelines, indexes); } - IngestMetadata ingestMetadata = metadata.custom(IngestMetadata.TYPE); - if (ingestMetadata == null) { - logger.debug("No ingest metadata found in cluster state while attempting to delete inference endpoint"); - } else { - Set modelIdsReferencedByPipelines = InferenceProcessorInfoExtractor.getModelIdsFromInferenceProcessors(ingestMetadata); - if (modelIdsReferencedByPipelines.contains(inferenceEndpointId)) { - listener.onFailure( - new ElasticsearchStatusException( - "Inference endpoint " - + inferenceEndpointId - + " is referenced by pipelines and cannot be deleted. " - + "Use `force` to delete it anyway, or use `dry_run` to list the pipelines that reference it.", - RestStatus.CONFLICT - ) - ); - return true; - } + return null; + } + + private static String buildErrorString(String inferenceEndpointId, Set pipelines, Set indexes) { + StringBuilder errorString = new StringBuilder(); + + if (pipelines.isEmpty() == false) { + errorString.append("Inference endpoint ") + .append(inferenceEndpointId) + .append(" is referenced by pipelines: ") + .append(pipelines) + .append(". ") + .append("Ensure that no pipelines are using this inference endpoint, ") + .append("or use force to ignore this warning and delete the inference endpoint."); } - return false; + + if (indexes.isEmpty() == false) { + errorString.append(" Inference endpoint ") + .append(inferenceEndpointId) + .append(" is being used in the mapping for indexes: ") + .append(indexes) + .append(". 
") + .append("Ensure that no index mappings are using this inference endpoint, ") + .append("or use force to ignore this warning and delete the inference endpoint."); + } + + return errorString.toString(); + } + + private static Set endpointIsReferencedInIndex(final ClusterState state, final String inferenceEndpointId) { + Set indexes = extractIndexesReferencingInferenceEndpoints(state.getMetadata(), Set.of(inferenceEndpointId)); + return indexes; + } + + private static Set endpointIsReferencedInPipelines(final ClusterState state, final String inferenceEndpointId) { + Set modelIdsReferencedByPipelines = InferenceProcessorInfoExtractor.pipelineIdsForResource( + state, + Set.of(inferenceEndpointId) + ); + return modelIdsReferencedByPipelines; } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/EmbeddingRequestChunker.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunker.java similarity index 99% rename from x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/EmbeddingRequestChunker.java rename to x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunker.java index 01a345909c6b1..7587dbf8ca95b 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/EmbeddingRequestChunker.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunker.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.inference.common; +package org.elasticsearch.xpack.inference.chunking; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; @@ -46,6 +46,7 @@ public static EmbeddingType fromDenseVectorElementType(DenseVectorFieldMapper.El return switch (elementType) { case BYTE -> EmbeddingType.BYTE; case FLOAT -> EmbeddingType.FLOAT; + case BIT -> throw new IllegalArgumentException("Bit vectors are not supported"); }; } }; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunker.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunker.java new file mode 100644 index 0000000000000..258a127dac8ab --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunker.java @@ -0,0 +1,138 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.chunking; + +import com.ibm.icu.text.BreakIterator; + +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +/** + * Split text into chunks aligned on sentence boundaries. + * The maximum chunk size is measured in words and controlled + * by {@code maxNumberWordsPerChunk}. Sentences are combined + * greedily until adding the next sentence would exceed + * {@code maxNumberWordsPerChunk}, at which point a new chunk + * is created. If an individual sentence is longer than + * {@code maxNumberWordsPerChunk} it is split on word boundary with + * overlap. 
+ */
+
+public class SentenceBoundaryChunker {
+
+    private final BreakIterator sentenceIterator;
+    private final BreakIterator wordIterator;
+
+    public SentenceBoundaryChunker() {
+        sentenceIterator = BreakIterator.getSentenceInstance(Locale.ROOT);
+        wordIterator = BreakIterator.getWordInstance(Locale.ROOT);
+    }
+
+    /**
+     * Break the input text into small chunks on sentence boundaries.
+     *
+     * @param input Text to chunk
+     * @param maxNumberWordsPerChunk Maximum size of the chunk
+     * @return The input text chunked
+     */
+    public List<String> chunk(String input, int maxNumberWordsPerChunk) {
+        var chunks = new ArrayList<String>();
+
+        sentenceIterator.setText(input);
+        wordIterator.setText(input);
+
+        int chunkStart = 0;
+        int chunkEnd = 0;
+        int sentenceStart = 0;
+        int chunkWordCount = 0;
+
+        int boundary = sentenceIterator.next();
+
+        while (boundary != BreakIterator.DONE) {
+            int sentenceEnd = sentenceIterator.current();
+            int countWordsInSentence = countWords(sentenceStart, sentenceEnd);
+
+            if (chunkWordCount + countWordsInSentence > maxNumberWordsPerChunk) {
+                // over the max chunk size, roll back to the last sentence
+
+                if (chunkWordCount > 0) {
+                    // add a new chunk containing all the input up to this sentence
+                    chunks.add(input.substring(chunkStart, chunkEnd));
+                    chunkStart = chunkEnd;
+                    chunkWordCount = countWordsInSentence; // the next chunk will contain this sentence
+                }
+
+                if (countWordsInSentence > maxNumberWordsPerChunk) {
+                    // This sentence is bigger than the max chunk size.
+                    // Split the sentence on the word boundary
+                    var sentenceSplits = splitLongSentence(
+                        input.substring(chunkStart, sentenceEnd),
+                        maxNumberWordsPerChunk,
+                        overlapForChunkSize(maxNumberWordsPerChunk)
+                    );
+
+                    int i = 0;
+                    for (; i < sentenceSplits.size() - 1; i++) {
+                        // Because the substring was passed to splitLongSentence()
+                        // the returned positions need to be offset by chunkStart
+                        chunks.add(input.substring(chunkStart + sentenceSplits.get(i).start(), chunkStart + sentenceSplits.get(i).end()));
+                    }
+                    // The final split is partially filled.
+                    // Set the next chunk start to the beginning of the
+                    // final split of the long sentence.
+                    chunkStart = chunkStart + sentenceSplits.get(i).start(); // start pos needs to be offset by chunkStart
+                    chunkWordCount = sentenceSplits.get(i).wordCount();
+                }
+            } else {
+                chunkWordCount += countWordsInSentence;
+            }
+
+            sentenceStart = sentenceEnd;
+            chunkEnd = sentenceEnd;
+
+            boundary = sentenceIterator.next();
+        }
+
+        if (chunkWordCount > 0) {
+            chunks.add(input.substring(chunkStart));
+        }
+
+        return chunks;
+    }
+
+    static List<WordBoundaryChunker.ChunkPosition> splitLongSentence(String text, int maxNumberOfWords, int overlap) {
+        return new WordBoundaryChunker().chunkPositions(text, maxNumberOfWords, overlap);
+    }
+
+    private int countWords(int start, int end) {
+        return countWords(start, end, this.wordIterator);
+    }
+
+    // Exposed for testing. wordIterator should have had
+    // setText() applied before using this function.
+ static int countWords(int start, int end, BreakIterator wordIterator) { + assert start < end; + wordIterator.preceding(start); // start of the current word + + int boundary = wordIterator.current(); + int wordCount = 0; + while (boundary != BreakIterator.DONE && boundary <= end) { + int wordStatus = wordIterator.getRuleStatus(); + if (wordStatus != BreakIterator.WORD_NONE) { + wordCount++; + } + boundary = wordIterator.next(); + } + + return wordCount; + } + + private static int overlapForChunkSize(int chunkSize) { + return (chunkSize - 1) / 2; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/WordBoundaryChunker.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunker.java similarity index 73% rename from x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/WordBoundaryChunker.java rename to x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunker.java index d3bb9154fd426..4233f917f8f80 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/WordBoundaryChunker.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunker.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.inference.common; +package org.elasticsearch.xpack.inference.chunking; import com.ibm.icu.text.BreakIterator; @@ -32,6 +32,8 @@ public WordBoundaryChunker() { wordIterator = BreakIterator.getWordInstance(Locale.ROOT); } + record ChunkPosition(int start, int end, int wordCount) {} + /** * Break the input text into small chunks as dictated * by the chunking parameters @@ -42,6 +44,29 @@ public WordBoundaryChunker() { * @return List of chunked text */ public List chunk(String input, int chunkSize, int overlap) { + + if (input.isEmpty()) { + return List.of(""); + } + + var chunkPositions = chunkPositions(input, chunkSize, overlap); + var chunks = new ArrayList(chunkPositions.size()); + for (var pos : chunkPositions) { + chunks.add(input.substring(pos.start, pos.end)); + } + return chunks; + } + + /** + * Chunk using the same strategy as {@link #chunk(String, int, int)} + * but return the chunk start and end offsets in the {@code input} string + * @param input Text to chunk + * @param chunkSize The number of words in each chunk + * @param overlap The number of words to overlap each chunk. + * Can be 0 but must be non-negative. 
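A worked illustration of the overlap arithmetic described above, a sketch only (the 50-word input held in the hypothetical variable fiftyWordText is an assumption, not part of the change): consecutive windows advance by chunkSize - overlap words, so neighbouring chunks share overlap words.

    // chunkSize = 20, overlap = 10: windows start every 20 - 10 = 10 words, so a plain
    // 50-word input produces chunks covering words [0,20), [10,30), [20,40), [30,50).
    List<String> chunks = new WordBoundaryChunker().chunk(fiftyWordText, 20, 10);
    // expected: 4 chunks, each chunk after the first repeating the previous chunk's last 10 words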
+ * @return List of chunked text positions + */ + List chunkPositions(String input, int chunkSize, int overlap) { if (overlap > 0 && overlap > chunkSize / 2) { throw new IllegalArgumentException( "Invalid chunking parameters, overlap [" @@ -59,10 +84,10 @@ public List chunk(String input, int chunkSize, int overlap) { } if (input.isEmpty()) { - return List.of(""); + return List.of(); } - var chunks = new ArrayList(); + var chunkPositions = new ArrayList(); // This position in the chunk is where the next overlapping chunk will start final int chunkSizeLessOverlap = chunkSize - overlap; @@ -81,7 +106,7 @@ public List chunk(String input, int chunkSize, int overlap) { wordsSinceStartWindowWasMarked++; if (wordsInChunkCountIncludingOverlap >= chunkSize) { - chunks.add(input.substring(windowStart, boundary)); + chunkPositions.add(new ChunkPosition(windowStart, boundary, wordsInChunkCountIncludingOverlap)); wordsInChunkCountIncludingOverlap = overlap; if (overlap == 0) { @@ -102,10 +127,10 @@ public List chunk(String input, int chunkSize, int overlap) { // Get the last chunk that was shorter than the required chunk size // if it ends on a boundary than the count should equal overlap in which case // we can ignore it, unless this is the first chunk in which case we want to add it - if (wordsInChunkCountIncludingOverlap > overlap || chunks.isEmpty()) { - chunks.add(input.substring(windowStart)); + if (wordsInChunkCountIncludingOverlap > overlap || chunkPositions.isEmpty()) { + chunkPositions.add(new ChunkPosition(windowStart, input.length(), wordsInChunkCountIncludingOverlap)); } - return chunks; + return chunkPositions; } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/SizeLimitInputStream.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/SizeLimitInputStream.java index 78e7b5cbbd95e..cbef4e39fae54 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/SizeLimitInputStream.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/SizeLimitInputStream.java @@ -21,6 +21,13 @@ */ public final class SizeLimitInputStream extends FilterInputStream { + public static class InputStreamTooLargeException extends IOException { + + public InputStreamTooLargeException(String message) { + super(message); + } + } + private final long maxByteSize; private final AtomicLong byteCounter = new AtomicLong(0); @@ -73,9 +80,9 @@ public boolean markSupported() { return false; } - private void checkMaximumLengthReached() throws IOException { + private void checkMaximumLengthReached() throws InputStreamTooLargeException { if (byteCounter.get() > maxByteSize) { - throw new IOException("Maximum limit of [" + maxByteSize + "] bytes reached"); + throw new InputStreamTooLargeException("Maximum limit of [" + maxByteSize + "] bytes reached"); } } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionCreator.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionCreator.java new file mode 100644 index 0000000000000..5f9fc532e33b2 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionCreator.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.action.amazonbedrock; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.http.sender.AmazonBedrockChatCompletionRequestManager; +import org.elasticsearch.xpack.inference.external.http.sender.AmazonBedrockEmbeddingsRequestManager; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionModel; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModel; + +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.constructFailedToSendRequestMessage; + +public class AmazonBedrockActionCreator implements AmazonBedrockActionVisitor { + private final Sender sender; + private final ServiceComponents serviceComponents; + private final TimeValue timeout; + + public AmazonBedrockActionCreator(Sender sender, ServiceComponents serviceComponents, @Nullable TimeValue timeout) { + this.sender = Objects.requireNonNull(sender); + this.serviceComponents = Objects.requireNonNull(serviceComponents); + this.timeout = timeout; + } + + @Override + public ExecutableAction create(AmazonBedrockEmbeddingsModel embeddingsModel, Map taskSettings) { + var overriddenModel = AmazonBedrockEmbeddingsModel.of(embeddingsModel, taskSettings); + var requestManager = new AmazonBedrockEmbeddingsRequestManager( + overriddenModel, + serviceComponents.truncator(), + serviceComponents.threadPool(), + timeout + ); + var errorMessage = constructFailedToSendRequestMessage(null, "Amazon Bedrock embeddings"); + return new AmazonBedrockEmbeddingsAction(sender, requestManager, errorMessage); + } + + @Override + public ExecutableAction create(AmazonBedrockChatCompletionModel completionModel, Map taskSettings) { + var overriddenModel = AmazonBedrockChatCompletionModel.of(completionModel, taskSettings); + var requestManager = new AmazonBedrockChatCompletionRequestManager(overriddenModel, serviceComponents.threadPool(), timeout); + var errorMessage = constructFailedToSendRequestMessage(null, "Amazon Bedrock completion"); + return new AmazonBedrockChatCompletionAction(sender, requestManager, errorMessage); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionVisitor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionVisitor.java new file mode 100644 index 0000000000000..b540d030eb3f7 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionVisitor.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.action.amazonbedrock; + +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionModel; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModel; + +import java.util.Map; + +public interface AmazonBedrockActionVisitor { + ExecutableAction create(AmazonBedrockEmbeddingsModel embeddingsModel, Map taskSettings); + + ExecutableAction create(AmazonBedrockChatCompletionModel completionModel, Map taskSettings); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockChatCompletionAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockChatCompletionAction.java new file mode 100644 index 0000000000000..9d3c39d3ac4d9 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockChatCompletionAction.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.action.amazonbedrock; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; +import org.elasticsearch.xpack.inference.external.http.sender.RequestManager; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; + +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.createInternalServerError; +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.wrapFailuresInElasticsearchException; + +public class AmazonBedrockChatCompletionAction implements ExecutableAction { + private final Sender sender; + private final RequestManager requestManager; + private final String errorMessage; + + public AmazonBedrockChatCompletionAction(Sender sender, RequestManager requestManager, String errorMessage) { + this.sender = Objects.requireNonNull(sender); + this.requestManager = Objects.requireNonNull(requestManager); + this.errorMessage = Objects.requireNonNull(errorMessage); + } + + @Override + public void execute(InferenceInputs inferenceInputs, TimeValue timeout, ActionListener listener) { + try { + ActionListener wrappedListener = wrapFailuresInElasticsearchException(errorMessage, listener); + + sender.send(requestManager, inferenceInputs, timeout, wrappedListener); + } catch (ElasticsearchException e) { + listener.onFailure(e); + } catch (Exception e) { + listener.onFailure(createInternalServerError(e, errorMessage)); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockEmbeddingsAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockEmbeddingsAction.java new file mode 100644 index 
0000000000000..3f8be0c3cccbe --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockEmbeddingsAction.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.action.amazonbedrock; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; +import org.elasticsearch.xpack.inference.external.http.sender.RequestManager; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; + +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.createInternalServerError; +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.wrapFailuresInElasticsearchException; + +public class AmazonBedrockEmbeddingsAction implements ExecutableAction { + + private final Sender sender; + private final RequestManager requestManager; + private final String errorMessage; + + public AmazonBedrockEmbeddingsAction(Sender sender, RequestManager requestManager, String errorMessage) { + this.sender = Objects.requireNonNull(sender); + this.requestManager = Objects.requireNonNull(requestManager); + this.errorMessage = Objects.requireNonNull(errorMessage); + } + + @Override + public void execute(InferenceInputs inferenceInputs, TimeValue timeout, ActionListener listener) { + try { + ActionListener wrappedListener = wrapFailuresInElasticsearchException(errorMessage, listener); + + sender.send(requestManager, inferenceInputs, timeout, wrappedListener); + } catch (ElasticsearchException e) { + listener.onFailure(e); + } catch (Exception e) { + listener.onFailure(createInternalServerError(e, errorMessage)); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/anthropic/AnthropicActionCreator.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/anthropic/AnthropicActionCreator.java new file mode 100644 index 0000000000000..fa386c80643b0 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/anthropic/AnthropicActionCreator.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.action.anthropic; + +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionModel; + +import java.util.Map; +import java.util.Objects; + +/** + * Provides a way to construct an {@link ExecutableAction} using the visitor pattern based on the anthropic model type. + */ +public class AnthropicActionCreator implements AnthropicActionVisitor { + private final Sender sender; + private final ServiceComponents serviceComponents; + + public AnthropicActionCreator(Sender sender, ServiceComponents serviceComponents) { + this.sender = Objects.requireNonNull(sender); + this.serviceComponents = Objects.requireNonNull(serviceComponents); + } + + @Override + public ExecutableAction create(AnthropicChatCompletionModel model, Map taskSettings) { + var overriddenModel = AnthropicChatCompletionModel.of(model, taskSettings); + + return new AnthropicChatCompletionAction(sender, overriddenModel, serviceComponents); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/anthropic/AnthropicActionVisitor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/anthropic/AnthropicActionVisitor.java new file mode 100644 index 0000000000000..d2727c0e9b20c --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/anthropic/AnthropicActionVisitor.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.action.anthropic; + +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionModel; + +import java.util.Map; + +public interface AnthropicActionVisitor { + + ExecutableAction create(AnthropicChatCompletionModel model, Map taskSettings); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/anthropic/AnthropicChatCompletionAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/anthropic/AnthropicChatCompletionAction.java new file mode 100644 index 0000000000000..9891d671764a4 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/anthropic/AnthropicChatCompletionAction.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.action.anthropic; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.http.sender.AnthropicCompletionRequestManager; +import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; +import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionModel; + +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.constructFailedToSendRequestMessage; +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.createInternalServerError; +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.wrapFailuresInElasticsearchException; + +public class AnthropicChatCompletionAction implements ExecutableAction { + + private final String errorMessage; + private final AnthropicCompletionRequestManager requestCreator; + + private final Sender sender; + + public AnthropicChatCompletionAction(Sender sender, AnthropicChatCompletionModel model, ServiceComponents serviceComponents) { + Objects.requireNonNull(serviceComponents); + Objects.requireNonNull(model); + this.sender = Objects.requireNonNull(sender); + this.requestCreator = AnthropicCompletionRequestManager.of(model, serviceComponents.threadPool()); + this.errorMessage = constructFailedToSendRequestMessage(model.getUri(), "Anthropic chat completions"); + } + + @Override + public void execute(InferenceInputs inferenceInputs, TimeValue timeout, ActionListener listener) { + if (inferenceInputs instanceof DocumentsOnlyInput == false) { + listener.onFailure(new ElasticsearchStatusException("Invalid inference input type", RestStatus.INTERNAL_SERVER_ERROR)); + return; + } + + var docsOnlyInput = (DocumentsOnlyInput) inferenceInputs; + if (docsOnlyInput.getInputs().size() > 1) { + listener.onFailure(new ElasticsearchStatusException("Anthropic completions only accepts 1 input", RestStatus.BAD_REQUEST)); + return; + } + + try { + ActionListener wrappedListener = wrapFailuresInElasticsearchException(errorMessage, listener); + + sender.send(requestCreator, inferenceInputs, timeout, wrappedListener); + } catch (ElasticsearchException e) { + listener.onFailure(e); + } catch (Exception e) { + listener.onFailure(createInternalServerError(e, errorMessage)); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereRerankAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereRerankAction.java index 0613b8ef76453..98e92421b96d5 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereRerankAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/cohere/CohereRerankAction.java @@ -32,10 +32,7 @@ public class CohereRerankAction implements ExecutableAction { public 
CohereRerankAction(Sender sender, CohereRerankModel model, ThreadPool threadPool) { Objects.requireNonNull(model); this.sender = Objects.requireNonNull(sender); - this.failedToSendRequestErrorMessage = constructFailedToSendRequestMessage( - model.getServiceSettings().getCommonSettings().uri(), - "Cohere rerank" - ); + this.failedToSendRequestErrorMessage = constructFailedToSendRequestMessage(model.getServiceSettings().uri(), "Cohere rerank"); requestCreator = CohereRerankRequestManager.of(model, threadPool); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionCreator.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionCreator.java new file mode 100644 index 0000000000000..ed2a205151a4c --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionCreator.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.action.googlevertexai; + +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsModel; +import org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankModel; + +import java.util.Map; +import java.util.Objects; + +public class GoogleVertexAiActionCreator implements GoogleVertexAiActionVisitor { + + private final Sender sender; + + private final ServiceComponents serviceComponents; + + public GoogleVertexAiActionCreator(Sender sender, ServiceComponents serviceComponents) { + this.sender = Objects.requireNonNull(sender); + this.serviceComponents = Objects.requireNonNull(serviceComponents); + } + + @Override + public ExecutableAction create(GoogleVertexAiEmbeddingsModel model, Map taskSettings) { + return new GoogleVertexAiEmbeddingsAction(sender, model, serviceComponents); + } + + @Override + public ExecutableAction create(GoogleVertexAiRerankModel model, Map taskSettings) { + return new GoogleVertexAiRerankAction(sender, model, serviceComponents.threadPool()); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionVisitor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionVisitor.java new file mode 100644 index 0000000000000..def8f09ce06be --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionVisitor.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.action.googlevertexai; + +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsModel; +import org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankModel; + +import java.util.Map; + +public interface GoogleVertexAiActionVisitor { + + ExecutableAction create(GoogleVertexAiEmbeddingsModel model, Map taskSettings); + + ExecutableAction create(GoogleVertexAiRerankModel model, Map taskSettings); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiEmbeddingsAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiEmbeddingsAction.java new file mode 100644 index 0000000000000..f9814224c101a --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiEmbeddingsAction.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.action.googlevertexai; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.http.sender.GoogleVertexAiEmbeddingsRequestManager; +import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsModel; + +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.constructFailedToSendRequestMessage; +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.createInternalServerError; +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.wrapFailuresInElasticsearchException; + +public class GoogleVertexAiEmbeddingsAction implements ExecutableAction { + + private final String failedToSendRequestErrorMessage; + + private final GoogleVertexAiEmbeddingsRequestManager requestManager; + + private final Sender sender; + + public GoogleVertexAiEmbeddingsAction(Sender sender, GoogleVertexAiEmbeddingsModel model, ServiceComponents serviceComponents) { + Objects.requireNonNull(serviceComponents); + Objects.requireNonNull(model); + this.sender = Objects.requireNonNull(sender); + this.requestManager = new GoogleVertexAiEmbeddingsRequestManager( + model, + serviceComponents.truncator(), + serviceComponents.threadPool() + ); + this.failedToSendRequestErrorMessage = constructFailedToSendRequestMessage(model.uri(), "Google Vertex AI embeddings"); + } + + @Override + public void execute(InferenceInputs inferenceInputs, TimeValue timeout, ActionListener listener) { + try { + ActionListener wrappedListener = wrapFailuresInElasticsearchException( + failedToSendRequestErrorMessage, + 
listener + ); + + sender.send(requestManager, inferenceInputs, timeout, wrappedListener); + } catch (ElasticsearchException e) { + listener.onFailure(e); + } catch (Exception e) { + listener.onFailure(createInternalServerError(e, failedToSendRequestErrorMessage)); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiRerankAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiRerankAction.java new file mode 100644 index 0000000000000..2827de3b1962d --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiRerankAction.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.action.googlevertexai; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.http.sender.GoogleVertexAiRerankRequestManager; +import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankModel; + +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.constructFailedToSendRequestMessage; +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.createInternalServerError; +import static org.elasticsearch.xpack.inference.external.action.ActionUtils.wrapFailuresInElasticsearchException; + +public class GoogleVertexAiRerankAction implements ExecutableAction { + + private final String failedToSendRequestErrorMessage; + + private final Sender sender; + + private final GoogleVertexAiRerankRequestManager requestManager; + + public GoogleVertexAiRerankAction(Sender sender, GoogleVertexAiRerankModel model, ThreadPool threadPool) { + Objects.requireNonNull(model); + this.sender = Objects.requireNonNull(sender); + this.failedToSendRequestErrorMessage = constructFailedToSendRequestMessage(model.uri(), "Google Vertex AI rerank"); + this.requestManager = GoogleVertexAiRerankRequestManager.of(model, threadPool); + } + + @Override + public void execute(InferenceInputs inferenceInputs, TimeValue timeout, ActionListener listener) { + try { + ActionListener wrappedListener = wrapFailuresInElasticsearchException( + failedToSendRequestErrorMessage, + listener + ); + sender.send(requestManager, inferenceInputs, timeout, wrappedListener); + } catch (ElasticsearchException e) { + listener.onFailure(e); + } catch (Exception e) { + listener.onFailure(createInternalServerError(e, failedToSendRequestErrorMessage)); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockBaseClient.java 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockBaseClient.java new file mode 100644 index 0000000000000..f9e403582a0ec --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockBaseClient.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; + +import java.time.Clock; +import java.util.Objects; + +public abstract class AmazonBedrockBaseClient implements AmazonBedrockClient { + protected final Integer modelKeysAndRegionHashcode; + protected Clock clock = Clock.systemUTC(); + + protected AmazonBedrockBaseClient(AmazonBedrockModel model, @Nullable TimeValue timeout) { + Objects.requireNonNull(model); + this.modelKeysAndRegionHashcode = getModelKeysAndRegionHashcode(model, timeout); + } + + public static Integer getModelKeysAndRegionHashcode(AmazonBedrockModel model, @Nullable TimeValue timeout) { + var secretSettings = model.getSecretSettings(); + var serviceSettings = model.getServiceSettings(); + return Objects.hash(secretSettings.accessKey, secretSettings.secretKey, serviceSettings.region(), timeout); + } + + public final void setClock(Clock clock) { + this.clock = clock; + } + + abstract void close(); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockChatCompletionExecutor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockChatCompletionExecutor.java new file mode 100644 index 0000000000000..a4e0c399517c1 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockChatCompletionExecutor.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
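The base client above keys itself on a hash of the model's access key, secret key, region and the request timeout; the client cache later in this diff uses that hash to decide whether an existing SDK client can be reused. A rough illustration, assuming two configured models:

    Integer keyA = AmazonBedrockBaseClient.getModelKeysAndRegionHashcode(modelA, TimeValue.timeValueSeconds(30));
    Integer keyB = AmazonBedrockBaseClient.getModelKeysAndRegionHashcode(modelB, TimeValue.timeValueSeconds(30));
    // Equal hashes mean the same credentials, region and timeout, so the cache can hand back the same client.
    boolean shareClient = keyA.equals(keyB);
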
+ */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockChatCompletionRequest; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponseHandler; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.completion.AmazonBedrockChatCompletionResponseListener; + +import java.util.function.Supplier; + +public class AmazonBedrockChatCompletionExecutor extends AmazonBedrockExecutor { + private final AmazonBedrockChatCompletionRequest chatCompletionRequest; + + protected AmazonBedrockChatCompletionExecutor( + AmazonBedrockChatCompletionRequest request, + AmazonBedrockResponseHandler responseHandler, + Logger logger, + Supplier hasRequestCompletedFunction, + ActionListener inferenceResultsListener, + AmazonBedrockClientCache clientCache + ) { + super(request, responseHandler, logger, hasRequestCompletedFunction, inferenceResultsListener, clientCache); + this.chatCompletionRequest = request; + } + + @Override + protected void executeClientRequest(AmazonBedrockBaseClient awsBedrockClient) { + var chatCompletionResponseListener = new AmazonBedrockChatCompletionResponseListener( + chatCompletionRequest, + responseHandler, + inferenceResultsListener + ); + chatCompletionRequest.executeChatCompletionRequest(awsBedrockClient, chatCompletionResponseListener); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockClient.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockClient.java new file mode 100644 index 0000000000000..812e76129c420 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockClient.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import com.amazonaws.services.bedrockruntime.model.ConverseRequest; +import com.amazonaws.services.bedrockruntime.model.ConverseResult; +import com.amazonaws.services.bedrockruntime.model.InvokeModelRequest; +import com.amazonaws.services.bedrockruntime.model.InvokeModelResult; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; + +import java.time.Instant; + +public interface AmazonBedrockClient { + void converse(ConverseRequest converseRequest, ActionListener responseListener) throws ElasticsearchException; + + void invokeModel(InvokeModelRequest invokeModelRequest, ActionListener responseListener) + throws ElasticsearchException; + + boolean isExpired(Instant currentTimestampMs); + + void resetExpiration(); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockClientCache.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockClientCache.java new file mode 100644 index 0000000000000..e6bb99620b581 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockClientCache.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; + +import java.io.Closeable; +import java.io.IOException; + +public interface AmazonBedrockClientCache extends Closeable { + AmazonBedrockBaseClient getOrCreateClient(AmazonBedrockModel model, @Nullable TimeValue timeout) throws IOException; +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockEmbeddingsExecutor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockEmbeddingsExecutor.java new file mode 100644 index 0000000000000..6da3f86e0909a --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockEmbeddingsExecutor.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
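The two interfaces above (asynchronous converse/invokeModel calls plus a closeable, expiry-aware client cache) are what the rest of the Bedrock code programs against. A sketch of the cache contract, using the concrete AmazonBedrockInferenceClientCache that appears further down in this diff; the model fixture is assumed:

    try (AmazonBedrockClientCache cache = new AmazonBedrockInferenceClientCache(AmazonBedrockInferenceClient::create, null)) {
        AmazonBedrockBaseClient client = cache.getOrCreateClient(model, TimeValue.timeValueSeconds(30));
        // converse()/invokeModel() calls go through the returned client; expired clients are flushed
        // on later getOrCreateClient() calls, and every cached client is closed with the cache.
    }
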
+ */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings.AmazonBedrockEmbeddingsRequest; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponseHandler; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.embeddings.AmazonBedrockEmbeddingsResponseListener; + +import java.util.function.Supplier; + +public class AmazonBedrockEmbeddingsExecutor extends AmazonBedrockExecutor { + + private final AmazonBedrockEmbeddingsRequest embeddingsRequest; + + protected AmazonBedrockEmbeddingsExecutor( + AmazonBedrockEmbeddingsRequest request, + AmazonBedrockResponseHandler responseHandler, + Logger logger, + Supplier hasRequestCompletedFunction, + ActionListener inferenceResultsListener, + AmazonBedrockClientCache clientCache + ) { + super(request, responseHandler, logger, hasRequestCompletedFunction, inferenceResultsListener, clientCache); + this.embeddingsRequest = request; + } + + @Override + protected void executeClientRequest(AmazonBedrockBaseClient awsBedrockClient) { + var embeddingsResponseListener = new AmazonBedrockEmbeddingsResponseListener( + embeddingsRequest, + responseHandler, + inferenceResultsListener + ); + embeddingsRequest.executeEmbeddingsRequest(awsBedrockClient, embeddingsResponseListener); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecuteOnlyRequestSender.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecuteOnlyRequestSender.java new file mode 100644 index 0000000000000..a08acab655936 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecuteOnlyRequestSender.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; +import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; +import org.elasticsearch.xpack.inference.external.request.Request; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockRequest; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockChatCompletionRequest; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings.AmazonBedrockEmbeddingsRequest; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponseHandler; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; + +import java.io.IOException; +import java.util.Objects; +import java.util.function.Supplier; + +import static org.elasticsearch.core.Strings.format; + +/** + * The AWS SDK uses its own internal retrier and timeout values on the client + */ +public class AmazonBedrockExecuteOnlyRequestSender implements RequestSender { + + protected final AmazonBedrockClientCache clientCache; + private final ThrottlerManager throttleManager; + + public AmazonBedrockExecuteOnlyRequestSender(AmazonBedrockClientCache clientCache, ThrottlerManager throttlerManager) { + this.clientCache = Objects.requireNonNull(clientCache); + this.throttleManager = Objects.requireNonNull(throttlerManager); + } + + @Override + public void send( + Logger logger, + Request request, + HttpClientContext context, + Supplier hasRequestTimedOutFunction, + ResponseHandler responseHandler, + ActionListener listener + ) { + if (request instanceof AmazonBedrockRequest awsRequest && responseHandler instanceof AmazonBedrockResponseHandler awsResponse) { + try { + var executor = createExecutor(awsRequest, awsResponse, logger, hasRequestTimedOutFunction, listener); + + // the run method will call the listener to return the proper value + executor.run(); + return; + } catch (Exception e) { + logException(logger, request, e); + listener.onFailure(wrapWithElasticsearchException(e, request.getInferenceEntityId())); + } + } + + listener.onFailure(new ElasticsearchException("Amazon Bedrock request was not the correct type")); + } + + // allow this to be overridden for testing + protected AmazonBedrockExecutor createExecutor( + AmazonBedrockRequest awsRequest, + AmazonBedrockResponseHandler awsResponse, + Logger logger, + Supplier hasRequestTimedOutFunction, + ActionListener listener + ) { + switch (awsRequest.taskType()) { + case COMPLETION -> { + return new AmazonBedrockChatCompletionExecutor( + (AmazonBedrockChatCompletionRequest) awsRequest, + awsResponse, + logger, + hasRequestTimedOutFunction, + listener, + clientCache + ); + } + case TEXT_EMBEDDING -> { + return new AmazonBedrockEmbeddingsExecutor( + (AmazonBedrockEmbeddingsRequest) awsRequest, + awsResponse, + logger, + hasRequestTimedOutFunction, + listener, + clientCache + ); + } + default -> { + throw new UnsupportedOperationException("Unsupported task type [" + awsRequest.taskType() + "] for Amazon Bedrock request"); + } + } + } + + private void logException(Logger logger, Request request, Exception 
exception) { + var causeException = ExceptionsHelper.unwrapCause(exception); + + throttleManager.warn( + logger, + format("Failed while sending request from inference entity id [%s] of type [amazonbedrock]", request.getInferenceEntityId()), + causeException + ); + } + + private Exception wrapWithElasticsearchException(Exception e, String inferenceEntityId) { + return new ElasticsearchException( + format("Amazon Bedrock client failed to send request from inference entity id [%s]", inferenceEntityId), + e + ); + } + + public void shutdown() throws IOException { + this.clientCache.close(); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecutor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecutor.java new file mode 100644 index 0000000000000..fa220ee5d2831 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecutor.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Strings; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockRequest; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponseHandler; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; + +import java.util.Objects; +import java.util.function.Supplier; + +public abstract class AmazonBedrockExecutor implements Runnable { + protected final AmazonBedrockModel baseModel; + protected final AmazonBedrockResponseHandler responseHandler; + protected final Logger logger; + protected final AmazonBedrockRequest request; + protected final Supplier hasRequestCompletedFunction; + protected final ActionListener inferenceResultsListener; + protected final AmazonBedrockClientCache clientCache; + + protected AmazonBedrockExecutor( + AmazonBedrockRequest request, + AmazonBedrockResponseHandler responseHandler, + Logger logger, + Supplier hasRequestCompletedFunction, + ActionListener inferenceResultsListener, + AmazonBedrockClientCache clientCache + ) { + this.request = Objects.requireNonNull(request); + this.responseHandler = Objects.requireNonNull(responseHandler); + this.logger = Objects.requireNonNull(logger); + this.hasRequestCompletedFunction = Objects.requireNonNull(hasRequestCompletedFunction); + this.inferenceResultsListener = Objects.requireNonNull(inferenceResultsListener); + this.clientCache = Objects.requireNonNull(clientCache); + this.baseModel = request.model(); + } + + @Override + public void run() { + if (hasRequestCompletedFunction.get()) { + // has already been run + return; + } + + var inferenceEntityId = baseModel.getInferenceEntityId(); + + try { + var awsBedrockClient = clientCache.getOrCreateClient(baseModel, request.timeout()); + executeClientRequest(awsBedrockClient); + } catch (Exception e) { + var errorMessage = Strings.format("Failed to send request from inference 
entity id [%s]", inferenceEntityId); + logger.warn(errorMessage, e); + inferenceResultsListener.onFailure(new ElasticsearchException(errorMessage, e)); + } + } + + protected abstract void executeClientRequest(AmazonBedrockBaseClient awsBedrockClient); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClient.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClient.java new file mode 100644 index 0000000000000..c3d458925268c --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClient.java @@ -0,0 +1,166 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import com.amazonaws.ClientConfiguration; +import com.amazonaws.auth.AWSStaticCredentialsProvider; +import com.amazonaws.auth.BasicAWSCredentials; +import com.amazonaws.services.bedrockruntime.AmazonBedrockRuntimeAsync; +import com.amazonaws.services.bedrockruntime.AmazonBedrockRuntimeAsyncClientBuilder; +import com.amazonaws.services.bedrockruntime.model.AmazonBedrockRuntimeException; +import com.amazonaws.services.bedrockruntime.model.ConverseRequest; +import com.amazonaws.services.bedrockruntime.model.ConverseResult; +import com.amazonaws.services.bedrockruntime.model.InvokeModelRequest; +import com.amazonaws.services.bedrockruntime.model.InvokeModelResult; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.SpecialPermission; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Strings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xpack.core.common.socket.SocketAccess; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; + +import java.security.AccessController; +import java.security.PrivilegedExceptionAction; +import java.time.Duration; +import java.time.Instant; +import java.util.Objects; + +/** + * Not marking this as "final" so we can subclass it for mocking + */ +public class AmazonBedrockInferenceClient extends AmazonBedrockBaseClient { + + // package-private for testing + static final int CLIENT_CACHE_EXPIRY_MINUTES = 5; + private static final int DEFAULT_CLIENT_TIMEOUT_MS = 10000; + + private final AmazonBedrockRuntimeAsync internalClient; + private volatile Instant expiryTimestamp; + + public static AmazonBedrockBaseClient create(AmazonBedrockModel model, @Nullable TimeValue timeout) { + try { + return new AmazonBedrockInferenceClient(model, timeout); + } catch (Exception e) { + throw new ElasticsearchException("Failed to create Amazon Bedrock Client", e); + } + } + + protected AmazonBedrockInferenceClient(AmazonBedrockModel model, @Nullable TimeValue timeout) { + super(model, timeout); + this.internalClient = createAmazonBedrockClient(model, timeout); + setExpiryTimestamp(); + } + + @Override + public void converse(ConverseRequest converseRequest, ActionListener responseListener) throws ElasticsearchException { + try { + var responseFuture = internalClient.converseAsync(converseRequest); + responseListener.onResponse(responseFuture.get()); + } catch 
(AmazonBedrockRuntimeException amazonBedrockRuntimeException) { + responseListener.onFailure( + new ElasticsearchException( + Strings.format("AmazonBedrock converse failure: [%s]", amazonBedrockRuntimeException.getMessage()), + amazonBedrockRuntimeException + ) + ); + } catch (ElasticsearchException elasticsearchException) { + // just throw the exception if we have one + responseListener.onFailure(elasticsearchException); + } catch (Exception e) { + responseListener.onFailure(new ElasticsearchException("Amazon Bedrock client converse call failed", e)); + } + } + + @Override + public void invokeModel(InvokeModelRequest invokeModelRequest, ActionListener responseListener) + throws ElasticsearchException { + try { + var responseFuture = internalClient.invokeModelAsync(invokeModelRequest); + responseListener.onResponse(responseFuture.get()); + } catch (AmazonBedrockRuntimeException amazonBedrockRuntimeException) { + responseListener.onFailure( + new ElasticsearchException( + Strings.format("AmazonBedrock invoke model failure: [%s]", amazonBedrockRuntimeException.getMessage()), + amazonBedrockRuntimeException + ) + ); + } catch (ElasticsearchException elasticsearchException) { + // just throw the exception if we have one + responseListener.onFailure(elasticsearchException); + } catch (Exception e) { + responseListener.onFailure(new ElasticsearchException(e)); + } + } + + // allow this to be overridden for test mocks + protected AmazonBedrockRuntimeAsync createAmazonBedrockClient(AmazonBedrockModel model, @Nullable TimeValue timeout) { + var secretSettings = model.getSecretSettings(); + var credentials = new BasicAWSCredentials(secretSettings.accessKey.toString(), secretSettings.secretKey.toString()); + var credentialsProvider = new AWSStaticCredentialsProvider(credentials); + var clientConfig = timeout == null + ? 
new ClientConfiguration().withConnectionTimeout(DEFAULT_CLIENT_TIMEOUT_MS) + : new ClientConfiguration().withConnectionTimeout((int) timeout.millis()); + + var serviceSettings = model.getServiceSettings(); + + try { + SpecialPermission.check(); + AmazonBedrockRuntimeAsyncClientBuilder builder = AccessController.doPrivileged( + (PrivilegedExceptionAction) () -> AmazonBedrockRuntimeAsyncClientBuilder.standard() + .withCredentials(credentialsProvider) + .withRegion(serviceSettings.region()) + .withClientConfiguration(clientConfig) + ); + + return SocketAccess.doPrivileged(builder::build); + } catch (AmazonBedrockRuntimeException amazonBedrockRuntimeException) { + throw new ElasticsearchException( + Strings.format("failed to create AmazonBedrockRuntime client: [%s]", amazonBedrockRuntimeException.getMessage()), + amazonBedrockRuntimeException + ); + } catch (Exception e) { + throw new ElasticsearchException("failed to create AmazonBedrockRuntime client", e); + } + } + + private void setExpiryTimestamp() { + this.expiryTimestamp = clock.instant().plus(Duration.ofMinutes(CLIENT_CACHE_EXPIRY_MINUTES)); + } + + @Override + public boolean isExpired(Instant currentTimestampMs) { + Objects.requireNonNull(currentTimestampMs); + return currentTimestampMs.isAfter(expiryTimestamp); + } + + public void resetExpiration() { + setExpiryTimestamp(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AmazonBedrockInferenceClient that = (AmazonBedrockInferenceClient) o; + return Objects.equals(modelKeysAndRegionHashcode, that.modelKeysAndRegionHashcode); + } + + @Override + public int hashCode() { + return this.modelKeysAndRegionHashcode; + } + + // make this package-private so only the cache can close it + @Override + void close() { + internalClient.shutdown(); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClientCache.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClientCache.java new file mode 100644 index 0000000000000..e245365c214af --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClientCache.java @@ -0,0 +1,137 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
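The inference client above stamps itself with a five-minute expiry and refreshes that stamp via resetExpiration(); the cache in the next file uses isExpired() to decide when to drop and close it. A hedged sketch of the expiry check with a fixed clock, roughly as a test would drive it (the model fixture and a mocked SDK client are assumed, since create() otherwise builds a real AWS client):

    AmazonBedrockBaseClient client = AmazonBedrockInferenceClient.create(model, null);
    client.setClock(Clock.fixed(Instant.parse("2024-07-01T00:00:00Z"), ZoneOffset.UTC));
    client.resetExpiration();                                                       // expires at 00:05:00Z
    boolean stillValid = client.isExpired(Instant.parse("2024-07-01T00:04:00Z")) == false;  // true
    boolean expired = client.isExpired(Instant.parse("2024-07-01T00:06:00Z"));              // true
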
+ */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import com.amazonaws.http.IdleConnectionReaper; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; + +import java.io.IOException; +import java.time.Clock; +import java.util.ArrayList; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.BiFunction; + +public final class AmazonBedrockInferenceClientCache implements AmazonBedrockClientCache { + + private final BiFunction creator; + private final Map clientsCache = new ConcurrentHashMap<>(); + private final ReentrantReadWriteLock cacheLock = new ReentrantReadWriteLock(); + + // not final for testing + private Clock clock; + + public AmazonBedrockInferenceClientCache( + BiFunction creator, + @Nullable Clock clock + ) { + this.creator = Objects.requireNonNull(creator); + this.clock = Objects.requireNonNullElse(clock, Clock.systemUTC()); + } + + public AmazonBedrockBaseClient getOrCreateClient(AmazonBedrockModel model, @Nullable TimeValue timeout) { + var returnClient = internalGetOrCreateClient(model, timeout); + flushExpiredClients(); + return returnClient; + } + + private AmazonBedrockBaseClient internalGetOrCreateClient(AmazonBedrockModel model, @Nullable TimeValue timeout) { + final Integer modelHash = AmazonBedrockInferenceClient.getModelKeysAndRegionHashcode(model, timeout); + cacheLock.readLock().lock(); + try { + return clientsCache.computeIfAbsent(modelHash, hashKey -> { + final AmazonBedrockBaseClient builtClient = creator.apply(model, timeout); + builtClient.setClock(clock); + builtClient.resetExpiration(); + return builtClient; + }); + } finally { + cacheLock.readLock().unlock(); + } + } + + private void flushExpiredClients() { + var currentTimestampMs = clock.instant(); + var expiredClients = new ArrayList>(); + + cacheLock.readLock().lock(); + try { + for (final Map.Entry client : clientsCache.entrySet()) { + if (client.getValue().isExpired(currentTimestampMs)) { + expiredClients.add(client); + } + } + + if (expiredClients.isEmpty()) { + return; + } + + cacheLock.readLock().unlock(); + cacheLock.writeLock().lock(); + try { + for (final Map.Entry client : expiredClients) { + var removed = clientsCache.remove(client.getKey()); + if (removed != null) { + removed.close(); + } + } + } finally { + cacheLock.readLock().lock(); + cacheLock.writeLock().unlock(); + } + } finally { + cacheLock.readLock().unlock(); + } + } + + @Override + public void close() throws IOException { + releaseCachedClients(); + } + + private void releaseCachedClients() { + // as we're closing and flushing all of these - we'll use a write lock + // across the whole operation to ensure this stays in sync + cacheLock.writeLock().lock(); + try { + // ensure all the clients are closed before we clear + for (final AmazonBedrockBaseClient client : clientsCache.values()) { + client.close(); + } + + // clear previously cached clients, they will be build lazily + clientsCache.clear(); + } finally { + cacheLock.writeLock().unlock(); + } + + // shutdown IdleConnectionReaper background thread + // it will be restarted on new client usage + IdleConnectionReaper.shutdown(); + } + + // used for testing + int clientCount() { + cacheLock.readLock().lock(); + try { + return clientsCache.size(); + } finally { + cacheLock.readLock().unlock(); + } + } + + // used 
for testing + void setClock(Clock newClock) { + this.clock = Objects.requireNonNull(newClock); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockRequestSender.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockRequestSender.java new file mode 100644 index 0000000000000..e23b0274ede26 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockRequestSender.java @@ -0,0 +1,126 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.external.http.sender.AmazonBedrockRequestExecutorService; +import org.elasticsearch.xpack.inference.external.http.sender.AmazonBedrockRequestManager; +import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; +import org.elasticsearch.xpack.inference.external.http.sender.RequestExecutorServiceSettings; +import org.elasticsearch.xpack.inference.external.http.sender.RequestManager; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.services.ServiceComponents; + +import java.io.IOException; +import java.util.Objects; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; + +public class AmazonBedrockRequestSender implements Sender { + + public static class Factory { + private final ServiceComponents serviceComponents; + private final ClusterService clusterService; + + public Factory(ServiceComponents serviceComponents, ClusterService clusterService) { + this.serviceComponents = Objects.requireNonNull(serviceComponents); + this.clusterService = Objects.requireNonNull(clusterService); + } + + public Sender createSender() { + var clientCache = new AmazonBedrockInferenceClientCache(AmazonBedrockInferenceClient::create, null); + return createSender(new AmazonBedrockExecuteOnlyRequestSender(clientCache, serviceComponents.throttlerManager())); + } + + Sender createSender(AmazonBedrockExecuteOnlyRequestSender requestSender) { + var sender = new AmazonBedrockRequestSender( + serviceComponents.threadPool(), + clusterService, + serviceComponents.settings(), + Objects.requireNonNull(requestSender) + ); + // ensure this is started + sender.start(); + return sender; + } + } + + private static final TimeValue START_COMPLETED_WAIT_TIME = TimeValue.timeValueSeconds(5); + + private final ThreadPool threadPool; + private final AmazonBedrockRequestExecutorService executorService; + private final AtomicBoolean started = new AtomicBoolean(false); + private final CountDownLatch startCompleted = new CountDownLatch(1); + + protected AmazonBedrockRequestSender( + 
ThreadPool threadPool, + ClusterService clusterService, + Settings settings, + AmazonBedrockExecuteOnlyRequestSender requestSender + ) { + this.threadPool = Objects.requireNonNull(threadPool); + executorService = new AmazonBedrockRequestExecutorService( + threadPool, + startCompleted, + new RequestExecutorServiceSettings(settings, clusterService), + requestSender + ); + } + + @Override + public void start() { + if (started.compareAndSet(false, true)) { + // The manager must be started before the executor service. That way we guarantee that the http client + // is ready prior to the service attempting to use the http client to send a request + threadPool.executor(UTILITY_THREAD_POOL_NAME).execute(executorService::start); + waitForStartToComplete(); + } + } + + private void waitForStartToComplete() { + try { + if (startCompleted.await(START_COMPLETED_WAIT_TIME.getSeconds(), TimeUnit.SECONDS) == false) { + throw new IllegalStateException("Amazon Bedrock sender startup did not complete in time"); + } + } catch (InterruptedException e) { + throw new IllegalStateException("Amazon Bedrock sender interrupted while waiting for startup to complete"); + } + } + + @Override + public void send( + RequestManager requestCreator, + InferenceInputs inferenceInputs, + TimeValue timeout, + ActionListener listener + ) { + assert started.get() : "Amazon Bedrock request sender: call start() before sending a request"; + waitForStartToComplete(); + + if (requestCreator instanceof AmazonBedrockRequestManager amazonBedrockRequestManager) { + executorService.execute(amazonBedrockRequestManager, inferenceInputs, timeout, listener); + return; + } + + listener.onFailure(new ElasticsearchException("Amazon Bedrock request sender did not receive a valid request request manager")); + } + + @Override + public void close() throws IOException { + executorService.shutdown(); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/anthropic/AnthropicAccount.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/anthropic/AnthropicAccount.java new file mode 100644 index 0000000000000..fb74188b10995 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/anthropic/AnthropicAccount.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
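The request sender above is wired through a small factory and guards every send behind a startup latch, waiting at most five seconds for its executor service to come up. A sketch of the wiring, assuming the serviceComponents, clusterService, requestManager, inferenceInputs and listener fixtures already exist:

    var factory = new AmazonBedrockRequestSender.Factory(serviceComponents, clusterService);
    Sender sender = factory.createSender();   // also starts the executor service and waits on the startup latch
    sender.send(requestManager, inferenceInputs, TimeValue.timeValueSeconds(30), listener);
    // Only AmazonBedrockRequestManager instances are accepted; any other manager fails the listener.
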
+ */ + +package org.elasticsearch.xpack.inference.external.anthropic; + +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.xpack.inference.services.anthropic.AnthropicModel; + +import java.net.URI; +import java.util.Objects; + +public record AnthropicAccount(URI uri, SecureString apiKey) { + + public static AnthropicAccount of(AnthropicModel model) { + return new AnthropicAccount(model.getUri(), model.apiKey()); + } + + public AnthropicAccount { + Objects.requireNonNull(uri); + Objects.requireNonNull(apiKey); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/anthropic/AnthropicResponseHandler.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/anthropic/AnthropicResponseHandler.java new file mode 100644 index 0000000000000..cab2c655b9ffb --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/anthropic/AnthropicResponseHandler.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.anthropic; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.Strings; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.external.http.retry.BaseResponseHandler; +import org.elasticsearch.xpack.inference.external.http.retry.ResponseParser; +import org.elasticsearch.xpack.inference.external.http.retry.RetryException; +import org.elasticsearch.xpack.inference.external.request.Request; +import org.elasticsearch.xpack.inference.external.response.ErrorMessageResponseEntity; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; + +import static org.elasticsearch.xpack.inference.external.http.HttpUtils.checkForEmptyBody; +import static org.elasticsearch.xpack.inference.external.http.retry.ResponseHandlerUtils.getFirstHeaderOrUnknown; + +public class AnthropicResponseHandler extends BaseResponseHandler { + /** + * Rate limit headers taken from https://docs.anthropic.com/en/api/rate-limits#response-headers + */ + // The maximum number of requests allowed within the rate limit window. + static final String REQUESTS_LIMIT = "anthropic-ratelimit-requests-limit"; + // The number of requests remaining within the current rate limit window. + static final String REMAINING_REQUESTS = "anthropic-ratelimit-requests-remaining"; + // The time when the request rate limit window will reset, provided in RFC 3339 format. + static final String REQUEST_RESET = "anthropic-ratelimit-requests-reset"; + // The maximum number of tokens allowed within the rate limit window. + static final String TOKENS_LIMIT = "anthropic-ratelimit-tokens-limit"; + // The number of tokens remaining, rounded to the nearest thousand, within the current rate limit window. + static final String REMAINING_TOKENS = "anthropic-ratelimit-tokens-remaining"; + // The time when the token rate limit window will reset, provided in RFC 3339 format. + static final String TOKENS_RESET = "anthropic-ratelimit-tokens-reset"; + // The number of seconds until the rate limit window resets. 
+ static final String RETRY_AFTER = "retry-after"; + + static final String SERVER_BUSY = "Received an Anthropic server is temporarily overloaded status code"; + + public AnthropicResponseHandler(String requestType, ResponseParser parseFunction) { + super(requestType, parseFunction, ErrorMessageResponseEntity::fromResponse); + } + + @Override + public void validateResponse(ThrottlerManager throttlerManager, Logger logger, Request request, HttpResult result) + throws RetryException { + checkForFailureStatusCode(request, result); + checkForEmptyBody(throttlerManager, logger, request, result); + } + + /** + * Validates the status code throws an RetryException if not in the range [200, 300). + * + * The Anthropic API error codes are documented here. + * @param request The originating request + * @param result The http response and body + * @throws RetryException Throws if status code is {@code >= 300 or < 200 } + */ + void checkForFailureStatusCode(Request request, HttpResult result) throws RetryException { + int statusCode = result.response().getStatusLine().getStatusCode(); + if (statusCode >= 200 && statusCode < 300) { + return; + } + + // handle error codes + if (statusCode == 500) { + throw new RetryException(true, buildError(SERVER_ERROR, request, result)); + } else if (statusCode == 529) { + throw new RetryException(true, buildError(SERVER_BUSY, request, result)); + } else if (statusCode > 500) { + throw new RetryException(false, buildError(SERVER_ERROR, request, result)); + } else if (statusCode == 429) { + throw new RetryException(true, buildError(buildRateLimitErrorMessage(result), request, result)); + } else if (statusCode == 403) { + throw new RetryException(false, buildError(PERMISSION_DENIED, request, result)); + } else if (statusCode == 401) { + throw new RetryException(false, buildError(AUTHENTICATION, request, result)); + } else if (statusCode >= 300 && statusCode < 400) { + throw new RetryException(false, buildError(REDIRECTION, request, result)); + } else { + throw new RetryException(false, buildError(UNSUCCESSFUL, request, result)); + } + } + + static String buildRateLimitErrorMessage(HttpResult result) { + var response = result.response(); + var tokenLimit = getFirstHeaderOrUnknown(response, TOKENS_LIMIT); + var remainingTokens = getFirstHeaderOrUnknown(response, REMAINING_TOKENS); + var requestLimit = getFirstHeaderOrUnknown(response, REQUESTS_LIMIT); + var remainingRequests = getFirstHeaderOrUnknown(response, REMAINING_REQUESTS); + var requestReset = getFirstHeaderOrUnknown(response, REQUEST_RESET); + var tokensReset = getFirstHeaderOrUnknown(response, TOKENS_RESET); + var retryAfter = getFirstHeaderOrUnknown(response, RETRY_AFTER); + + var usageMessage = Strings.format( + "Token limit [%s], remaining tokens [%s], tokens reset [%s]. " + + "Request limit [%s], remaining requests [%s], request reset [%s]. Retry after [%s]", + tokenLimit, + remainingTokens, + tokensReset, + requestLimit, + remainingRequests, + requestReset, + retryAfter + ); + + return RATE_LIMIT + ". 
" + usageMessage; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/googlevertexai/GoogleVertexAiResponseHandler.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/googlevertexai/GoogleVertexAiResponseHandler.java new file mode 100644 index 0000000000000..872bf51f3662a --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/googlevertexai/GoogleVertexAiResponseHandler.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.googlevertexai; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.external.http.retry.BaseResponseHandler; +import org.elasticsearch.xpack.inference.external.http.retry.ResponseParser; +import org.elasticsearch.xpack.inference.external.http.retry.RetryException; +import org.elasticsearch.xpack.inference.external.request.Request; +import org.elasticsearch.xpack.inference.external.response.googlevertexai.GoogleVertexAiErrorResponseEntity; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.external.http.HttpUtils.checkForEmptyBody; + +public class GoogleVertexAiResponseHandler extends BaseResponseHandler { + + static final String GOOGLE_VERTEX_AI_UNAVAILABLE = "The Google Vertex AI service may be temporarily overloaded or down"; + + public GoogleVertexAiResponseHandler(String requestType, ResponseParser parseFunction) { + super(requestType, parseFunction, GoogleVertexAiErrorResponseEntity::fromResponse); + } + + @Override + public void validateResponse(ThrottlerManager throttlerManager, Logger logger, Request request, HttpResult result) + throws RetryException { + checkForFailureStatusCode(request, result); + checkForEmptyBody(throttlerManager, logger, request, result); + } + + void checkForFailureStatusCode(Request request, HttpResult result) throws RetryException { + int statusCode = result.response().getStatusLine().getStatusCode(); + if (statusCode >= 200 && statusCode < 300) { + return; + } + + // handle error codes + if (statusCode == 500) { + throw new RetryException(true, buildError(SERVER_ERROR, request, result)); + } else if (statusCode == 503) { + throw new RetryException(true, buildError(GOOGLE_VERTEX_AI_UNAVAILABLE, request, result)); + } else if (statusCode > 500) { + throw new RetryException(false, buildError(SERVER_ERROR, request, result)); + } else if (statusCode == 429) { + throw new RetryException(true, buildError(RATE_LIMIT, request, result)); + } else if (statusCode == 404) { + throw new RetryException(false, buildError(resourceNotFoundError(request), request, result)); + } else if (statusCode == 403) { + throw new RetryException(false, buildError(PERMISSION_DENIED, request, result)); + } else if (statusCode >= 300 && statusCode < 400) { + throw new RetryException(false, buildError(REDIRECTION, request, result)); + } else { + throw new RetryException(false, buildError(UNSUCCESSFUL, request, result)); + } + } + + private static String resourceNotFoundError(Request request) { + return format("Resource not found at [%s]", 
request.getURI()); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpSettings.java index ef5fec24c3d59..642b76d775173 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpSettings.java @@ -20,9 +20,9 @@ public class HttpSettings { // These settings are default scope for testing static final Setting MAX_HTTP_RESPONSE_SIZE = Setting.byteSizeSetting( "xpack.inference.http.max_response_size", - new ByteSizeValue(10, ByteSizeUnit.MB), // default + new ByteSizeValue(50, ByteSizeUnit.MB), // default ByteSizeValue.ONE, // min - new ByteSizeValue(50, ByteSizeUnit.MB), // max + new ByteSizeValue(100, ByteSizeUnit.MB), // max Setting.Property.NodeScope, Setting.Property.Dynamic ); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSender.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSender.java index ffe10ffe3b6ae..dd45501564e4e 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSender.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSender.java @@ -17,6 +17,7 @@ import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.common.SizeLimitInputStream; import org.elasticsearch.xpack.inference.external.http.HttpClient; import org.elasticsearch.xpack.inference.external.http.HttpResult; import org.elasticsearch.xpack.inference.external.request.Request; @@ -26,12 +27,16 @@ import java.net.UnknownHostException; import java.util.Objects; import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; public class RetryingHttpSender implements RequestSender { + + static final int MAX_RETIES = 3; + private final HttpClient httpClient; private final ThrottlerManager throttlerManager; private final RetrySettings retrySettings; @@ -68,6 +73,7 @@ private class InternalRetrier extends RetryableAction { private final Logger logger; private final HttpClientContext context; private final Supplier hasRequestCompletedFunction; + private final AtomicInteger retryCount; InternalRetrier( Logger logger, @@ -91,10 +97,12 @@ private class InternalRetrier extends RetryableAction { this.context = Objects.requireNonNull(context); this.responseHandler = Objects.requireNonNull(responseHandler); this.hasRequestCompletedFunction = Objects.requireNonNull(hasRequestCompletedFunction); + this.retryCount = new AtomicInteger(0); } @Override public void tryAction(ActionListener listener) { + retryCount.incrementAndGet(); // A timeout likely occurred so let's stop attempting to execute the request if (hasRequestCompletedFunction.get()) { return; @@ -140,10 +148,10 @@ private Exception transformIfRetryable(Exception e) { RestStatus.BAD_REQUEST, e ); - } - - if (e instanceof IOException) { - exceptionToReturn = new 
RetryException(true, e); + } else if (e instanceof SizeLimitInputStream.InputStreamTooLargeException) { + return e; + } else if (e instanceof IOException) { + return new RetryException(true, e); } return exceptionToReturn; @@ -164,6 +172,10 @@ private Exception wrapWithElasticsearchException(Exception e, String inferenceEn @Override public boolean shouldRetry(Exception e) { + if (retryCount.get() >= MAX_RETIES) { + return false; + } + if (e instanceof Retryable retry) { request = retry.rebuildRequest(request); return retry.shouldRetry(); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockChatCompletionRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockChatCompletionRequestManager.java new file mode 100644 index 0000000000000..8642a19b26a7d --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockChatCompletionRequestManager.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Strings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockChatCompletionEntityFactory; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockChatCompletionRequest; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.completion.AmazonBedrockChatCompletionResponseHandler; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionModel; + +import java.util.List; +import java.util.function.Supplier; + +public class AmazonBedrockChatCompletionRequestManager extends AmazonBedrockRequestManager { + private static final Logger logger = LogManager.getLogger(AmazonBedrockChatCompletionRequestManager.class); + private final AmazonBedrockChatCompletionModel model; + + public AmazonBedrockChatCompletionRequestManager( + AmazonBedrockChatCompletionModel model, + ThreadPool threadPool, + @Nullable TimeValue timeout + ) { + super(model, threadPool, timeout); + this.model = model; + } + + @Override + public void execute( + InferenceInputs inferenceInputs, + RequestSender requestSender, + Supplier hasRequestCompletedFunction, + ActionListener listener + ) { + List docsInput = DocumentsOnlyInput.of(inferenceInputs).getInputs(); + var requestEntity = AmazonBedrockChatCompletionEntityFactory.createEntity(model, docsInput); + var request = new AmazonBedrockChatCompletionRequest(model, requestEntity, timeout); + var responseHandler = new AmazonBedrockChatCompletionResponseHandler(); + + try { + requestSender.send(logger, request, 
HttpClientContext.create(), hasRequestCompletedFunction, responseHandler, listener); + } catch (Exception e) { + var errorMessage = Strings.format( + "Failed to send [completion] request from inference entity id [%s]", + request.getInferenceEntityId() + ); + logger.warn(errorMessage, e); + listener.onFailure(new ElasticsearchException(errorMessage, e)); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockEmbeddingsRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockEmbeddingsRequestManager.java new file mode 100644 index 0000000000000..2f94cdf342938 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockEmbeddingsRequestManager.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Strings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.common.Truncator; +import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings.AmazonBedrockEmbeddingsEntityFactory; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings.AmazonBedrockEmbeddingsRequest; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.embeddings.AmazonBedrockEmbeddingsResponseHandler; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModel; + +import java.util.List; +import java.util.Objects; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.inference.common.Truncator.truncate; + +public class AmazonBedrockEmbeddingsRequestManager extends AmazonBedrockRequestManager { + private static final Logger logger = LogManager.getLogger(AmazonBedrockEmbeddingsRequestManager.class); + + private final AmazonBedrockEmbeddingsModel embeddingsModel; + private final Truncator truncator; + + public AmazonBedrockEmbeddingsRequestManager( + AmazonBedrockEmbeddingsModel model, + Truncator truncator, + ThreadPool threadPool, + @Nullable TimeValue timeout + ) { + super(model, threadPool, timeout); + this.embeddingsModel = model; + this.truncator = Objects.requireNonNull(truncator); + } + + @Override + public void execute( + InferenceInputs inferenceInputs, + RequestSender requestSender, + Supplier hasRequestCompletedFunction, + ActionListener listener + ) { + List docsInput = DocumentsOnlyInput.of(inferenceInputs).getInputs(); + var serviceSettings = embeddingsModel.getServiceSettings(); + var truncatedInput = truncate(docsInput, serviceSettings.maxInputTokens()); + var requestEntity = AmazonBedrockEmbeddingsEntityFactory.createEntity(embeddingsModel, truncatedInput); 
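+        // Documents are truncated to the service's configured maxInputTokens before the request
+        // entity is created; failures from requestSender.send(...) below are logged and surfaced
+        // to the listener as an ElasticsearchException rather than being rethrown.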
+ var responseHandler = new AmazonBedrockEmbeddingsResponseHandler(); + var request = new AmazonBedrockEmbeddingsRequest(truncator, truncatedInput, embeddingsModel, requestEntity, timeout); + try { + requestSender.send(logger, request, HttpClientContext.create(), hasRequestCompletedFunction, responseHandler, listener); + } catch (Exception e) { + var errorMessage = Strings.format( + "Failed to send [text_embedding] request from inference entity id [%s]", + request.getInferenceEntityId() + ); + logger.warn(errorMessage, e); + listener.onFailure(new ElasticsearchException(errorMessage, e)); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockRequestExecutorService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockRequestExecutorService.java new file mode 100644 index 0000000000000..8b4672d45c250 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockRequestExecutorService.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockExecuteOnlyRequestSender; + +import java.io.IOException; +import java.util.concurrent.CountDownLatch; + +/** + * Allows this to have a public interface for Amazon Bedrock support + */ +public class AmazonBedrockRequestExecutorService extends RequestExecutorService { + + private final AmazonBedrockExecuteOnlyRequestSender requestSender; + + public AmazonBedrockRequestExecutorService( + ThreadPool threadPool, + CountDownLatch startupLatch, + RequestExecutorServiceSettings settings, + AmazonBedrockExecuteOnlyRequestSender requestSender + ) { + super(threadPool, startupLatch, settings, requestSender); + this.requestSender = requestSender; + } + + @Override + public void shutdown() { + super.shutdown(); + try { + requestSender.shutdown(); + } catch (IOException e) { + // swallow the exception + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockRequestManager.java new file mode 100644 index 0000000000000..f75343b038368 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AmazonBedrockRequestManager.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; + +import java.util.Objects; + +public abstract class AmazonBedrockRequestManager implements RequestManager { + + protected final ThreadPool threadPool; + protected final TimeValue timeout; + private final AmazonBedrockModel baseModel; + + protected AmazonBedrockRequestManager(AmazonBedrockModel baseModel, ThreadPool threadPool, @Nullable TimeValue timeout) { + this.baseModel = Objects.requireNonNull(baseModel); + this.threadPool = Objects.requireNonNull(threadPool); + this.timeout = timeout; + } + + @Override + public String inferenceEntityId() { + return baseModel.getInferenceEntityId(); + } + + @Override + public RateLimitSettings rateLimitSettings() { + return baseModel.rateLimitSettings(); + } + + record RateLimitGrouping(int keyHash) { + public static AmazonBedrockRequestManager.RateLimitGrouping of(AmazonBedrockModel model) { + Objects.requireNonNull(model); + + var awsSecretSettings = model.getSecretSettings(); + + return new RateLimitGrouping(Objects.hash(awsSecretSettings.accessKey, awsSecretSettings.secretKey)); + } + } + + @Override + public Object rateLimitGrouping() { + return RateLimitGrouping.of(this.baseModel); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AnthropicCompletionRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AnthropicCompletionRequestManager.java new file mode 100644 index 0000000000000..7c527bbd2ee98 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AnthropicCompletionRequestManager.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.external.anthropic.AnthropicResponseHandler; +import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; +import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; +import org.elasticsearch.xpack.inference.external.request.anthropic.AnthropicChatCompletionRequest; +import org.elasticsearch.xpack.inference.external.response.anthropic.AnthropicChatCompletionResponseEntity; +import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionModel; + +import java.util.List; +import java.util.Objects; +import java.util.function.Supplier; + +public class AnthropicCompletionRequestManager extends AnthropicRequestManager { + + private static final Logger logger = LogManager.getLogger(AnthropicCompletionRequestManager.class); + + private static final ResponseHandler HANDLER = createCompletionHandler(); + + public static AnthropicCompletionRequestManager of(AnthropicChatCompletionModel model, ThreadPool threadPool) { + return new AnthropicCompletionRequestManager(Objects.requireNonNull(model), Objects.requireNonNull(threadPool)); + } + + private final AnthropicChatCompletionModel model; + + private AnthropicCompletionRequestManager(AnthropicChatCompletionModel model, ThreadPool threadPool) { + super(threadPool, model); + this.model = Objects.requireNonNull(model); + } + + @Override + public void execute( + InferenceInputs inferenceInputs, + RequestSender requestSender, + Supplier hasRequestCompletedFunction, + ActionListener listener + ) { + List docsInput = DocumentsOnlyInput.of(inferenceInputs).getInputs(); + AnthropicChatCompletionRequest request = new AnthropicChatCompletionRequest(docsInput, model); + + execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener)); + } + + private static ResponseHandler createCompletionHandler() { + return new AnthropicResponseHandler("anthropic completions", AnthropicChatCompletionResponseEntity::fromResponse); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AnthropicRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AnthropicRequestManager.java new file mode 100644 index 0000000000000..a47910c0b37c8 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AnthropicRequestManager.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.external.anthropic.AnthropicAccount; +import org.elasticsearch.xpack.inference.services.anthropic.AnthropicModel; + +import java.util.Objects; + +abstract class AnthropicRequestManager extends BaseRequestManager { + + protected AnthropicRequestManager(ThreadPool threadPool, AnthropicModel model) { + super(threadPool, model.getInferenceEntityId(), RateLimitGrouping.of(model), model.rateLimitServiceSettings().rateLimitSettings()); + } + + record RateLimitGrouping(int accountHash, int modelIdHash) { + public static RateLimitGrouping of(AnthropicModel model) { + Objects.requireNonNull(model); + + return new RateLimitGrouping(AnthropicAccount.of(model).hashCode(), model.rateLimitServiceSettings().modelId().hashCode()); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureAiStudioChatCompletionRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureAiStudioChatCompletionRequestManager.java index 002fa71b7fb5d..c5e5a5251f7db 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureAiStudioChatCompletionRequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureAiStudioChatCompletionRequestManager.java @@ -15,8 +15,8 @@ import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; import org.elasticsearch.xpack.inference.external.request.azureaistudio.AzureAiStudioChatCompletionRequest; -import org.elasticsearch.xpack.inference.external.response.AzureMistralOpenAiErrorResponseEntity; import org.elasticsearch.xpack.inference.external.response.AzureMistralOpenAiExternalResponseHandler; +import org.elasticsearch.xpack.inference.external.response.ErrorMessageResponseEntity; import org.elasticsearch.xpack.inference.external.response.azureaistudio.AzureAiStudioChatCompletionResponseEntity; import org.elasticsearch.xpack.inference.services.azureaistudio.completion.AzureAiStudioChatCompletionModel; @@ -37,13 +37,13 @@ public AzureAiStudioChatCompletionRequestManager(AzureAiStudioChatCompletionMode @Override public void execute( - String query, - List input, + InferenceInputs inferenceInputs, RequestSender requestSender, Supplier hasRequestCompletedFunction, ActionListener listener ) { - AzureAiStudioChatCompletionRequest request = new AzureAiStudioChatCompletionRequest(model, input); + List docsInput = DocumentsOnlyInput.of(inferenceInputs).getInputs(); + AzureAiStudioChatCompletionRequest request = new AzureAiStudioChatCompletionRequest(model, docsInput); execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener)); } @@ -52,7 +52,7 @@ private static ResponseHandler createCompletionHandler() { return new AzureMistralOpenAiExternalResponseHandler( "azure ai studio completion", new AzureAiStudioChatCompletionResponseEntity(), - AzureMistralOpenAiErrorResponseEntity::fromResponse + ErrorMessageResponseEntity::fromResponse ); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureAiStudioEmbeddingsRequestManager.java 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureAiStudioEmbeddingsRequestManager.java index ec5ab2fee6a57..c610a7f31f7ba 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureAiStudioEmbeddingsRequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureAiStudioEmbeddingsRequestManager.java @@ -16,8 +16,8 @@ import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; import org.elasticsearch.xpack.inference.external.request.azureaistudio.AzureAiStudioEmbeddingsRequest; -import org.elasticsearch.xpack.inference.external.response.AzureMistralOpenAiErrorResponseEntity; import org.elasticsearch.xpack.inference.external.response.AzureMistralOpenAiExternalResponseHandler; +import org.elasticsearch.xpack.inference.external.response.ErrorMessageResponseEntity; import org.elasticsearch.xpack.inference.external.response.azureaistudio.AzureAiStudioEmbeddingsResponseEntity; import org.elasticsearch.xpack.inference.services.azureaistudio.embeddings.AzureAiStudioEmbeddingsModel; @@ -41,13 +41,13 @@ public AzureAiStudioEmbeddingsRequestManager(AzureAiStudioEmbeddingsModel model, @Override public void execute( - String query, - List input, + InferenceInputs inferenceInputs, RequestSender requestSender, Supplier hasRequestCompletedFunction, ActionListener listener ) { - var truncatedInput = truncate(input, model.getServiceSettings().maxInputTokens()); + List docsInput = DocumentsOnlyInput.of(inferenceInputs).getInputs(); + var truncatedInput = truncate(docsInput, model.getServiceSettings().maxInputTokens()); AzureAiStudioEmbeddingsRequest request = new AzureAiStudioEmbeddingsRequest(truncator, truncatedInput, model); execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener)); } @@ -56,7 +56,7 @@ private static ResponseHandler createEmbeddingsHandler() { return new AzureMistralOpenAiExternalResponseHandler( "azure ai studio text embedding", new AzureAiStudioEmbeddingsResponseEntity(), - AzureMistralOpenAiErrorResponseEntity::fromResponse + ErrorMessageResponseEntity::fromResponse ); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureOpenAiCompletionRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureOpenAiCompletionRequestManager.java index 5206d6c2c23cc..8c9b848f78e3c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureOpenAiCompletionRequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureOpenAiCompletionRequestManager.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.inference.external.azureopenai.AzureOpenAiResponseHandler; @@ -43,13 +42,13 @@ public AzureOpenAiCompletionRequestManager(AzureOpenAiCompletionModel model, Thr @Override public void execute( - @Nullable String query, - List input, + InferenceInputs inferenceInputs, RequestSender 
requestSender, Supplier hasRequestCompletedFunction, ActionListener listener ) { - AzureOpenAiCompletionRequest request = new AzureOpenAiCompletionRequest(input, model); + List docsInput = DocumentsOnlyInput.of(inferenceInputs).getInputs(); + AzureOpenAiCompletionRequest request = new AzureOpenAiCompletionRequest(docsInput, model); execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener)); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureOpenAiEmbeddingsRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureOpenAiEmbeddingsRequestManager.java index e0fcee30e5af3..8d4162858b36f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureOpenAiEmbeddingsRequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/AzureOpenAiEmbeddingsRequestManager.java @@ -55,13 +55,14 @@ public AzureOpenAiEmbeddingsRequestManager(AzureOpenAiEmbeddingsModel model, Tru @Override public void execute( - String query, - List input, + InferenceInputs inferenceInputs, RequestSender requestSender, Supplier hasRequestCompletedFunction, ActionListener listener ) { - var truncatedInput = truncate(input, model.getServiceSettings().maxInputTokens()); + List docsInput = DocumentsOnlyInput.of(inferenceInputs).getInputs(); + var truncatedInput = truncate(docsInput, model.getServiceSettings().maxInputTokens()); + AzureOpenAiEmbeddingsRequest request = new AzureOpenAiEmbeddingsRequest(truncator, truncatedInput, model); execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener)); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/CohereCompletionRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/CohereCompletionRequestManager.java index 8a4b0e45b93fa..423093a14a9f0 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/CohereCompletionRequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/CohereCompletionRequestManager.java @@ -46,13 +46,13 @@ private CohereCompletionRequestManager(CohereCompletionModel model, ThreadPool t @Override public void execute( - String query, - List input, + InferenceInputs inferenceInputs, RequestSender requestSender, Supplier hasRequestCompletedFunction, ActionListener listener ) { - CohereCompletionRequest request = new CohereCompletionRequest(input, model); + List docsInput = DocumentsOnlyInput.of(inferenceInputs).getInputs(); + CohereCompletionRequest request = new CohereCompletionRequest(docsInput, model); execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener)); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/CohereEmbeddingsRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/CohereEmbeddingsRequestManager.java index a51910f1d0a67..402f91a0838dc 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/CohereEmbeddingsRequestManager.java +++ 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/CohereEmbeddingsRequestManager.java @@ -44,13 +44,13 @@ private CohereEmbeddingsRequestManager(CohereEmbeddingsModel model, ThreadPool t @Override public void execute( - String query, - List input, + InferenceInputs inferenceInputs, RequestSender requestSender, Supplier hasRequestCompletedFunction, ActionListener listener ) { - CohereEmbeddingsRequest request = new CohereEmbeddingsRequest(input, model); + List docsInput = DocumentsOnlyInput.of(inferenceInputs).getInputs(); + CohereEmbeddingsRequest request = new CohereEmbeddingsRequest(docsInput, model); execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener)); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/CohereRerankRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/CohereRerankRequestManager.java index 1351eec406569..9d565e7124b03 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/CohereRerankRequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/CohereRerankRequestManager.java @@ -19,7 +19,6 @@ import org.elasticsearch.xpack.inference.external.response.cohere.CohereRankedResponseEntity; import org.elasticsearch.xpack.inference.services.cohere.rerank.CohereRerankModel; -import java.util.List; import java.util.Objects; import java.util.function.Supplier; @@ -44,13 +43,13 @@ private CohereRerankRequestManager(CohereRerankModel model, ThreadPool threadPoo @Override public void execute( - String query, - List input, + InferenceInputs inferenceInputs, RequestSender requestSender, Supplier hasRequestCompletedFunction, ActionListener listener ) { - CohereRerankRequest request = new CohereRerankRequest(query, input, model); + var rerankInput = QueryAndDocsInputs.of(inferenceInputs); + CohereRerankRequest request = new CohereRerankRequest(rerankInput.getQuery(), rerankInput.getChunks(), model); execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener)); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/DocumentsOnlyInput.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/DocumentsOnlyInput.java index a11be003585fd..a32e2018117f8 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/DocumentsOnlyInput.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/DocumentsOnlyInput.java @@ -12,7 +12,15 @@ public class DocumentsOnlyInput extends InferenceInputs { - List input; + public static DocumentsOnlyInput of(InferenceInputs inferenceInputs) { + if (inferenceInputs instanceof DocumentsOnlyInput == false) { + throw createUnsupportedTypeException(inferenceInputs); + } + + return (DocumentsOnlyInput) inferenceInputs; + } + + private final List input; public DocumentsOnlyInput(List chunks) { super(); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/GoogleAiStudioCompletionRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/GoogleAiStudioCompletionRequestManager.java index 
2b191b046477b..426102f7f2376 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/GoogleAiStudioCompletionRequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/GoogleAiStudioCompletionRequestManager.java @@ -42,13 +42,13 @@ public GoogleAiStudioCompletionRequestManager(GoogleAiStudioCompletionModel mode @Override public void execute( - String query, - List input, + InferenceInputs inferenceInputs, RequestSender requestSender, Supplier hasRequestCompletedFunction, ActionListener listener ) { - GoogleAiStudioCompletionRequest request = new GoogleAiStudioCompletionRequest(input, model); + List docsInput = DocumentsOnlyInput.of(inferenceInputs).getInputs(); + GoogleAiStudioCompletionRequest request = new GoogleAiStudioCompletionRequest(docsInput, model); execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener)); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/GoogleAiStudioEmbeddingsRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/GoogleAiStudioEmbeddingsRequestManager.java index 6436e0231ab48..c7f87fb1cbf7f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/GoogleAiStudioEmbeddingsRequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/GoogleAiStudioEmbeddingsRequestManager.java @@ -48,13 +48,13 @@ public GoogleAiStudioEmbeddingsRequestManager(GoogleAiStudioEmbeddingsModel mode @Override public void execute( - String query, - List input, + InferenceInputs inferenceInputs, RequestSender requestSender, Supplier hasRequestCompletedFunction, ActionListener listener ) { - var truncatedInput = truncate(input, model.getServiceSettings().maxInputTokens()); + List docsInput = DocumentsOnlyInput.of(inferenceInputs).getInputs(); + var truncatedInput = truncate(docsInput, model.getServiceSettings().maxInputTokens()); GoogleAiStudioEmbeddingsRequest request = new GoogleAiStudioEmbeddingsRequest(truncator, truncatedInput, model); execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener)); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/GoogleVertexAiEmbeddingsRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/GoogleVertexAiEmbeddingsRequestManager.java new file mode 100644 index 0000000000000..94f44c64b04da --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/GoogleVertexAiEmbeddingsRequestManager.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.common.Truncator; +import org.elasticsearch.xpack.inference.external.googlevertexai.GoogleVertexAiResponseHandler; +import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; +import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; +import org.elasticsearch.xpack.inference.external.request.googlevertexai.GoogleVertexAiEmbeddingsRequest; +import org.elasticsearch.xpack.inference.external.response.googlevertexai.GoogleVertexAiEmbeddingsResponseEntity; +import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsModel; + +import java.util.List; +import java.util.Objects; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.inference.common.Truncator.truncate; + +public class GoogleVertexAiEmbeddingsRequestManager extends GoogleVertexAiRequestManager { + + private static final Logger logger = LogManager.getLogger(GoogleVertexAiEmbeddingsRequestManager.class); + + private static final ResponseHandler HANDLER = createEmbeddingsHandler(); + + private static ResponseHandler createEmbeddingsHandler() { + return new GoogleVertexAiResponseHandler("google vertex ai embeddings", GoogleVertexAiEmbeddingsResponseEntity::fromResponse); + } + + private final GoogleVertexAiEmbeddingsModel model; + + private final Truncator truncator; + + public GoogleVertexAiEmbeddingsRequestManager(GoogleVertexAiEmbeddingsModel model, Truncator truncator, ThreadPool threadPool) { + super(threadPool, model, RateLimitGrouping.of(model)); + this.model = Objects.requireNonNull(model); + this.truncator = Objects.requireNonNull(truncator); + } + + record RateLimitGrouping(int projectIdHash) { + public static RateLimitGrouping of(GoogleVertexAiEmbeddingsModel model) { + Objects.requireNonNull(model); + + return new RateLimitGrouping(model.rateLimitServiceSettings().projectId().hashCode()); + } + } + + @Override + public void execute( + InferenceInputs inferenceInputs, + RequestSender requestSender, + Supplier hasRequestCompletedFunction, + ActionListener listener + ) { + List docsInput = DocumentsOnlyInput.of(inferenceInputs).getInputs(); + var truncatedInput = truncate(docsInput, model.getServiceSettings().maxInputTokens()); + var request = new GoogleVertexAiEmbeddingsRequest(truncator, truncatedInput, model); + + execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener)); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/GoogleVertexAiRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/GoogleVertexAiRequestManager.java new file mode 100644 index 0000000000000..b1baa21ab6a8d --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/GoogleVertexAiRequestManager.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiModel; + +public abstract class GoogleVertexAiRequestManager extends BaseRequestManager { + + GoogleVertexAiRequestManager(ThreadPool threadPool, GoogleVertexAiModel model, Object rateLimitGroup) { + super(threadPool, model.getInferenceEntityId(), rateLimitGroup, model.rateLimitServiceSettings().rateLimitSettings()); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/GoogleVertexAiRerankRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/GoogleVertexAiRerankRequestManager.java new file mode 100644 index 0000000000000..e74f0049fffb0 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/GoogleVertexAiRerankRequestManager.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http.sender; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.external.googlevertexai.GoogleVertexAiResponseHandler; +import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; +import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; +import org.elasticsearch.xpack.inference.external.request.googlevertexai.GoogleVertexAiRerankRequest; +import org.elasticsearch.xpack.inference.external.response.googlevertexai.GoogleVertexAiRerankResponseEntity; +import org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankModel; + +import java.util.Objects; +import java.util.function.Supplier; + +public class GoogleVertexAiRerankRequestManager extends GoogleVertexAiRequestManager { + + private static final Logger logger = LogManager.getLogger(GoogleVertexAiRerankRequestManager.class); + + private static final ResponseHandler HANDLER = createGoogleVertexAiResponseHandler(); + + private static ResponseHandler createGoogleVertexAiResponseHandler() { + return new GoogleVertexAiResponseHandler( + "Google Vertex AI rerank", + (request, response) -> GoogleVertexAiRerankResponseEntity.fromResponse(response) + ); + } + + public static GoogleVertexAiRerankRequestManager of(GoogleVertexAiRerankModel model, ThreadPool threadPool) { + return new GoogleVertexAiRerankRequestManager(Objects.requireNonNull(model), Objects.requireNonNull(threadPool)); + } + + private final GoogleVertexAiRerankModel model; + + private GoogleVertexAiRerankRequestManager(GoogleVertexAiRerankModel model, ThreadPool threadPool) { + super(threadPool, model, RateLimitGrouping.of(model)); + this.model = model; + } + + record RateLimitGrouping(int projectIdHash) { + public static RateLimitGrouping of(GoogleVertexAiRerankModel model) { + Objects.requireNonNull(model); + + return new RateLimitGrouping(model.rateLimitServiceSettings().projectId().hashCode()); + } + } + + @Override + public void execute( + InferenceInputs 
inferenceInputs, + RequestSender requestSender, + Supplier hasRequestCompletedFunction, + ActionListener listener + ) { + var rerankInput = QueryAndDocsInputs.of(inferenceInputs); + GoogleVertexAiRerankRequest request = new GoogleVertexAiRerankRequest(rerankInput.getQuery(), rerankInput.getChunks(), model); + + execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener)); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HuggingFaceRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HuggingFaceRequestManager.java index 6c8fc446d5243..a33eb724551f1 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HuggingFaceRequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/HuggingFaceRequestManager.java @@ -55,13 +55,13 @@ private HuggingFaceRequestManager(HuggingFaceModel model, ResponseHandler respon @Override public void execute( - String query, - List input, + InferenceInputs inferenceInputs, RequestSender requestSender, Supplier hasRequestCompletedFunction, ActionListener listener ) { - var truncatedInput = truncate(input, model.getTokenLimit()); + List docsInput = DocumentsOnlyInput.of(inferenceInputs).getInputs(); + var truncatedInput = truncate(docsInput, model.getTokenLimit()); var request = new HuggingFaceInferenceRequest(truncator, truncatedInput, model); execute(new ExecutableInferenceRequest(requestSender, logger, request, responseHandler, hasRequestCompletedFunction, listener)); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/InferenceInputs.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/InferenceInputs.java index d7e07e734ce80..dd241857ef0c4 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/InferenceInputs.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/InferenceInputs.java @@ -7,4 +7,10 @@ package org.elasticsearch.xpack.inference.external.http.sender; -public abstract class InferenceInputs {} +import org.elasticsearch.common.Strings; + +public abstract class InferenceInputs { + public static IllegalArgumentException createUnsupportedTypeException(InferenceInputs inferenceInputs) { + return new IllegalArgumentException(Strings.format("Unsupported inference inputs type: [%s]", inferenceInputs.getClass())); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/InferenceRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/InferenceRequest.java index 6199a75a41a7d..52be5d8be2b6f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/InferenceRequest.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/InferenceRequest.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.inference.InferenceServiceResults; -import java.util.List; import java.util.function.Supplier; /** @@ -24,14 +23,9 @@ public interface InferenceRequest { RequestManager getRequestManager(); /** - * Returns the query associated with this request. 
Used for Rerank tasks. + * Returns the inputs associated with the request. */ - String getQuery(); - - /** - * Returns the text input associated with this request. - */ - List getInput(); + InferenceInputs getInferenceInputs(); /** * Returns the listener to notify of the results. diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/MistralEmbeddingsRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/MistralEmbeddingsRequestManager.java index ab6a1bfb31372..d550749cc2348 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/MistralEmbeddingsRequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/MistralEmbeddingsRequestManager.java @@ -16,8 +16,8 @@ import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; import org.elasticsearch.xpack.inference.external.request.mistral.MistralEmbeddingsRequest; -import org.elasticsearch.xpack.inference.external.response.AzureMistralOpenAiErrorResponseEntity; import org.elasticsearch.xpack.inference.external.response.AzureMistralOpenAiExternalResponseHandler; +import org.elasticsearch.xpack.inference.external.response.ErrorMessageResponseEntity; import org.elasticsearch.xpack.inference.external.response.mistral.MistralEmbeddingsResponseEntity; import org.elasticsearch.xpack.inference.services.mistral.embeddings.MistralEmbeddingsModel; @@ -38,7 +38,7 @@ private static ResponseHandler createEmbeddingsHandler() { return new AzureMistralOpenAiExternalResponseHandler( "mistral text embedding", new MistralEmbeddingsResponseEntity(), - AzureMistralOpenAiErrorResponseEntity::fromResponse + ErrorMessageResponseEntity::fromResponse ); } @@ -51,13 +51,13 @@ public MistralEmbeddingsRequestManager(MistralEmbeddingsModel model, Truncator t @Override public void execute( - String query, - List input, + InferenceInputs inferenceInputs, RequestSender requestSender, Supplier hasRequestCompletedFunction, ActionListener listener ) { - var truncatedInput = truncate(input, model.getServiceSettings().maxInputTokens()); + List docsInput = DocumentsOnlyInput.of(inferenceInputs).getInputs(); + var truncatedInput = truncate(docsInput, model.getServiceSettings().maxInputTokens()); MistralEmbeddingsRequest request = new MistralEmbeddingsRequest(truncator, truncatedInput, model); execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener)); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiCompletionRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiCompletionRequestManager.java index 7bc09fd76736b..65f25c0baf8dc 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiCompletionRequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiCompletionRequestManager.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.threadpool.ThreadPool; 
import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; @@ -43,13 +42,13 @@ private OpenAiCompletionRequestManager(OpenAiChatCompletionModel model, ThreadPo @Override public void execute( - @Nullable String query, - List input, + InferenceInputs inferenceInputs, RequestSender requestSender, Supplier hasRequestCompletedFunction, ActionListener listener ) { - OpenAiChatCompletionRequest request = new OpenAiChatCompletionRequest(input, model); + List docsInput = DocumentsOnlyInput.of(inferenceInputs).getInputs(); + OpenAiChatCompletionRequest request = new OpenAiChatCompletionRequest(docsInput, model); execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener)); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsRequestManager.java index 41f91d2b89ee5..5c164f2eb9644 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsRequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsRequestManager.java @@ -55,13 +55,13 @@ private OpenAiEmbeddingsRequestManager(OpenAiEmbeddingsModel model, Truncator tr @Override public void execute( - String query, - List input, + InferenceInputs inferenceInputs, RequestSender requestSender, Supplier hasRequestCompletedFunction, ActionListener listener ) { - var truncatedInput = truncate(input, model.getServiceSettings().maxInputTokens()); + List docsInput = DocumentsOnlyInput.of(inferenceInputs).getInputs(); + var truncatedInput = truncate(docsInput, model.getServiceSettings().maxInputTokens()); OpenAiEmbeddingsRequest request = new OpenAiEmbeddingsRequest(truncator, truncatedInput, model); execute(new ExecutableInferenceRequest(requestSender, logger, request, HANDLER, hasRequestCompletedFunction, listener)); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/QueryAndDocsInputs.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/QueryAndDocsInputs.java index 4d24598d67831..0d5f98c180ba9 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/QueryAndDocsInputs.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/QueryAndDocsInputs.java @@ -12,7 +12,15 @@ public class QueryAndDocsInputs extends InferenceInputs { - String query; + public static QueryAndDocsInputs of(InferenceInputs inferenceInputs) { + if (inferenceInputs instanceof QueryAndDocsInputs == false) { + throw createUnsupportedTypeException(inferenceInputs); + } + + return (QueryAndDocsInputs) inferenceInputs; + } + + private final String query; public String getQuery() { return query; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorService.java index 38d47aec68eb6..ad1324d0a315f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorService.java +++ 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorService.java @@ -413,7 +413,7 @@ private TimeValue executeEnqueuedTaskInternal() { assert shouldExecuteImmediately(reserveRes) : "Reserving request tokens required a sleep when it should not have"; task.getRequestManager() - .execute(task.getQuery(), task.getInput(), requestSender, task.getRequestCompletedFunction(), task.getListener()); + .execute(task.getInferenceInputs(), requestSender, task.getRequestCompletedFunction(), task.getListener()); return EXECUTED_A_TASK; } @@ -423,7 +423,7 @@ private static boolean shouldExecuteTask(RejectableTask task) { private static boolean isNoopRequest(InferenceRequest inferenceRequest) { return inferenceRequest.getRequestManager() == null - || inferenceRequest.getInput() == null + || inferenceRequest.getInferenceInputs() == null || inferenceRequest.getListener() == null; } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestManager.java index 79ef1b56ad231..853d6fdcb2473 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestManager.java @@ -8,12 +8,10 @@ package org.elasticsearch.xpack.inference.external.http.sender; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; import org.elasticsearch.xpack.inference.external.ratelimit.RateLimitable; -import java.util.List; import java.util.function.Supplier; /** @@ -21,8 +19,7 @@ */ public interface RequestManager extends RateLimitable { void execute( - @Nullable String query, - List input, + InferenceInputs inferenceInputs, RequestSender requestSender, Supplier hasRequestCompletedFunction, ActionListener listener diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTask.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTask.java index 7a5f482412289..9ccb93a0858ae 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTask.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTask.java @@ -16,7 +16,6 @@ import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.threadpool.ThreadPool; -import java.util.List; import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Supplier; @@ -27,8 +26,7 @@ class RequestTask implements RejectableTask { private final AtomicBoolean finished = new AtomicBoolean(); private final RequestManager requestCreator; - private final String query; - private final List input; + private final InferenceInputs inferenceInputs; private final ActionListener listener; RequestTask( @@ -40,16 +38,7 @@ class RequestTask implements RejectableTask { ) { this.requestCreator = Objects.requireNonNull(requestCreator); this.listener = getListener(Objects.requireNonNull(listener), timeout, Objects.requireNonNull(threadPool)); - - if (inferenceInputs instanceof QueryAndDocsInputs) { - 
this.query = ((QueryAndDocsInputs) inferenceInputs).getQuery(); - this.input = ((QueryAndDocsInputs) inferenceInputs).getChunks(); - } else if (inferenceInputs instanceof DocumentsOnlyInput) { - this.query = null; - this.input = ((DocumentsOnlyInput) inferenceInputs).getInputs(); - } else { - throw new IllegalArgumentException("Unsupported inference inputs type: " + inferenceInputs.getClass()); - } + this.inferenceInputs = Objects.requireNonNull(inferenceInputs); } private ActionListener getListener( @@ -91,13 +80,8 @@ public Supplier getRequestCompletedFunction() { } @Override - public List getInput() { - return input; - } - - @Override - public String getQuery() { - return query; + public InferenceInputs getInferenceInputs() { + return inferenceInputs; } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/AmazonBedrockJsonBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/AmazonBedrockJsonBuilder.java new file mode 100644 index 0000000000000..829e899beba5e --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/AmazonBedrockJsonBuilder.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.xcontent.ToXContent; + +import java.io.IOException; + +import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; + +public class AmazonBedrockJsonBuilder { + + private final ToXContent jsonWriter; + + public AmazonBedrockJsonBuilder(ToXContent jsonWriter) { + this.jsonWriter = jsonWriter; + } + + public String getStringContent() throws IOException { + try (var builder = jsonBuilder()) { + return Strings.toString(jsonWriter.toXContent(builder, null)); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/AmazonBedrockJsonWriter.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/AmazonBedrockJsonWriter.java new file mode 100644 index 0000000000000..83ebcb4563a8c --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/AmazonBedrockJsonWriter.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
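Note on the RequestManager / RequestTask hunks above: the separate query and input parameters are folded into a single InferenceInputs argument, so the instanceof unwrapping that used to live in RequestTask moves into the individual request managers. A minimal sketch of that unwrapping, assuming only the QueryAndDocsInputs and DocumentsOnlyInput accessors visible in the removed code; the helper class and record below are hypothetical and are not part of this diff.

package org.elasticsearch.xpack.inference.external.http.sender;

import java.util.List;

final class InferenceInputsUnwrapSketch {
    // Hypothetical holder for the values that RequestTask previously kept as separate fields.
    record QueryAndDocs(String query, List<String> docs) {}

    static QueryAndDocs unwrap(InferenceInputs inferenceInputs) {
        if (inferenceInputs instanceof QueryAndDocsInputs queryAndDocs) {
            // rerank-style inputs: a query plus the documents to score
            return new QueryAndDocs(queryAndDocs.getQuery(), queryAndDocs.getChunks());
        } else if (inferenceInputs instanceof DocumentsOnlyInput docsOnly) {
            // embeddings/completion-style inputs: documents only
            return new QueryAndDocs(null, docsOnly.getInputs());
        }
        throw new IllegalArgumentException("Unsupported inference inputs type: " + inferenceInputs.getClass());
    }
}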
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock; + +import com.fasterxml.jackson.core.JsonGenerator; + +import java.io.IOException; + +/** + * This is needed because the Amazon Bedrock SDK does not accept + * the formatting of XContent JSON output as its input + */ +public interface AmazonBedrockJsonWriter { + JsonGenerator writeJson(JsonGenerator generator) throws IOException; +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/AmazonBedrockRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/AmazonBedrockRequest.java new file mode 100644 index 0000000000000..e356212ed07fb --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/AmazonBedrockRequest.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockBaseClient; +import org.elasticsearch.xpack.inference.external.request.HttpRequest; +import org.elasticsearch.xpack.inference.external.request.Request; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; + +import java.net.URI; + +public abstract class AmazonBedrockRequest implements Request { + + protected final AmazonBedrockModel amazonBedrockModel; + protected final String inferenceId; + protected final TimeValue timeout; + + protected AmazonBedrockRequest(AmazonBedrockModel model, @Nullable TimeValue timeout) { + this.amazonBedrockModel = model; + this.inferenceId = model.getInferenceEntityId(); + this.timeout = timeout; + } + + protected abstract void executeRequest(AmazonBedrockBaseClient client); + + public AmazonBedrockModel model() { + return amazonBedrockModel; + } + + /** + * Amazon Bedrock uses the AWS SDK and does not create its own HTTP request. + * This is still needed so that the ExecutableInferenceRequest can retrieve the inferenceEntityId. + * @return a no-op request + */ + @Override + public final HttpRequest createHttpRequest() { + return new HttpRequest(new NoOpHttpRequest(), inferenceId); + } + + /** + * Amazon Bedrock uses the AWS SDK and does not create its own URI. + * @throws UnsupportedOperationException always + */ + @Override + public final URI getURI() { + throw new UnsupportedOperationException(); + } + + /** + * Should be overridden for text embeddings requests + * @return this request unchanged (no truncation by default) + */ + @Override + public Request truncate() { + return this; + } + + /** + * Should be overridden for text embeddings requests + * @return an empty array by default + */ + @Override + public boolean[] getTruncationInfo() { + return new boolean[0]; + } + + @Override + public String getInferenceEntityId() { + return amazonBedrockModel.getInferenceEntityId(); + } + + public TimeValue timeout() { + return timeout; + } + + public abstract TaskType taskType(); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/NoOpHttpRequest.java
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/NoOpHttpRequest.java new file mode 100644 index 0000000000000..7087bb03bca5e --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/NoOpHttpRequest.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock; + +import org.apache.http.client.methods.HttpRequestBase; + +/** + * Needed for compatibility with RequestSender + */ +public class NoOpHttpRequest extends HttpRequestBase { + @Override + public String getMethod() { + return "NOOP"; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAI21LabsCompletionRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAI21LabsCompletionRequestEntity.java new file mode 100644 index 0000000000000..6e2f2f6702005 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAI21LabsCompletionRequestEntity.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import com.amazonaws.services.bedrockruntime.model.ConverseRequest; +import com.amazonaws.services.bedrockruntime.model.InferenceConfiguration; + +import org.elasticsearch.core.Nullable; + +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseUtils.getConverseMessageList; + +public record AmazonBedrockAI21LabsCompletionRequestEntity( + List messages, + @Nullable Double temperature, + @Nullable Double topP, + @Nullable Integer maxTokenCount +) implements AmazonBedrockConverseRequestEntity { + + public AmazonBedrockAI21LabsCompletionRequestEntity { + Objects.requireNonNull(messages); + } + + @Override + public ConverseRequest addMessages(ConverseRequest request) { + return request.withMessages(getConverseMessageList(messages)); + } + + @Override + public ConverseRequest addInferenceConfig(ConverseRequest request) { + if (temperature == null && topP == null && maxTokenCount == null) { + return request; + } + + InferenceConfiguration inferenceConfig = new InferenceConfiguration(); + + if (temperature != null) { + inferenceConfig = inferenceConfig.withTemperature(temperature.floatValue()); + } + + if (topP != null) { + inferenceConfig = inferenceConfig.withTopP(topP.floatValue()); + } + + if (maxTokenCount != null) { + inferenceConfig = inferenceConfig.withMaxTokens(maxTokenCount); + } + + return request.withInferenceConfig(inferenceConfig); + } + + @Override + public ConverseRequest addAdditionalModelFields(ConverseRequest request) { + return request; + } +} diff --git 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAnthropicCompletionRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAnthropicCompletionRequestEntity.java new file mode 100644 index 0000000000000..a8b0032af09c5 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAnthropicCompletionRequestEntity.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import com.amazonaws.services.bedrockruntime.model.ConverseRequest; +import com.amazonaws.services.bedrockruntime.model.InferenceConfiguration; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Strings; + +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseUtils.getConverseMessageList; + +public record AmazonBedrockAnthropicCompletionRequestEntity( + List messages, + @Nullable Double temperature, + @Nullable Double topP, + @Nullable Double topK, + @Nullable Integer maxTokenCount +) implements AmazonBedrockConverseRequestEntity { + + public AmazonBedrockAnthropicCompletionRequestEntity { + Objects.requireNonNull(messages); + } + + @Override + public ConverseRequest addMessages(ConverseRequest request) { + return request.withMessages(getConverseMessageList(messages)); + } + + @Override + public ConverseRequest addInferenceConfig(ConverseRequest request) { + if (temperature == null && topP == null && maxTokenCount == null) { + return request; + } + + InferenceConfiguration inferenceConfig = new InferenceConfiguration(); + + if (temperature != null) { + inferenceConfig = inferenceConfig.withTemperature(temperature.floatValue()); + } + + if (topP != null) { + inferenceConfig = inferenceConfig.withTopP(topP.floatValue()); + } + + if (maxTokenCount != null) { + inferenceConfig = inferenceConfig.withMaxTokens(maxTokenCount); + } + + return request.withInferenceConfig(inferenceConfig); + } + + @Override + public ConverseRequest addAdditionalModelFields(ConverseRequest request) { + if (topK == null) { + return request; + } + + String topKField = Strings.format("{\"top_k\":%f}", topK.floatValue()); + return request.withAdditionalModelResponseFieldPaths(topKField); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockChatCompletionEntityFactory.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockChatCompletionEntityFactory.java new file mode 100644 index 0000000000000..f86d2229d42ad --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockChatCompletionEntityFactory.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionModel; + +import java.util.List; +import java.util.Objects; + +public final class AmazonBedrockChatCompletionEntityFactory { + public static AmazonBedrockConverseRequestEntity createEntity(AmazonBedrockChatCompletionModel model, List messages) { + Objects.requireNonNull(model); + Objects.requireNonNull(messages); + var serviceSettings = model.getServiceSettings(); + var taskSettings = model.getTaskSettings(); + switch (serviceSettings.provider()) { + case AI21LABS -> { + return new AmazonBedrockAI21LabsCompletionRequestEntity( + messages, + taskSettings.temperature(), + taskSettings.topP(), + taskSettings.maxNewTokens() + ); + } + case AMAZONTITAN -> { + return new AmazonBedrockTitanCompletionRequestEntity( + messages, + taskSettings.temperature(), + taskSettings.topP(), + taskSettings.maxNewTokens() + ); + } + case ANTHROPIC -> { + return new AmazonBedrockAnthropicCompletionRequestEntity( + messages, + taskSettings.temperature(), + taskSettings.topP(), + taskSettings.topK(), + taskSettings.maxNewTokens() + ); + } + case COHERE -> { + return new AmazonBedrockCohereCompletionRequestEntity( + messages, + taskSettings.temperature(), + taskSettings.topP(), + taskSettings.topK(), + taskSettings.maxNewTokens() + ); + } + case META -> { + return new AmazonBedrockMetaCompletionRequestEntity( + messages, + taskSettings.temperature(), + taskSettings.topP(), + taskSettings.maxNewTokens() + ); + } + case MISTRAL -> { + return new AmazonBedrockMistralCompletionRequestEntity( + messages, + taskSettings.temperature(), + taskSettings.topP(), + taskSettings.topK(), + taskSettings.maxNewTokens() + ); + } + default -> { + return null; + } + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockChatCompletionRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockChatCompletionRequest.java new file mode 100644 index 0000000000000..f02f05f2d3b17 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockChatCompletionRequest.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import com.amazonaws.services.bedrockruntime.model.ConverseRequest; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xpack.core.common.socket.SocketAccess; +import org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockBaseClient; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockRequest; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.completion.AmazonBedrockChatCompletionResponseListener; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionModel; + +import java.io.IOException; +import java.util.Objects; + +public class AmazonBedrockChatCompletionRequest extends AmazonBedrockRequest { + public static final String USER_ROLE = "user"; + private final AmazonBedrockConverseRequestEntity requestEntity; + private AmazonBedrockChatCompletionResponseListener listener; + + public AmazonBedrockChatCompletionRequest( + AmazonBedrockChatCompletionModel model, + AmazonBedrockConverseRequestEntity requestEntity, + @Nullable TimeValue timeout + ) { + super(model, timeout); + this.requestEntity = Objects.requireNonNull(requestEntity); + } + + @Override + protected void executeRequest(AmazonBedrockBaseClient client) { + var converseRequest = getConverseRequest(); + + try { + SocketAccess.doPrivileged(() -> client.converse(converseRequest, listener)); + } catch (IOException e) { + listener.onFailure(new RuntimeException(e)); + } + } + + @Override + public TaskType taskType() { + return TaskType.COMPLETION; + } + + private ConverseRequest getConverseRequest() { + var converseRequest = new ConverseRequest().withModelId(amazonBedrockModel.model()); + converseRequest = requestEntity.addMessages(converseRequest); + converseRequest = requestEntity.addInferenceConfig(converseRequest); + converseRequest = requestEntity.addAdditionalModelFields(converseRequest); + return converseRequest; + } + + public void executeChatCompletionRequest( + AmazonBedrockBaseClient awsBedrockClient, + AmazonBedrockChatCompletionResponseListener chatCompletionResponseListener + ) { + this.listener = chatCompletionResponseListener; + this.executeRequest(awsBedrockClient); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockCohereCompletionRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockCohereCompletionRequestEntity.java new file mode 100644 index 0000000000000..17a264ef820ff --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockCohereCompletionRequestEntity.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
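The getConverseRequest() method above builds the SDK request by chaining the three AmazonBedrockConverseRequestEntity methods. A hedged usage sketch with the AI21 Labs entity follows; the model id, message text, and parameter values are placeholders and are not taken from this diff.

import com.amazonaws.services.bedrockruntime.model.ConverseRequest;

import org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockAI21LabsCompletionRequestEntity;

import java.util.List;

final class ConverseRequestSketch {
    static ConverseRequest buildExample() {
        var entity = new AmazonBedrockAI21LabsCompletionRequestEntity(
            List.of("Summarize the Elastic License in one sentence."), // messages (placeholder)
            0.2,  // temperature (placeholder)
            null, // topP not set
            512   // maxNewTokens (placeholder)
        );
        // Same chaining order as AmazonBedrockChatCompletionRequest.getConverseRequest()
        var request = new ConverseRequest().withModelId("ai21.j2-ultra-v1"); // placeholder model id
        request = entity.addMessages(request);
        request = entity.addInferenceConfig(request);
        request = entity.addAdditionalModelFields(request);
        return request;
    }
}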
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import com.amazonaws.services.bedrockruntime.model.ConverseRequest; +import com.amazonaws.services.bedrockruntime.model.InferenceConfiguration; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Strings; + +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseUtils.getConverseMessageList; + +public record AmazonBedrockCohereCompletionRequestEntity( + List messages, + @Nullable Double temperature, + @Nullable Double topP, + @Nullable Double topK, + @Nullable Integer maxTokenCount +) implements AmazonBedrockConverseRequestEntity { + + public AmazonBedrockCohereCompletionRequestEntity { + Objects.requireNonNull(messages); + } + + @Override + public ConverseRequest addMessages(ConverseRequest request) { + return request.withMessages(getConverseMessageList(messages)); + } + + @Override + public ConverseRequest addInferenceConfig(ConverseRequest request) { + if (temperature == null && topP == null && maxTokenCount == null) { + return request; + } + + InferenceConfiguration inferenceConfig = new InferenceConfiguration(); + + if (temperature != null) { + inferenceConfig = inferenceConfig.withTemperature(temperature.floatValue()); + } + + if (topP != null) { + inferenceConfig = inferenceConfig.withTopP(topP.floatValue()); + } + + if (maxTokenCount != null) { + inferenceConfig = inferenceConfig.withMaxTokens(maxTokenCount); + } + + return request.withInferenceConfig(inferenceConfig); + } + + @Override + public ConverseRequest addAdditionalModelFields(ConverseRequest request) { + if (topK == null) { + return request; + } + + String topKField = Strings.format("{\"top_k\":%f}", topK.floatValue()); + return request.withAdditionalModelResponseFieldPaths(topKField); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockConverseRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockConverseRequestEntity.java new file mode 100644 index 0000000000000..fbd55e76e509b --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockConverseRequestEntity.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import com.amazonaws.services.bedrockruntime.model.ConverseRequest; + +public interface AmazonBedrockConverseRequestEntity { + ConverseRequest addMessages(ConverseRequest request); + + ConverseRequest addInferenceConfig(ConverseRequest request); + + ConverseRequest addAdditionalModelFields(ConverseRequest request); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockConverseUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockConverseUtils.java new file mode 100644 index 0000000000000..2cfb56a94b319 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockConverseUtils.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import com.amazonaws.services.bedrockruntime.model.ContentBlock; +import com.amazonaws.services.bedrockruntime.model.Message; + +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockChatCompletionRequest.USER_ROLE; + +public final class AmazonBedrockConverseUtils { + + public static List getConverseMessageList(List messages) { + List messageList = new ArrayList<>(); + for (String message : messages) { + var messageContent = new ContentBlock().withText(message); + var returnMessage = (new Message()).withRole(USER_ROLE).withContent(messageContent); + messageList.add(returnMessage); + } + return messageList; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMetaCompletionRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMetaCompletionRequestEntity.java new file mode 100644 index 0000000000000..cdabdd4cbebff --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMetaCompletionRequestEntity.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import com.amazonaws.services.bedrockruntime.model.ConverseRequest; +import com.amazonaws.services.bedrockruntime.model.InferenceConfiguration; + +import org.elasticsearch.core.Nullable; + +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseUtils.getConverseMessageList; + +public record AmazonBedrockMetaCompletionRequestEntity( + List messages, + @Nullable Double temperature, + @Nullable Double topP, + @Nullable Integer maxTokenCount +) implements AmazonBedrockConverseRequestEntity { + + public AmazonBedrockMetaCompletionRequestEntity { + Objects.requireNonNull(messages); + } + + @Override + public ConverseRequest addMessages(ConverseRequest request) { + return request.withMessages(getConverseMessageList(messages)); + } + + @Override + public ConverseRequest addInferenceConfig(ConverseRequest request) { + if (temperature == null && topP == null && maxTokenCount == null) { + return request; + } + + InferenceConfiguration inferenceConfig = new InferenceConfiguration(); + + if (temperature != null) { + inferenceConfig = inferenceConfig.withTemperature(temperature.floatValue()); + } + + if (topP != null) { + inferenceConfig = inferenceConfig.withTopP(topP.floatValue()); + } + + if (maxTokenCount != null) { + inferenceConfig = inferenceConfig.withMaxTokens(maxTokenCount); + } + + return request.withInferenceConfig(inferenceConfig); + } + + @Override + public ConverseRequest addAdditionalModelFields(ConverseRequest request) { + return request; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMistralCompletionRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMistralCompletionRequestEntity.java new file mode 100644 index 0000000000000..c68eaa1b81f54 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMistralCompletionRequestEntity.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import com.amazonaws.services.bedrockruntime.model.ConverseRequest; +import com.amazonaws.services.bedrockruntime.model.InferenceConfiguration; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Strings; + +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseUtils.getConverseMessageList; + +public record AmazonBedrockMistralCompletionRequestEntity( + List messages, + @Nullable Double temperature, + @Nullable Double topP, + @Nullable Double topK, + @Nullable Integer maxTokenCount +) implements AmazonBedrockConverseRequestEntity { + + public AmazonBedrockMistralCompletionRequestEntity { + Objects.requireNonNull(messages); + } + + @Override + public ConverseRequest addMessages(ConverseRequest request) { + return request.withMessages(getConverseMessageList(messages)); + } + + @Override + public ConverseRequest addInferenceConfig(ConverseRequest request) { + if (temperature == null && topP == null && maxTokenCount == null) { + return request; + } + + InferenceConfiguration inferenceConfig = new InferenceConfiguration(); + + if (temperature != null) { + inferenceConfig = inferenceConfig.withTemperature(temperature.floatValue()); + } + + if (topP != null) { + inferenceConfig = inferenceConfig.withTopP(topP.floatValue()); + } + + if (maxTokenCount != null) { + inferenceConfig = inferenceConfig.withMaxTokens(maxTokenCount); + } + + return request.withInferenceConfig(inferenceConfig); + } + + @Override + public ConverseRequest addAdditionalModelFields(ConverseRequest request) { + if (topK == null) { + return request; + } + + String topKField = Strings.format("{\"top_k\":%f}", topK.floatValue()); + return request.withAdditionalModelResponseFieldPaths(topKField); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockTitanCompletionRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockTitanCompletionRequestEntity.java new file mode 100644 index 0000000000000..d56035b80e9ef --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockTitanCompletionRequestEntity.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import com.amazonaws.services.bedrockruntime.model.ConverseRequest; +import com.amazonaws.services.bedrockruntime.model.InferenceConfiguration; + +import org.elasticsearch.core.Nullable; + +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseUtils.getConverseMessageList; + +public record AmazonBedrockTitanCompletionRequestEntity( + List messages, + @Nullable Double temperature, + @Nullable Double topP, + @Nullable Integer maxTokenCount +) implements AmazonBedrockConverseRequestEntity { + + public AmazonBedrockTitanCompletionRequestEntity { + Objects.requireNonNull(messages); + } + + @Override + public ConverseRequest addMessages(ConverseRequest request) { + return request.withMessages(getConverseMessageList(messages)); + } + + @Override + public ConverseRequest addInferenceConfig(ConverseRequest request) { + if (temperature == null && topP == null && maxTokenCount == null) { + return request; + } + + InferenceConfiguration inferenceConfig = new InferenceConfiguration(); + + if (temperature != null) { + inferenceConfig = inferenceConfig.withTemperature(temperature.floatValue()); + } + + if (topP != null) { + inferenceConfig = inferenceConfig.withTopP(topP.floatValue()); + } + + if (maxTokenCount != null) { + inferenceConfig = inferenceConfig.withMaxTokens(maxTokenCount); + } + + return request.withInferenceConfig(inferenceConfig); + } + + @Override + public ConverseRequest addAdditionalModelFields(ConverseRequest request) { + return request; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockCohereEmbeddingsRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockCohereEmbeddingsRequestEntity.java new file mode 100644 index 0000000000000..edca5bc1bdf9c --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockCohereEmbeddingsRequestEntity.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings; + +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +public record AmazonBedrockCohereEmbeddingsRequestEntity(List input) implements ToXContentObject { + + private static final String TEXTS_FIELD = "texts"; + private static final String INPUT_TYPE_FIELD = "input_type"; + private static final String INPUT_TYPE_SEARCH_DOCUMENT = "search_document"; + + public AmazonBedrockCohereEmbeddingsRequestEntity { + Objects.requireNonNull(input); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(TEXTS_FIELD, input); + builder.field(INPUT_TYPE_FIELD, INPUT_TYPE_SEARCH_DOCUMENT); + builder.endObject(); + return builder; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockEmbeddingsEntityFactory.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockEmbeddingsEntityFactory.java new file mode 100644 index 0000000000000..a31b033507264 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockEmbeddingsEntityFactory.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xpack.inference.common.Truncator; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModel; + +import java.util.Objects; + +public final class AmazonBedrockEmbeddingsEntityFactory { + public static ToXContent createEntity(AmazonBedrockEmbeddingsModel model, Truncator.TruncationResult truncationResult) { + Objects.requireNonNull(model); + Objects.requireNonNull(truncationResult); + + var serviceSettings = model.getServiceSettings(); + + var truncatedInput = truncationResult.input(); + if (truncatedInput == null || truncatedInput.isEmpty()) { + throw new ElasticsearchException("[input] cannot be null or empty"); + } + + switch (serviceSettings.provider()) { + case AMAZONTITAN -> { + if (truncatedInput.size() > 1) { + throw new ElasticsearchException("[input] cannot contain more than one string"); + } + return new AmazonBedrockTitanEmbeddingsRequestEntity(truncatedInput.get(0)); + } + case COHERE -> { + return new AmazonBedrockCohereEmbeddingsRequestEntity(truncatedInput); + } + default -> { + return null; + } + } + + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockEmbeddingsRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockEmbeddingsRequest.java new file mode 100644 index 0000000000000..96d3b3a3cc057 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockEmbeddingsRequest.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings; + +import com.amazonaws.services.bedrockruntime.model.InvokeModelRequest; +import com.amazonaws.services.bedrockruntime.model.InvokeModelResult; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xpack.core.common.socket.SocketAccess; +import org.elasticsearch.xpack.inference.common.Truncator; +import org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockBaseClient; +import org.elasticsearch.xpack.inference.external.request.Request; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockJsonBuilder; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockRequest; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.embeddings.AmazonBedrockEmbeddingsResponseListener; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModel; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Objects; + +public class AmazonBedrockEmbeddingsRequest extends AmazonBedrockRequest { + private final AmazonBedrockEmbeddingsModel embeddingsModel; + private final ToXContent requestEntity; + private final Truncator truncator; + private final Truncator.TruncationResult truncationResult; + private final AmazonBedrockProvider provider; + private ActionListener listener = null; + + public AmazonBedrockEmbeddingsRequest( + Truncator truncator, + Truncator.TruncationResult input, + AmazonBedrockEmbeddingsModel model, + ToXContent requestEntity, + @Nullable TimeValue timeout + ) { + super(model, timeout); + this.truncator = Objects.requireNonNull(truncator); + this.truncationResult = Objects.requireNonNull(input); + this.requestEntity = Objects.requireNonNull(requestEntity); + this.embeddingsModel = model; + this.provider = model.provider(); + } + + public AmazonBedrockProvider provider() { + return provider; + } + + @Override + protected void executeRequest(AmazonBedrockBaseClient client) { + try { + var jsonBuilder = new AmazonBedrockJsonBuilder(requestEntity); + var bodyAsString = jsonBuilder.getStringContent(); + + var charset = StandardCharsets.UTF_8; + var bodyBuffer = charset.encode(bodyAsString); + + var invokeModelRequest = new InvokeModelRequest().withModelId(embeddingsModel.model()).withBody(bodyBuffer); + + SocketAccess.doPrivileged(() -> client.invokeModel(invokeModelRequest, listener)); + } catch (IOException e) { + listener.onFailure(new RuntimeException(e)); + } + } + + @Override + public Request truncate() { + var truncatedInput = truncator.truncate(truncationResult.input()); + return new AmazonBedrockEmbeddingsRequest(truncator, truncatedInput, embeddingsModel, requestEntity, timeout); + } + + @Override + public boolean[] getTruncationInfo() { + return truncationResult.truncated().clone(); + } + + @Override + public TaskType taskType() { + return TaskType.TEXT_EMBEDDING; + } + + public void executeEmbeddingsRequest( + AmazonBedrockBaseClient awsBedrockClient, + AmazonBedrockEmbeddingsResponseListener embeddingsResponseListener + ) { + this.listener = embeddingsResponseListener; + this.executeRequest(awsBedrockClient); + } +} diff --git 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockTitanEmbeddingsRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockTitanEmbeddingsRequestEntity.java new file mode 100644 index 0000000000000..f55edd0442913 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockTitanEmbeddingsRequestEntity.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings; + +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +public record AmazonBedrockTitanEmbeddingsRequestEntity(String inputText) implements ToXContentObject { + + private static final String INPUT_TEXT_FIELD = "inputText"; + + public AmazonBedrockTitanEmbeddingsRequestEntity { + Objects.requireNonNull(inputText); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(INPUT_TEXT_FIELD, inputText); + builder.endObject(); + return builder; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/anthropic/AnthropicChatCompletionRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/anthropic/AnthropicChatCompletionRequest.java new file mode 100644 index 0000000000000..fa6bb31d0f401 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/anthropic/AnthropicChatCompletionRequest.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
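For the embeddings path above, AmazonBedrockEmbeddingsRequest serializes one of these entities with AmazonBedrockJsonBuilder and sends the resulting bytes in an InvokeModelRequest. A small sketch of the JSON bodies the two provider entities produce, assuming a single placeholder input string; the wrapper class below is illustrative only.

import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockJsonBuilder;
import org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings.AmazonBedrockCohereEmbeddingsRequestEntity;
import org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings.AmazonBedrockTitanEmbeddingsRequestEntity;

import java.io.IOException;
import java.util.List;

final class BedrockEmbeddingsBodySketch {
    static void printBodies() throws IOException {
        var titanBody = new AmazonBedrockJsonBuilder(new AmazonBedrockTitanEmbeddingsRequestEntity("hello world")).getStringContent();
        // titanBody  -> {"inputText":"hello world"}
        var cohereBody = new AmazonBedrockJsonBuilder(new AmazonBedrockCohereEmbeddingsRequestEntity(List.of("hello world"))).getStringContent();
        // cohereBody -> {"texts":["hello world"],"input_type":"search_document"}
        System.out.println(titanBody);
        System.out.println(cohereBody);
    }
}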
+ */ + +package org.elasticsearch.xpack.inference.external.request.anthropic; + +import org.apache.http.HttpHeaders; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.entity.ByteArrayEntity; +import org.elasticsearch.common.Strings; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.external.anthropic.AnthropicAccount; +import org.elasticsearch.xpack.inference.external.request.HttpRequest; +import org.elasticsearch.xpack.inference.external.request.Request; +import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionModel; + +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.external.request.anthropic.AnthropicRequestUtils.createVersionHeader; + +public class AnthropicChatCompletionRequest implements Request { + + private final AnthropicAccount account; + private final List input; + private final AnthropicChatCompletionModel model; + + public AnthropicChatCompletionRequest(List input, AnthropicChatCompletionModel model) { + this.account = AnthropicAccount.of(model); + this.input = Objects.requireNonNull(input); + this.model = Objects.requireNonNull(model); + } + + @Override + public HttpRequest createHttpRequest() { + HttpPost httpPost = new HttpPost(account.uri()); + + ByteArrayEntity byteEntity = new ByteArrayEntity( + Strings.toString(new AnthropicChatCompletionRequestEntity(input, model.getServiceSettings(), model.getTaskSettings())) + .getBytes(StandardCharsets.UTF_8) + ); + httpPost.setEntity(byteEntity); + + httpPost.setHeader(HttpHeaders.CONTENT_TYPE, XContentType.JSON.mediaType()); + httpPost.setHeader(AnthropicRequestUtils.createAuthBearerHeader(account.apiKey())); + httpPost.setHeader(createVersionHeader()); + + return new HttpRequest(httpPost, getInferenceEntityId()); + } + + @Override + public URI getURI() { + return account.uri(); + } + + @Override + public Request truncate() { + // No truncation for Anthropic completions + return this; + } + + @Override + public boolean[] getTruncationInfo() { + // No truncation for Anthropic completions + return null; + } + + @Override + public String getInferenceEntityId() { + return model.getInferenceEntityId(); + } + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/anthropic/AnthropicChatCompletionRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/anthropic/AnthropicChatCompletionRequestEntity.java new file mode 100644 index 0000000000000..4186ad0a722ce --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/anthropic/AnthropicChatCompletionRequestEntity.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.anthropic; + +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionServiceSettings; +import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionTaskSettings; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +public class AnthropicChatCompletionRequestEntity implements ToXContentObject { + + private static final String MESSAGES_FIELD = "messages"; + private static final String MODEL_FIELD = "model"; + + private static final String ROLE_FIELD = "role"; + private static final String USER_VALUE = "user"; + private static final String CONTENT_FIELD = "content"; + private static final String MAX_TOKENS_FIELD = "max_tokens"; + private static final String TEMPERATURE_FIELD = "temperature"; + private static final String TOP_P_FIELD = "top_p"; + private static final String TOP_K_FIELD = "top_k"; + + private final List messages; + private final AnthropicChatCompletionServiceSettings serviceSettings; + private final AnthropicChatCompletionTaskSettings taskSettings; + + public AnthropicChatCompletionRequestEntity( + List messages, + AnthropicChatCompletionServiceSettings serviceSettings, + AnthropicChatCompletionTaskSettings taskSettings + ) { + this.messages = Objects.requireNonNull(messages); + this.serviceSettings = Objects.requireNonNull(serviceSettings); + this.taskSettings = Objects.requireNonNull(taskSettings); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + builder.startArray(MESSAGES_FIELD); + { + for (String message : messages) { + builder.startObject(); + + { + builder.field(ROLE_FIELD, USER_VALUE); + builder.field(CONTENT_FIELD, message); + } + + builder.endObject(); + } + } + builder.endArray(); + + builder.field(MODEL_FIELD, serviceSettings.modelId()); + builder.field(MAX_TOKENS_FIELD, taskSettings.maxTokens()); + + if (taskSettings.temperature() != null) { + builder.field(TEMPERATURE_FIELD, taskSettings.temperature()); + } + + if (taskSettings.topP() != null) { + builder.field(TOP_P_FIELD, taskSettings.topP()); + } + + if (taskSettings.topK() != null) { + builder.field(TOP_K_FIELD, taskSettings.topK()); + } + + builder.endObject(); + + return builder; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/anthropic/AnthropicRequestUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/anthropic/AnthropicRequestUtils.java new file mode 100644 index 0000000000000..2e8ce980dcc08 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/anthropic/AnthropicRequestUtils.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
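For reference, the body shape that AnthropicChatCompletionRequestEntity above serializes for a single user message; temperature, top_p, and top_k appear only when the corresponding task settings are non-null. The model id and numeric values below are placeholders.

final class AnthropicBodyExample {
    // Shape only; produced by AnthropicChatCompletionRequestEntity.toXContent(...)
    static final String EXAMPLE_BODY = """
        {
          "messages": [ { "role": "user", "content": "Hello" } ],
          "model": "claude-3-haiku",
          "max_tokens": 256,
          "temperature": 0.2,
          "top_p": 0.9,
          "top_k": 40
        }
        """;
}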
+ */ + +package org.elasticsearch.xpack.inference.external.request.anthropic; + +import org.apache.http.Header; +import org.apache.http.message.BasicHeader; +import org.elasticsearch.common.settings.SecureString; + +public class AnthropicRequestUtils { + public static final String HOST = "api.anthropic.com"; + public static final String API_VERSION_1 = "v1"; + public static final String MESSAGES_PATH = "messages"; + + public static final String ANTHROPIC_VERSION_2023_06_01 = "2023-06-01"; + + public static final String X_API_KEY = "x-api-key"; + public static final String VERSION = "anthropic-version"; + + public static Header createAuthBearerHeader(SecureString apiKey) { + return new BasicHeader(X_API_KEY, apiKey.toString()); + } + + public static Header createVersionHeader() { + return new BasicHeader(VERSION, ANTHROPIC_VERSION_2023_06_01); + } + + private AnthropicRequestUtils() {} +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/cohere/CohereRerankRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/cohere/CohereRerankRequest.java index 492807f74b32a..4ec04c0187329 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/cohere/CohereRerankRequest.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/cohere/CohereRerankRequest.java @@ -39,7 +39,7 @@ public CohereRerankRequest(String query, List input, CohereRerankModel m this.input = Objects.requireNonNull(input); this.query = Objects.requireNonNull(query); taskSettings = model.getTaskSettings(); - this.model = model.getServiceSettings().getCommonSettings().modelId(); + this.model = model.getServiceSettings().modelId(); inferenceEntityId = model.getInferenceEntityId(); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequest.java new file mode 100644 index 0000000000000..c0e36baf2e98f --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequest.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.googlevertexai; + +import org.apache.http.HttpHeaders; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.entity.ByteArrayEntity; +import org.elasticsearch.common.Strings; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.common.Truncator; +import org.elasticsearch.xpack.inference.external.request.HttpRequest; +import org.elasticsearch.xpack.inference.external.request.Request; +import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsModel; + +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.util.Objects; + +public class GoogleVertexAiEmbeddingsRequest implements GoogleVertexAiRequest { + + private final Truncator truncator; + + private final Truncator.TruncationResult truncationResult; + + private final GoogleVertexAiEmbeddingsModel model; + + public GoogleVertexAiEmbeddingsRequest(Truncator truncator, Truncator.TruncationResult input, GoogleVertexAiEmbeddingsModel model) { + this.truncator = Objects.requireNonNull(truncator); + this.truncationResult = Objects.requireNonNull(input); + this.model = Objects.requireNonNull(model); + } + + @Override + public HttpRequest createHttpRequest() { + HttpPost httpPost = new HttpPost(model.uri()); + + ByteArrayEntity byteEntity = new ByteArrayEntity( + Strings.toString(new GoogleVertexAiEmbeddingsRequestEntity(truncationResult.input(), model.getTaskSettings().autoTruncate())) + .getBytes(StandardCharsets.UTF_8) + ); + + httpPost.setEntity(byteEntity); + httpPost.setHeader(HttpHeaders.CONTENT_TYPE, XContentType.JSON.mediaType()); + + decorateWithAuth(httpPost); + + return new HttpRequest(httpPost, getInferenceEntityId()); + } + + public void decorateWithAuth(HttpPost httpPost) { + GoogleVertexAiRequest.decorateWithBearerToken(httpPost, model.getSecretSettings()); + } + + Truncator truncator() { + return truncator; + } + + Truncator.TruncationResult truncationResult() { + return truncationResult; + } + + GoogleVertexAiEmbeddingsModel model() { + return model; + } + + @Override + public String getInferenceEntityId() { + return model.getInferenceEntityId(); + } + + @Override + public URI getURI() { + return model.uri(); + } + + @Override + public Request truncate() { + var truncatedInput = truncator.truncate(truncationResult.input()); + + return new GoogleVertexAiEmbeddingsRequest(truncator, truncatedInput, model); + } + + @Override + public boolean[] getTruncationInfo() { + return truncationResult.truncated().clone(); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntity.java new file mode 100644 index 0000000000000..2fae999599ba2 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntity.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.googlevertexai; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +public record GoogleVertexAiEmbeddingsRequestEntity(List inputs, @Nullable Boolean autoTruncation) implements ToXContentObject { + + private static final String INSTANCES_FIELD = "instances"; + private static final String CONTENT_FIELD = "content"; + private static final String PARAMETERS_FIELD = "parameters"; + private static final String AUTO_TRUNCATE_FIELD = "autoTruncate"; + + public GoogleVertexAiEmbeddingsRequestEntity { + Objects.requireNonNull(inputs); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startArray(INSTANCES_FIELD); + + for (String input : inputs) { + builder.startObject(); + { + builder.field(CONTENT_FIELD, input); + } + builder.endObject(); + } + + builder.endArray(); + + if (autoTruncation != null) { + builder.startObject(PARAMETERS_FIELD); + { + builder.field(AUTO_TRUNCATE_FIELD, autoTruncation); + } + builder.endObject(); + } + builder.endObject(); + + return builder; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiRequest.java new file mode 100644 index 0000000000000..69859ef3de642 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiRequest.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
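Similarly, GoogleVertexAiEmbeddingsRequestEntity above produces an instances array plus an optional parameters object; the parameters block is emitted only when autoTruncate is set. An example body with a placeholder input string:

final class VertexAiEmbeddingsBodyExample {
    // Shape only; produced by GoogleVertexAiEmbeddingsRequestEntity.toXContent(...)
    static final String EXAMPLE_BODY = """
        {
          "instances": [ { "content": "hello world" } ],
          "parameters": { "autoTruncate": true }
        }
        """;
}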
+ */ + +package org.elasticsearch.xpack.inference.external.request.googlevertexai; + +import com.google.auth.oauth2.GoogleCredentials; +import com.google.auth.oauth2.ServiceAccountCredentials; + +import org.apache.http.client.methods.HttpPost; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.SpecialPermission; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.inference.external.request.Request; +import org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiSecretSettings; + +import java.io.ByteArrayInputStream; +import java.nio.charset.StandardCharsets; +import java.security.AccessController; +import java.security.PrivilegedExceptionAction; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.xpack.inference.external.request.RequestUtils.createAuthBearerHeader; + +public interface GoogleVertexAiRequest extends Request { + List AUTH_SCOPE = Collections.singletonList("https://www.googleapis.com/auth/cloud-platform"); + + static void decorateWithBearerToken(HttpPost httpPost, GoogleVertexAiSecretSettings secretSettings) { + SpecialPermission.check(); + try { + AccessController.doPrivileged((PrivilegedExceptionAction) () -> { + GoogleCredentials credentials = ServiceAccountCredentials.fromStream( + new ByteArrayInputStream(secretSettings.serviceAccountJson().toString().getBytes(StandardCharsets.UTF_8)) + ).createScoped(AUTH_SCOPE); + credentials.refreshIfExpired(); + httpPost.setHeader(createAuthBearerHeader(new SecureString(credentials.getAccessToken().getTokenValue().toCharArray()))); + + return null; + }); + } catch (Exception e) { + throw new ElasticsearchStatusException(e.getMessage(), RestStatus.FORBIDDEN, e); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiRerankRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiRerankRequest.java new file mode 100644 index 0000000000000..79606c63e0ed6 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiRerankRequest.java @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
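Editor's note: a usage sketch of the static GoogleVertexAiRequest.decorateWithBearerToken helper defined above. The httpPost URI and the secretSettings variable are placeholders; the call itself is exactly how the request classes in this change use the helper.

    // Sketch: attach a Google OAuth2 bearer token to an outgoing request.
    HttpPost httpPost = new HttpPost("https://example-aiplatform.googleapis.com/v1/...");  // placeholder URI
    GoogleVertexAiRequest.decorateWithBearerToken(httpPost, secretSettings);               // secretSettings: a configured GoogleVertexAiSecretSettings
    // On success the request carries an "Authorization: Bearer <access token>" header built from the
    // service account JSON; any credential failure surfaces as an ElasticsearchStatusException with
    // status 403 (FORBIDDEN), per the catch clause above.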
+ */ + +package org.elasticsearch.xpack.inference.external.request.googlevertexai; + +import org.apache.http.HttpHeaders; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.entity.ByteArrayEntity; +import org.elasticsearch.common.Strings; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.external.request.HttpRequest; +import org.elasticsearch.xpack.inference.external.request.Request; +import org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankModel; + +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.Objects; + +public class GoogleVertexAiRerankRequest implements GoogleVertexAiRequest { + + private final GoogleVertexAiRerankModel model; + + private final String query; + + private final List input; + + public GoogleVertexAiRerankRequest(String query, List input, GoogleVertexAiRerankModel model) { + this.model = Objects.requireNonNull(model); + this.query = Objects.requireNonNull(query); + this.input = Objects.requireNonNull(input); + } + + @Override + public HttpRequest createHttpRequest() { + HttpPost httpPost = new HttpPost(model.uri()); + + ByteArrayEntity byteEntity = new ByteArrayEntity( + Strings.toString( + new GoogleVertexAiRerankRequestEntity(query, input, model.getServiceSettings().modelId(), model.getTaskSettings().topN()) + ).getBytes(StandardCharsets.UTF_8) + ); + + httpPost.setEntity(byteEntity); + httpPost.setHeader(HttpHeaders.CONTENT_TYPE, XContentType.JSON.mediaType()); + + decorateWithAuth(httpPost); + + return new HttpRequest(httpPost, getInferenceEntityId()); + } + + public void decorateWithAuth(HttpPost httpPost) { + GoogleVertexAiRequest.decorateWithBearerToken(httpPost, model.getSecretSettings()); + } + + public GoogleVertexAiRerankModel model() { + return model; + } + + @Override + public String getInferenceEntityId() { + return model.getInferenceEntityId(); + } + + @Override + public URI getURI() { + return model.uri(); + } + + @Override + public Request truncate() { + return this; + } + + @Override + public boolean[] getTruncationInfo() { + return null; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiRerankRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiRerankRequestEntity.java new file mode 100644 index 0000000000000..2cac067f622cc --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiRerankRequestEntity.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.googlevertexai; + +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +public record GoogleVertexAiRerankRequestEntity(String query, List inputs, @Nullable String model, @Nullable Integer topN) + implements + ToXContentObject { + + private static final String MODEL_FIELD = "model"; + private static final String QUERY_FIELD = "query"; + private static final String RECORDS_FIELD = "records"; + private static final String ID_FIELD = "id"; + + private static final String CONTENT_FIELD = "content"; + private static final String TOP_N_FIELD = "topN"; + + public GoogleVertexAiRerankRequestEntity { + Objects.requireNonNull(query); + Objects.requireNonNull(inputs); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + if (model != null) { + builder.field(MODEL_FIELD, model); + } + + builder.field(QUERY_FIELD, query); + + builder.startArray(RECORDS_FIELD); + + for (int recordId = 0; recordId < inputs.size(); recordId++) { + builder.startObject(); + + { + builder.field(ID_FIELD, String.valueOf(recordId)); + builder.field(CONTENT_FIELD, inputs.get(recordId)); + } + + builder.endObject(); + } + + builder.endArray(); + + if (topN != null) { + builder.field(TOP_N_FIELD, topN); + } + + builder.endObject(); + + return builder; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiUtils.java new file mode 100644 index 0000000000000..505676ff457bc --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiUtils.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
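Editor's note: a sketch of the body produced by the GoogleVertexAiRerankRequestEntity above for a query plus two inputs with topN set. Field names come from the record's constants; the query, document and model values are illustrative only.

    // Illustrative sketch, not part of the change itself.
    var entity = new GoogleVertexAiRerankRequestEntity("some query", List.of("first doc", "second doc"), "semantic-ranker-512", 2);
    String body = Strings.toString(entity);
    // Expected shape (records get sequential string ids starting at "0"):
    // {"model":"semantic-ranker-512","query":"some query",
    //  "records":[{"id":"0","content":"first doc"},{"id":"1","content":"second doc"}],"topN":2}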
+ */ + +package org.elasticsearch.xpack.inference.external.request.googlevertexai; + +public final class GoogleVertexAiUtils { + + public static final String GOOGLE_VERTEX_AI_HOST_SUFFIX = "-aiplatform.googleapis.com"; + + public static final String GOOGLE_DISCOVERY_ENGINE_HOST = "discoveryengine.googleapis.com"; + + public static final String V1 = "v1"; + + public static final String PROJECTS = "projects"; + + public static final String LOCATIONS = "locations"; + + public static final String GLOBAL = "global"; + + public static final String RANKING_CONFIGS = "rankingConfigs"; + + public static final String DEFAULT_RANKING_CONFIG = "default_ranking_config"; + + public static final String PUBLISHERS = "publishers"; + + public static final String PUBLISHER_GOOGLE = "google"; + + public static final String MODELS = "models"; + + public static final String PREDICT = "predict"; + + public static final String RANK = "rank"; + + private GoogleVertexAiUtils() {} + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/AzureMistralOpenAiExternalResponseHandler.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/AzureMistralOpenAiExternalResponseHandler.java index dfdb6712d5e45..e4e96ca644c7f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/AzureMistralOpenAiExternalResponseHandler.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/AzureMistralOpenAiExternalResponseHandler.java @@ -116,7 +116,7 @@ public static boolean isContentTooLarge(HttpResult result) { } if (statusCode == 400) { - var errorEntity = AzureMistralOpenAiErrorResponseEntity.fromResponse(result); + var errorEntity = ErrorMessageResponseEntity.fromResponse(result); return errorEntity != null && errorEntity.getErrorMessage().contains(CONTENT_TOO_LARGE_MESSAGE); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/AzureMistralOpenAiErrorResponseEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/ErrorMessageResponseEntity.java similarity index 90% rename from x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/AzureMistralOpenAiErrorResponseEntity.java rename to x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/ErrorMessageResponseEntity.java index 83ea7801dfd58..dbf2b37955b22 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/AzureMistralOpenAiErrorResponseEntity.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/ErrorMessageResponseEntity.java @@ -31,10 +31,10 @@ * This currently covers error handling for Azure AI Studio, however this pattern * can be used to simplify and refactor handling for Azure OpenAI and OpenAI responses. 
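Editor's note: the GoogleVertexAiUtils constants above are path segments only; the URI assembly lives in the model classes, which are not part of this excerpt. Assuming the documented Vertex AI prediction endpoint layout, they would compose roughly as below (project, location and model values are placeholders).

    // Rough sketch under the assumption above, not the actual builder code from this change.
    String embeddingsUri = "https://" + "us-central1" + GoogleVertexAiUtils.GOOGLE_VERTEX_AI_HOST_SUFFIX
        + "/" + GoogleVertexAiUtils.V1 + "/" + GoogleVertexAiUtils.PROJECTS + "/my-project"
        + "/" + GoogleVertexAiUtils.LOCATIONS + "/us-central1"
        + "/" + GoogleVertexAiUtils.PUBLISHERS + "/" + GoogleVertexAiUtils.PUBLISHER_GOOGLE
        + "/" + GoogleVertexAiUtils.MODELS + "/text-embedding-004:" + GoogleVertexAiUtils.PREDICT;
    // -> https://us-central1-aiplatform.googleapis.com/v1/projects/my-project/locations/us-central1
    //        /publishers/google/models/text-embedding-004:predict
    // The rerank endpoint would similarly be built from GOOGLE_DISCOVERY_ENGINE_HOST, GLOBAL,
    // RANKING_CONFIGS, DEFAULT_RANKING_CONFIG and RANK.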
*/ -public class AzureMistralOpenAiErrorResponseEntity implements ErrorMessage { +public class ErrorMessageResponseEntity implements ErrorMessage { protected String errorMessage; - public AzureMistralOpenAiErrorResponseEntity(String errorMessage) { + public ErrorMessageResponseEntity(String errorMessage) { this.errorMessage = errorMessage; } @@ -62,7 +62,7 @@ public static ErrorMessage fromResponse(HttpResult response) { if (error != null) { var message = (String) error.get("message"); if (message != null) { - return new AzureMistralOpenAiErrorResponseEntity(message); + return new ErrorMessageResponseEntity(message); } } } catch (Exception e) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/XContentUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/XContentUtils.java index a4f48510bc0e6..e0ba93e52c903 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/XContentUtils.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/XContentUtils.java @@ -53,6 +53,33 @@ public static void positionParserAtTokenAfterField(XContentParser parser, String throw new IllegalStateException(format(errorMsgTemplate, field)); } + /** + * Iterates over the tokens until it finds a field name token with the text matching the field requested + * inside the current object (does not include nested objects). + * + * @param parser parser to move + * @param field the field name to find + * @param errorMsgTemplate a template message to populate an exception if the field cannot be found + * @throws IllegalStateException if the field cannot be found + */ + public static void positionParserAtTokenAfterFieldCurrentFlatObj(XContentParser parser, String field, String errorMsgTemplate) + throws IOException { + XContentParser.Token token = parser.nextToken(); + + while (token != null + && token != XContentParser.Token.END_OBJECT + && token != XContentParser.Token.START_OBJECT + && token != XContentParser.Token.START_ARRAY) { + if (token == XContentParser.Token.FIELD_NAME && parser.currentName().equals(field)) { + parser.nextToken(); + return; + } + token = parser.nextToken(); + } + + throw new IllegalStateException(format(errorMsgTemplate, field)); + } + /** * Progress the parser consuming and discarding tokens until the * parser points to the end of the current object. Nested objects diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/AmazonBedrockResponse.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/AmazonBedrockResponse.java new file mode 100644 index 0000000000000..54b05137acda3 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/AmazonBedrockResponse.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
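Editor's note: a small usage sketch of the new XContentUtils.positionParserAtTokenAfterFieldCurrentFlatObj helper added above. The JSON string and field names are illustrative; the snippet would sit in a method declaring throws IOException.

    // Sketch: the helper scans only the leading flat part of the current object and
    // stops at the first nested object or array.
    String json = "{\"status\":\"ok\",\"details\":{\"reason\":\"nested\"}}";
    try (XContentParser parser = XContentFactory.xContent(XContentType.JSON)
            .createParser(XContentParserConfiguration.EMPTY, json)) {
        parser.nextToken();  // position at START_OBJECT
        XContentUtils.positionParserAtTokenAfterFieldCurrentFlatObj(parser, "status", "missing [%s]");
        String status = parser.text();  // "ok"
        // Asking for "reason" instead would throw IllegalStateException, because the parser
        // stops as soon as it reaches the nested "details" object.
    }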
+ */ + +package org.elasticsearch.xpack.inference.external.response.amazonbedrock; + +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockRequest; + +public abstract class AmazonBedrockResponse { + public abstract InferenceServiceResults accept(AmazonBedrockRequest request); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/AmazonBedrockResponseHandler.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/AmazonBedrockResponseHandler.java new file mode 100644 index 0000000000000..9dc15ea667c1d --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/AmazonBedrockResponseHandler.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.response.amazonbedrock; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.external.http.retry.ResponseHandler; +import org.elasticsearch.xpack.inference.external.http.retry.RetryException; +import org.elasticsearch.xpack.inference.external.request.Request; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; + +public abstract class AmazonBedrockResponseHandler implements ResponseHandler { + @Override + public final void validateResponse(ThrottlerManager throttlerManager, Logger logger, Request request, HttpResult result) + throws RetryException { + // do nothing as the AWS SDK will take care of validation for us + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/AmazonBedrockResponseListener.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/AmazonBedrockResponseListener.java new file mode 100644 index 0000000000000..ce4d6d1dea655 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/AmazonBedrockResponseListener.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.response.amazonbedrock; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockRequest; + +import java.util.Objects; + +public class AmazonBedrockResponseListener { + protected final AmazonBedrockRequest request; + protected final ActionListener inferenceResultsListener; + protected final AmazonBedrockResponseHandler responseHandler; + + public AmazonBedrockResponseListener( + AmazonBedrockRequest request, + AmazonBedrockResponseHandler responseHandler, + ActionListener inferenceResultsListener + ) { + this.request = Objects.requireNonNull(request); + this.responseHandler = Objects.requireNonNull(responseHandler); + this.inferenceResultsListener = Objects.requireNonNull(inferenceResultsListener); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/completion/AmazonBedrockChatCompletionResponse.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/completion/AmazonBedrockChatCompletionResponse.java new file mode 100644 index 0000000000000..5b3872e2c416a --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/completion/AmazonBedrockChatCompletionResponse.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.response.amazonbedrock.completion; + +import com.amazonaws.services.bedrockruntime.model.ConverseResult; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.core.inference.results.ChatCompletionResults; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockRequest; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockChatCompletionRequest; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponse; + +import java.util.ArrayList; + +public class AmazonBedrockChatCompletionResponse extends AmazonBedrockResponse { + + private final ConverseResult result; + + public AmazonBedrockChatCompletionResponse(ConverseResult responseResult) { + this.result = responseResult; + } + + @Override + public InferenceServiceResults accept(AmazonBedrockRequest request) { + if (request instanceof AmazonBedrockChatCompletionRequest asChatCompletionRequest) { + return fromResponse(result); + } + + throw new ElasticsearchException("unexpected request type [" + request.getClass() + "]"); + } + + public static ChatCompletionResults fromResponse(ConverseResult response) { + var responseMessage = response.getOutput().getMessage(); + + var messageContents = responseMessage.getContent(); + var resultTexts = new ArrayList(); + for (var messageContent : messageContents) { + resultTexts.add(new ChatCompletionResults.Result(messageContent.getText())); + } + + return new ChatCompletionResults(resultTexts); + } +} diff --git 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/completion/AmazonBedrockChatCompletionResponseHandler.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/completion/AmazonBedrockChatCompletionResponseHandler.java new file mode 100644 index 0000000000000..a24f54c50eef3 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/completion/AmazonBedrockChatCompletionResponseHandler.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.response.amazonbedrock.completion; + +import com.amazonaws.services.bedrockruntime.model.ConverseResult; + +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.external.http.retry.RetryException; +import org.elasticsearch.xpack.inference.external.request.Request; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockRequest; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponseHandler; + +public class AmazonBedrockChatCompletionResponseHandler extends AmazonBedrockResponseHandler { + + private ConverseResult responseResult; + + public AmazonBedrockChatCompletionResponseHandler() {} + + @Override + public InferenceServiceResults parseResult(Request request, HttpResult result) throws RetryException { + var response = new AmazonBedrockChatCompletionResponse(responseResult); + return response.accept((AmazonBedrockRequest) request); + } + + @Override + public String getRequestType() { + return "Amazon Bedrock Chat Completion"; + } + + public void acceptChatCompletionResponseObject(ConverseResult response) { + this.responseResult = response; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/completion/AmazonBedrockChatCompletionResponseListener.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/completion/AmazonBedrockChatCompletionResponseListener.java new file mode 100644 index 0000000000000..be03ba84571eb --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/completion/AmazonBedrockChatCompletionResponseListener.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.response.amazonbedrock.completion; + +import com.amazonaws.services.bedrockruntime.model.ConverseResult; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockChatCompletionRequest; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponseHandler; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponseListener; + +public class AmazonBedrockChatCompletionResponseListener extends AmazonBedrockResponseListener implements ActionListener { + + public AmazonBedrockChatCompletionResponseListener( + AmazonBedrockChatCompletionRequest request, + AmazonBedrockResponseHandler responseHandler, + ActionListener inferenceResultsListener + ) { + super(request, responseHandler, inferenceResultsListener); + } + + @Override + public void onResponse(ConverseResult result) { + ((AmazonBedrockChatCompletionResponseHandler) responseHandler).acceptChatCompletionResponseObject(result); + inferenceResultsListener.onResponse(responseHandler.parseResult(request, null)); + } + + @Override + public void onFailure(Exception e) { + throw new ElasticsearchException(e); + } + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/embeddings/AmazonBedrockEmbeddingsResponse.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/embeddings/AmazonBedrockEmbeddingsResponse.java new file mode 100644 index 0000000000000..83fa790acbe68 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/embeddings/AmazonBedrockEmbeddingsResponse.java @@ -0,0 +1,132 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
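Editor's note: how the AmazonBedrockChatCompletionResponseHandler and AmazonBedrockChatCompletionResponseListener above fit together is easiest to see as a wiring sketch. The sender that performs the actual AWS SDK call is outside this excerpt, and the request, resultsListener and converseResult variables are placeholders.

    // Sketch: the SDK completion callback hands the ConverseResult to the handler,
    // and the listener then converts it into InferenceServiceResults for the upstream ActionListener.
    var handler = new AmazonBedrockChatCompletionResponseHandler();
    var listener = new AmazonBedrockChatCompletionResponseListener(request, handler, resultsListener);
    listener.onResponse(converseResult);  // stores the result on the handler, then calls handler.parseResult(request, null)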
+ */ + +package org.elasticsearch.xpack.inference.external.response.amazonbedrock.embeddings; + +import com.amazonaws.services.bedrockruntime.model.InvokeModelResult; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockRequest; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings.AmazonBedrockEmbeddingsRequest; +import org.elasticsearch.xpack.inference.external.response.XContentUtils; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponse; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.elasticsearch.common.xcontent.XContentParserUtils.parseList; +import static org.elasticsearch.xpack.inference.external.response.XContentUtils.positionParserAtTokenAfterField; + +public class AmazonBedrockEmbeddingsResponse extends AmazonBedrockResponse { + private static final String FAILED_TO_FIND_FIELD_TEMPLATE = "Failed to find required field [%s] in Amazon Bedrock embeddings response"; + private final InvokeModelResult result; + + public AmazonBedrockEmbeddingsResponse(InvokeModelResult invokeModelResult) { + this.result = invokeModelResult; + } + + @Override + public InferenceServiceResults accept(AmazonBedrockRequest request) { + if (request instanceof AmazonBedrockEmbeddingsRequest asEmbeddingsRequest) { + return fromResponse(result, asEmbeddingsRequest.provider()); + } + + throw new ElasticsearchException("unexpected request type [" + request.getClass() + "]"); + } + + public static InferenceTextEmbeddingFloatResults fromResponse(InvokeModelResult response, AmazonBedrockProvider provider) { + var charset = StandardCharsets.UTF_8; + var bodyText = String.valueOf(charset.decode(response.getBody())); + + var parserConfig = XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE); + + try (XContentParser jsonParser = XContentFactory.xContent(XContentType.JSON).createParser(parserConfig, bodyText)) { + // move to the first token + jsonParser.nextToken(); + + XContentParser.Token token = jsonParser.currentToken(); + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, jsonParser); + + var embeddingList = parseEmbeddings(jsonParser, provider); + + return new InferenceTextEmbeddingFloatResults(embeddingList); + } catch (IOException e) { + throw new ElasticsearchException(e); + } + } + + private static List parseEmbeddings( + XContentParser jsonParser, + AmazonBedrockProvider provider + ) throws IOException { + switch (provider) { + case AMAZONTITAN -> { + return parseTitanEmbeddings(jsonParser); + } + case COHERE -> { + return parseCohereEmbeddings(jsonParser); + } + default -> throw new IOException("Unsupported provider [" + provider + "]"); + } + } + + private static List parseTitanEmbeddings(XContentParser parser) + throws 
IOException { + /* + Titan response: + { + "embedding": [float, float, ...], + "inputTextTokenCount": int + } + */ + positionParserAtTokenAfterField(parser, "embedding", FAILED_TO_FIND_FIELD_TEMPLATE); + List embeddingValuesList = parseList(parser, XContentUtils::parseFloat); + var embeddingValues = InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding.of(embeddingValuesList); + return List.of(embeddingValues); + } + + private static List parseCohereEmbeddings(XContentParser parser) + throws IOException { + /* + Cohere response: + { + "embeddings": [ + [< array of 1024 floats >], + ... + ], + "id": string, + "response_type" : "embeddings_floats", + "texts": [string] + } + */ + positionParserAtTokenAfterField(parser, "embeddings", FAILED_TO_FIND_FIELD_TEMPLATE); + + List embeddingList = parseList( + parser, + AmazonBedrockEmbeddingsResponse::parseCohereEmbeddingsListItem + ); + + return embeddingList; + } + + private static InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding parseCohereEmbeddingsListItem(XContentParser parser) + throws IOException { + List embeddingValuesList = parseList(parser, XContentUtils::parseFloat); + return InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding.of(embeddingValuesList); + } + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/embeddings/AmazonBedrockEmbeddingsResponseHandler.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/embeddings/AmazonBedrockEmbeddingsResponseHandler.java new file mode 100644 index 0000000000000..a3fb68ee23486 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/embeddings/AmazonBedrockEmbeddingsResponseHandler.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.response.amazonbedrock.embeddings; + +import com.amazonaws.services.bedrockruntime.model.InvokeModelResult; + +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.external.http.retry.RetryException; +import org.elasticsearch.xpack.inference.external.request.Request; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockRequest; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponseHandler; + +public class AmazonBedrockEmbeddingsResponseHandler extends AmazonBedrockResponseHandler { + + private InvokeModelResult invokeModelResult; + + @Override + public InferenceServiceResults parseResult(Request request, HttpResult result) throws RetryException { + var responseParser = new AmazonBedrockEmbeddingsResponse(invokeModelResult); + return responseParser.accept((AmazonBedrockRequest) request); + } + + @Override + public String getRequestType() { + return "Amazon Bedrock Embeddings"; + } + + public void acceptEmbeddingsResult(InvokeModelResult result) { + this.invokeModelResult = result; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/embeddings/AmazonBedrockEmbeddingsResponseListener.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/embeddings/AmazonBedrockEmbeddingsResponseListener.java new file mode 100644 index 0000000000000..36519ae31ff60 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/amazonbedrock/embeddings/AmazonBedrockEmbeddingsResponseListener.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.response.amazonbedrock.embeddings; + +import com.amazonaws.services.bedrockruntime.model.InvokeModelResult; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings.AmazonBedrockEmbeddingsRequest; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponseHandler; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponseListener; + +public class AmazonBedrockEmbeddingsResponseListener extends AmazonBedrockResponseListener implements ActionListener { + + public AmazonBedrockEmbeddingsResponseListener( + AmazonBedrockEmbeddingsRequest request, + AmazonBedrockResponseHandler responseHandler, + ActionListener inferenceResultsListener + ) { + super(request, responseHandler, inferenceResultsListener); + } + + @Override + public void onResponse(InvokeModelResult result) { + ((AmazonBedrockEmbeddingsResponseHandler) responseHandler).acceptEmbeddingsResult(result); + inferenceResultsListener.onResponse(responseHandler.parseResult(request, null)); + } + + @Override + public void onFailure(Exception e) { + inferenceResultsListener.onFailure(e); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/anthropic/AnthropicChatCompletionResponseEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/anthropic/AnthropicChatCompletionResponseEntity.java new file mode 100644 index 0000000000000..75b504cbd8102 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/anthropic/AnthropicChatCompletionResponseEntity.java @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.response.anthropic; + +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.inference.results.ChatCompletionResults; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.external.request.Request; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.elasticsearch.common.xcontent.XContentParserUtils.parseList; +import static org.elasticsearch.xpack.inference.external.response.XContentUtils.moveToFirstToken; +import static org.elasticsearch.xpack.inference.external.response.XContentUtils.positionParserAtTokenAfterField; + +public class AnthropicChatCompletionResponseEntity { + + private static final String FAILED_TO_FIND_FIELD_TEMPLATE = "Failed to find required field [%s] in Anthropic chat completions response"; + + /** + * Parses the Anthropic chat completion response. + * For a request like: + * + *
      +     *     
      +     *         {
      +     *             "inputs": ["Please summarize this text: some text"]
      +     *         }
      +     *     
      +     * 
+     *
+     * The response would look like:
+     *
+     *
      +     *     
      +     *  {
      +     *      "id": "msg_01XzZQmG41BMGe5NZ5p2vEWb",
      +     *      "type": "message",
      +     *      "role": "assistant",
      +     *      "model": "claude-3-opus-20240229",
      +     *      "content": [
      +     *          {
      +     *              "type": "text",
      +     *              "text": "result"
      +     *          }
      +     *      ],
      +     *      "stop_reason": "end_turn",
      +     *      "stop_sequence": null,
      +     *      "usage": {
      +     *          "input_tokens": 16,
      +     *          "output_tokens": 326
      +     *      }
      +     *  }
      +     *     
      +     * 
      + */ + + public static ChatCompletionResults fromResponse(Request request, HttpResult response) throws IOException { + var parserConfig = XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE); + try (XContentParser jsonParser = XContentFactory.xContent(XContentType.JSON).createParser(parserConfig, response.body())) { + moveToFirstToken(jsonParser); + + XContentParser.Token token = jsonParser.currentToken(); + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, jsonParser); + + positionParserAtTokenAfterField(jsonParser, "content", FAILED_TO_FIND_FIELD_TEMPLATE); + + var completionResults = doParse(jsonParser); + + return new ChatCompletionResults(completionResults); + } + } + + private static List doParse(XContentParser parser) throws IOException { + var parsedResults = parseList(parser, (listParser) -> { + var parsedObject = TextObject.parse(parser); + // Anthropic also supports a tool_use type, we want to ignore those objects + if (parsedObject.type == null || parsedObject.type.equals("text") == false || parsedObject.text == null) { + return null; + } + + return new ChatCompletionResults.Result(parsedObject.text); + }); + + parsedResults.removeIf(Objects::isNull); + return parsedResults; + } + + private record TextObject(@Nullable String type, @Nullable String text) { + + private static final ParseField TEXT = new ParseField("text"); + private static final ParseField TYPE = new ParseField("type"); + private static final ObjectParser PARSER = new ObjectParser<>( + "anthropic_chat_completions_response", + true, + Builder::new + ); + + static { + PARSER.declareString(Builder::setText, TEXT); + PARSER.declareString(Builder::setType, TYPE); + } + + public static TextObject parse(XContentParser parser) throws IOException { + Builder builder = PARSER.apply(parser, null); + return builder.build(); + } + + private static final class Builder { + + private String type; + private String text; + + private Builder() {} + + public Builder setType(String type) { + this.type = type; + return this; + } + + public Builder setText(String text) { + this.text = text; + return this; + } + + public TextObject build() { + return new TextObject(type, text); + } + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/cohere/CohereRankedResponseEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/cohere/CohereRankedResponseEntity.java index 7f71933676ee0..c5bb536833e89 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/cohere/CohereRankedResponseEntity.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/cohere/CohereRankedResponseEntity.java @@ -161,5 +161,5 @@ private static RankedDocsResults.RankedDoc parseRankedDocObject(XContentParser p private CohereRankedResponseEntity() {} - static String FAILED_TO_FIND_FIELD_TEMPLATE = "Failed to find required field [%s] in Cohere embeddings response"; + static String FAILED_TO_FIND_FIELD_TEMPLATE = "Failed to find required field [%s] in Cohere rerank response"; } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiEmbeddingsResponseEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiEmbeddingsResponseEntity.java new file mode 100644 index 
0000000000000..7205ea83d0a7a --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiEmbeddingsResponseEntity.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.response.googlevertexai; + +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.external.request.Request; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.elasticsearch.common.xcontent.XContentParserUtils.parseList; +import static org.elasticsearch.xpack.inference.external.response.XContentUtils.consumeUntilObjectEnd; +import static org.elasticsearch.xpack.inference.external.response.XContentUtils.moveToFirstToken; +import static org.elasticsearch.xpack.inference.external.response.XContentUtils.positionParserAtTokenAfterField; + +public class GoogleVertexAiEmbeddingsResponseEntity { + + private static final String FAILED_TO_FIND_FIELD_TEMPLATE = + "Failed to find required field [%s] in Google Vertex AI embeddings response"; + + /** + * Parses the Google Vertex AI get embeddings response. + * For a request like: + * + *
      +     *     
      +     *         {
      +     *             "inputs": ["Embed this", "Embed this, too"]
      +     *         }
      +     *     
      +     * 
+     *
+     * The response would look like:
+     *
+     *
      +     *     
      +     *         {
      +     *           "predictions": [
      +     *              {
      +     *                "embeddings": {
      +     *                  "statistics": {
      +     *                    "truncated": false,
      +     *                    "token_count": 6
      +     *                  },
      +     *                  "values": [ ... ]
      +     *                }
      +     *              }
      +     *           ]
      +     *         }
      +     *     
      +     * 
      + */ + + public static InferenceTextEmbeddingFloatResults fromResponse(Request request, HttpResult response) throws IOException { + var parserConfig = XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE); + + try (XContentParser jsonParser = XContentFactory.xContent(XContentType.JSON).createParser(parserConfig, response.body())) { + moveToFirstToken(jsonParser); + + XContentParser.Token token = jsonParser.currentToken(); + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, jsonParser); + + positionParserAtTokenAfterField(jsonParser, "predictions", FAILED_TO_FIND_FIELD_TEMPLATE); + + List embeddingList = parseList( + jsonParser, + GoogleVertexAiEmbeddingsResponseEntity::parseEmbeddingObject + ); + + return new InferenceTextEmbeddingFloatResults(embeddingList); + } + } + + private static InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding parseEmbeddingObject(XContentParser parser) + throws IOException { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser); + + positionParserAtTokenAfterField(parser, "embeddings", FAILED_TO_FIND_FIELD_TEMPLATE); + + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser); + + positionParserAtTokenAfterField(parser, "values", FAILED_TO_FIND_FIELD_TEMPLATE); + + List embeddingValueList = parseList(parser, GoogleVertexAiEmbeddingsResponseEntity::parseEmbeddingList); + + // parse and discard the rest of the two objects + consumeUntilObjectEnd(parser); + consumeUntilObjectEnd(parser); + + return InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding.of(embeddingValueList); + } + + private static float parseEmbeddingList(XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + XContentParserUtils.ensureExpectedToken(XContentParser.Token.VALUE_NUMBER, token, parser); + return parser.floatValue(); + } + + private GoogleVertexAiEmbeddingsResponseEntity() {} +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiErrorResponseEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiErrorResponseEntity.java new file mode 100644 index 0000000000000..bf14d751db868 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiErrorResponseEntity.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.response.googlevertexai; + +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.external.http.retry.ErrorMessage; + +import java.util.Map; + +public class GoogleVertexAiErrorResponseEntity implements ErrorMessage { + + private final String errorMessage; + + private GoogleVertexAiErrorResponseEntity(String errorMessage) { + this.errorMessage = errorMessage; + } + + @Override + public String getErrorMessage() { + return errorMessage; + } + + /** + * An example error response for invalid auth would look like + * + * { + * "error": { + * "code": 401, + * "message": "some error message", + * "status": "UNAUTHENTICATED", + * "details": [ + * { + * "@type": "type.googleapis.com/google.rpc.ErrorInfo", + * "reason": "CREDENTIALS_MISSING", + * "domain": "googleapis.com", + * "metadata": { + * "method": "google.cloud.aiplatform.v1.PredictionService.Predict", + * "service": "aiplatform.googleapis.com" + * } + * } + * ] + * } + * } + * + * + * @param response The error response + * @return An error entity if the response is JSON with the above structure + * or null if the response does not contain the `error.message` field + */ + @SuppressWarnings("unchecked") + public static GoogleVertexAiErrorResponseEntity fromResponse(HttpResult response) { + try ( + XContentParser jsonParser = XContentFactory.xContent(XContentType.JSON) + .createParser(XContentParserConfiguration.EMPTY, response.body()) + ) { + var responseMap = jsonParser.map(); + var error = (Map) responseMap.get("error"); + if (error != null) { + var message = (String) error.get("message"); + if (message != null) { + return new GoogleVertexAiErrorResponseEntity(message); + } + } + } catch (Exception e) { + // swallow the error + } + + return null; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiRerankResponseEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiRerankResponseEntity.java new file mode 100644 index 0000000000000..24946ee5875a5 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiRerankResponseEntity.java @@ -0,0 +1,158 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.response.googlevertexai; + +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.inference.results.RankedDocsResults; +import org.elasticsearch.xpack.inference.external.http.HttpResult; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.elasticsearch.common.xcontent.XContentParserUtils.parseList; +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.external.response.XContentUtils.moveToFirstToken; +import static org.elasticsearch.xpack.inference.external.response.XContentUtils.positionParserAtTokenAfterField; + +public class GoogleVertexAiRerankResponseEntity { + + private static final String FAILED_TO_FIND_FIELD_TEMPLATE = "Failed to find required field [%s] in Google Vertex AI rerank response"; + + /** + * Parses the Google Vertex AI rerank response. + * + * For a request like: + * + *
      +     *     
      +     *         {
      +     *              "query": "some query",
      +     *              "records": [
      +     *                  {
      +     *                      "id": "1",
      +     *                      "title": "title 1",
      +     *                      "content": "content 1"
      +     *                  },
      +     *                  {
      +     *                      "id": "2",
      +     *                      "title": "title 2",
      +     *                      "content": "content 2"
      +     *                  }
+     *              ]
+     *         }
      +     *     
      +     * 
+     *
+     * The response would look like:
+     *
+     *
      +     *     
      +     *         {
      +     *              "records": [
      +     *                  {
      +     *                      "id": "2",
      +     *                      "title": "title 2",
      +     *                      "content": "content 2",
      +     *                      "score": 0.97
      +     *                  },
      +     *                  {
      +     *                      "id": "1",
      +     *                      "title": "title 1",
      +     *                      "content": "content 1",
      +     *                      "score": 0.18
      +     *                  }
      +     *             ]
      +     *         }
      +     *     
      +     * 
      + */ + + public static RankedDocsResults fromResponse(HttpResult response) throws IOException { + var parserConfig = XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE); + + try (XContentParser jsonParser = XContentFactory.xContent(XContentType.JSON).createParser(parserConfig, response.body())) { + moveToFirstToken(jsonParser); + + XContentParser.Token token = jsonParser.currentToken(); + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, jsonParser); + + positionParserAtTokenAfterField(jsonParser, "records", FAILED_TO_FIND_FIELD_TEMPLATE); + + var rankedDocs = doParse(jsonParser); + + return new RankedDocsResults(rankedDocs); + } + } + + private static List doParse(XContentParser parser) throws IOException { + return parseList(parser, (listParser, index) -> { + var parsedRankedDoc = RankedDoc.parse(parser); + + if (parsedRankedDoc.content == null) { + throw new IllegalStateException(format(FAILED_TO_FIND_FIELD_TEMPLATE, RankedDoc.CONTENT.getPreferredName())); + } + + if (parsedRankedDoc.score == null) { + throw new IllegalStateException(format(FAILED_TO_FIND_FIELD_TEMPLATE, RankedDoc.SCORE.getPreferredName())); + } + + return new RankedDocsResults.RankedDoc(index, parsedRankedDoc.score, parsedRankedDoc.content); + }); + } + + private record RankedDoc(@Nullable Float score, @Nullable String content) { + + private static final ParseField CONTENT = new ParseField("content"); + private static final ParseField SCORE = new ParseField("score"); + private static final ObjectParser PARSER = new ObjectParser<>( + "google_vertex_ai_rerank_response", + true, + Builder::new + ); + + static { + PARSER.declareString(Builder::setContent, CONTENT); + PARSER.declareFloat(Builder::setScore, SCORE); + } + + public static RankedDoc parse(XContentParser parser) { + Builder builder = PARSER.apply(parser, null); + return builder.build(); + } + + private static final class Builder { + + private String content; + private Float score; + + private Builder() {} + + public Builder setScore(Float score) { + this.score = score; + return this; + } + + public Builder setContent(String content) { + this.content = content; + return this; + } + + public RankedDoc build() { + return new RankedDoc(score, content); + } + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextField.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextField.java index 8ec614247bfbb..0c807c1166608 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextField.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextField.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.SimilarityMeasure; @@ -64,20 +65,37 @@ public record SemanticTextField(String fieldName, List originalValues, I static final String TASK_TYPE_FIELD = "task_type"; static final String DIMENSIONS_FIELD = "dimensions"; static final String SIMILARITY_FIELD = "similarity"; + static final String ELEMENT_TYPE_FIELD = "element_type"; public record InferenceResult(String inferenceId, ModelSettings 
modelSettings, List chunks) {} public record Chunk(String text, BytesReference rawEmbeddings) {} - public record ModelSettings(TaskType taskType, Integer dimensions, SimilarityMeasure similarity) implements ToXContentObject { + public record ModelSettings( + TaskType taskType, + Integer dimensions, + SimilarityMeasure similarity, + DenseVectorFieldMapper.ElementType elementType + ) implements ToXContentObject { public ModelSettings(Model model) { - this(model.getTaskType(), model.getServiceSettings().dimensions(), model.getServiceSettings().similarity()); + this( + model.getTaskType(), + model.getServiceSettings().dimensions(), + model.getServiceSettings().similarity(), + model.getServiceSettings().elementType() + ); } - public ModelSettings(TaskType taskType, Integer dimensions, SimilarityMeasure similarity) { + public ModelSettings( + TaskType taskType, + Integer dimensions, + SimilarityMeasure similarity, + DenseVectorFieldMapper.ElementType elementType + ) { this.taskType = Objects.requireNonNull(taskType, "task type must not be null"); this.dimensions = dimensions; this.similarity = similarity; + this.elementType = elementType; validate(); } @@ -91,6 +109,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (similarity != null) { builder.field(SIMILARITY_FIELD, similarity); } + if (elementType != null) { + builder.field(ELEMENT_TYPE_FIELD, elementType); + } return builder.endObject(); } @@ -104,6 +125,9 @@ public String toString() { if (similarity != null) { sb.append(", similarity=").append(similarity); } + if (elementType != null) { + sb.append(", element_type=").append(elementType); + } return sb.toString(); } @@ -112,10 +136,12 @@ private void validate() { case TEXT_EMBEDDING: validateFieldPresent(DIMENSIONS_FIELD, dimensions); validateFieldPresent(SIMILARITY_FIELD, similarity); + validateFieldPresent(ELEMENT_TYPE_FIELD, elementType); break; case SPARSE_EMBEDDING: validateFieldNotPresent(DIMENSIONS_FIELD, dimensions); validateFieldNotPresent(SIMILARITY_FIELD, similarity); + validateFieldNotPresent(ELEMENT_TYPE_FIELD, elementType); break; default: @@ -247,7 +273,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws TaskType taskType = TaskType.fromString((String) args[0]); Integer dimensions = (Integer) args[1]; SimilarityMeasure similarity = args[2] == null ? null : SimilarityMeasure.fromString((String) args[2]); - return new ModelSettings(taskType, dimensions, similarity); + DenseVectorFieldMapper.ElementType elementType = args[3] == null + ? 
null + : DenseVectorFieldMapper.ElementType.fromString((String) args[3]); + return new ModelSettings(taskType, dimensions, similarity, elementType); } ); @@ -273,6 +302,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws MODEL_SETTINGS_PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField(TASK_TYPE_FIELD)); MODEL_SETTINGS_PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), new ParseField(DIMENSIONS_FIELD)); MODEL_SETTINGS_PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField(SIMILARITY_FIELD)); + MODEL_SETTINGS_PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField(ELEMENT_TYPE_FIELD)); } /** diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java index c2a4907125a31..b9b95afbf6dc6 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.inference.mapper; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.search.join.ScoreMode; @@ -137,7 +138,7 @@ protected void merge(FieldMapper mergeWith, Conflicts conflicts, MapperMergeCont super.merge(mergeWith, conflicts, mapperMergeContext); conflicts.check(); var semanticMergeWith = (SemanticTextFieldMapper) mergeWith; - var context = mapperMergeContext.createChildContext(mergeWith.simpleName(), ObjectMapper.Dynamic.FALSE); + var context = mapperMergeContext.createChildContext(mergeWith.leafName(), ObjectMapper.Dynamic.FALSE); var inferenceField = inferenceFieldBuilder.apply(context.getMapperBuilderContext()); var mergedInferenceField = inferenceField.merge(semanticMergeWith.fieldType().getInferenceField(), context); inferenceFieldBuilder = c -> mergedInferenceField; @@ -146,16 +147,20 @@ protected void merge(FieldMapper mergeWith, Conflicts conflicts, MapperMergeCont @Override public SemanticTextFieldMapper build(MapperBuilderContext context) { if (copyTo.copyToFields().isEmpty() == false) { - throw new IllegalArgumentException(CONTENT_TYPE + " field [" + name() + "] does not support [copy_to]"); + throw new IllegalArgumentException(CONTENT_TYPE + " field [" + leafName() + "] does not support [copy_to]"); } if (multiFieldsBuilder.hasMultiFields()) { - throw new IllegalArgumentException(CONTENT_TYPE + " field [" + name() + "] does not support multi-fields"); + throw new IllegalArgumentException(CONTENT_TYPE + " field [" + leafName() + "] does not support multi-fields"); } - final String fullName = context.buildFullName(name()); - var childContext = context.createChildContext(name(), ObjectMapper.Dynamic.FALSE); + final String fullName = context.buildFullName(leafName()); + + if (context.isInNestedContext()) { + throw new IllegalArgumentException(CONTENT_TYPE + " field [" + fullName + "] cannot be nested"); + } + var childContext = context.createChildContext(leafName(), ObjectMapper.Dynamic.FALSE); final ObjectMapper inferenceField = inferenceFieldBuilder.apply(childContext); return new SemanticTextFieldMapper( - name(), + leafName(), new SemanticTextFieldType( fullName, inferenceId.getValue(), @@ -182,7 
+187,7 @@ public Iterator iterator() { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), fieldType().indexVersionCreated, fieldType().getChunksField().bitsetProducer()).init(this); + return new Builder(leafName(), fieldType().indexVersionCreated, fieldType().getChunksField().bitsetProducer()).init(this); } @Override @@ -197,7 +202,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio boolean isWithinLeaf = context.path().isWithinLeafObject(); try { context.path().setWithinLeafObject(true); - field = SemanticTextField.parse(parser, new Tuple<>(name(), context.parser().contentType())); + field = SemanticTextField.parse(parser, new Tuple<>(fullPath(), context.parser().contentType())); } finally { context.path().setWithinLeafObject(isWithinLeaf); } @@ -221,7 +226,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio if (fieldType().getModelSettings() == null) { context.path().remove(); Builder builder = (Builder) new Builder( - simpleName(), + leafName(), fieldType().indexVersionCreated, fieldType().getChunksField().bitsetProducer() ).init(this); @@ -231,7 +236,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio .build(context.createDynamicMapperBuilderContext()); context.addDynamicMapper(mapper); } finally { - context.path().add(simpleName()); + context.path().add(leafName()); } } else { Conflicts conflicts = new Conflicts(fullFieldName); @@ -242,7 +247,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio throw new DocumentParsingException( xContentLocation, "Incompatible model settings for field [" - + name() + + fullPath() + "]. Check that the " + INFERENCE_ID_FIELD + " is not using different model settings", @@ -284,12 +289,12 @@ public InferenceFieldMetadata getMetadata(Set sourcePaths) { String[] copyFields = sourcePaths.toArray(String[]::new); // ensure consistent order Arrays.sort(copyFields); - return new InferenceFieldMetadata(name(), fieldType().inferenceId, copyFields); + return new InferenceFieldMetadata(fullPath(), fieldType().inferenceId, copyFields); } @Override public Object getOriginalValue(Map sourceAsMap) { - Object fieldValue = sourceAsMap.get(name()); + Object fieldValue = sourceAsMap.get(fullPath()); if (fieldValue == null) { return null; } else if (fieldValue instanceof Map == false) { @@ -297,7 +302,7 @@ public Object getOriginalValue(Map sourceAsMap) { return fieldValue; } - Map fieldValueMap = XContentMapValues.nodeMapValue(fieldValue, "Field [" + name() + "]"); + Map fieldValueMap = XContentMapValues.nodeMapValue(fieldValue, "Field [" + fullPath() + "]"); return XContentMapValues.extractValue(TEXT_FIELD, fieldValueMap); } @@ -352,6 +357,21 @@ public Query termQuery(Object value, SearchExecutionContext context) { throw new IllegalArgumentException(CONTENT_TYPE + " fields do not support term query"); } + @Override + public Query existsQuery(SearchExecutionContext context) { + if (getEmbeddingsField() == null) { + return new MatchNoDocsQuery(); + } + + return NestedQueryBuilder.toQuery( + (c -> getEmbeddingsField().fieldType().existsQuery(c)), + getChunksFieldName(name()), + ScoreMode.None, + false, + context + ); + } + @Override public ValueFetcher valueFetcher(SearchExecutionContext context, String format) { // Redirect the fetcher to load the original values of the field @@ -427,7 +447,7 @@ public QueryBuilder semanticQuery(InferenceResults inferenceResults, float boost ); } - yield new 
KnnVectorQueryBuilder(inferenceResultsFieldName, inference, null, null); + yield new KnnVectorQueryBuilder(inferenceResultsFieldName, inference, null, null, null); } default -> throw new IllegalStateException( "Field [" @@ -478,6 +498,7 @@ private static Mapper.Builder createEmbeddingsField(IndexVersion indexVersionCre CHUNKED_EMBEDDINGS_FIELD, indexVersionCreated ); + SimilarityMeasure similarity = modelSettings.similarity(); if (similarity != null) { switch (similarity) { @@ -490,6 +511,8 @@ private static Mapper.Builder createEmbeddingsField(IndexVersion indexVersionCre } } denseVectorMapperBuilder.dimensions(modelSettings.dimensions()); + denseVectorMapperBuilder.elementType(modelSettings.elementType()); + yield denseVectorMapperBuilder; } default -> throw new IllegalArgumentException("Invalid task_type in model_settings [" + modelSettings.taskType().name() + "]"); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankBuilder.java new file mode 100644 index 0000000000000..6bc43a4309b0c --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankBuilder.java @@ -0,0 +1,215 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.rank.textsimilarity; + +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.Query; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicensedFeature; +import org.elasticsearch.search.rank.RankBuilder; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.rank.context.QueryPhaseRankCoordinatorContext; +import org.elasticsearch.search.rank.context.QueryPhaseRankShardContext; +import org.elasticsearch.search.rank.context.RankFeaturePhaseRankCoordinatorContext; +import org.elasticsearch.search.rank.context.RankFeaturePhaseRankShardContext; +import org.elasticsearch.search.rank.feature.RankFeatureDoc; +import org.elasticsearch.search.rank.rerank.RerankingQueryPhaseRankCoordinatorContext; +import org.elasticsearch.search.rank.rerank.RerankingQueryPhaseRankShardContext; +import org.elasticsearch.search.rank.rerank.RerankingRankFeaturePhaseRankShardContext; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankRetrieverBuilder.FIELD_FIELD; +import static org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankRetrieverBuilder.INFERENCE_ID_FIELD; +import static 
org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankRetrieverBuilder.INFERENCE_TEXT_FIELD; +import static org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankRetrieverBuilder.MIN_SCORE_FIELD; + +/** + * A {@code RankBuilder} that enables ranking with text similarity model inference. Supports parameters for configuring the inference call. + */ +public class TextSimilarityRankBuilder extends RankBuilder { + + public static final String NAME = "text_similarity_reranker"; + + public static final LicensedFeature.Momentary TEXT_SIMILARITY_RERANKER_FEATURE = LicensedFeature.momentary( + null, + "text-similarity-reranker", + License.OperationMode.ENTERPRISE + ); + + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, args -> { + String inferenceId = (String) args[0]; + String inferenceText = (String) args[1]; + String field = (String) args[2]; + Integer rankWindowSize = args[3] == null ? DEFAULT_RANK_WINDOW_SIZE : (Integer) args[3]; + Float minScore = (Float) args[4]; + + return new TextSimilarityRankBuilder(field, inferenceId, inferenceText, rankWindowSize, minScore); + }); + + static { + PARSER.declareString(constructorArg(), INFERENCE_ID_FIELD); + PARSER.declareString(constructorArg(), INFERENCE_TEXT_FIELD); + PARSER.declareString(constructorArg(), FIELD_FIELD); + PARSER.declareInt(optionalConstructorArg(), RANK_WINDOW_SIZE_FIELD); + PARSER.declareFloat(optionalConstructorArg(), MIN_SCORE_FIELD); + } + + private final String inferenceId; + private final String inferenceText; + private final String field; + private final Float minScore; + + public TextSimilarityRankBuilder(String field, String inferenceId, String inferenceText, int rankWindowSize, Float minScore) { + super(rankWindowSize); + this.inferenceId = inferenceId; + this.inferenceText = inferenceText; + this.field = field; + this.minScore = minScore; + } + + public TextSimilarityRankBuilder(StreamInput in) throws IOException { + super(in); + // rankWindowSize deserialization is handled by the parent class RankBuilder + this.inferenceId = in.readString(); + this.inferenceText = in.readString(); + this.field = in.readString(); + this.minScore = in.readOptionalFloat(); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.TEXT_SIMILARITY_RERANKER_RETRIEVER; + } + + @Override + public void doWriteTo(StreamOutput out) throws IOException { + // rankWindowSize serialization is handled by the parent class RankBuilder + out.writeString(inferenceId); + out.writeString(inferenceText); + out.writeString(field); + out.writeOptionalFloat(minScore); + } + + @Override + public void doXContent(XContentBuilder builder, Params params) throws IOException { + // rankWindowSize serialization is handled by the parent class RankBuilder + builder.field(INFERENCE_ID_FIELD.getPreferredName(), inferenceId); + builder.field(INFERENCE_TEXT_FIELD.getPreferredName(), inferenceText); + builder.field(FIELD_FIELD.getPreferredName(), field); + if (minScore != null) { + builder.field(MIN_SCORE_FIELD.getPreferredName(), minScore); + } + } + + @Override + public boolean isCompoundBuilder() { + return false; + } + + @Override + public Explanation explainHit(Explanation baseExplanation, RankDoc scoreDoc, List queryNames) { + if (scoreDoc == null) { + return baseExplanation; + } + if (false == baseExplanation.isMatch()) { + return baseExplanation; + } + + assert scoreDoc instanceof 
RankFeatureDoc : "ScoreDoc is not an instance of RankFeatureDoc"; + RankFeatureDoc rrfRankDoc = (RankFeatureDoc) scoreDoc; + + return Explanation.match( + rrfRankDoc.score, + "rank after reranking: [" + + rrfRankDoc.rank + + "] with score: [" + + rrfRankDoc.score + + "], using inference endpoint: [" + + inferenceId + + "] on document field: [" + + field + + "]", + baseExplanation + ); + } + + @Override + public QueryPhaseRankShardContext buildQueryPhaseShardContext(List queries, int from) { + return new RerankingQueryPhaseRankShardContext(queries, rankWindowSize()); + } + + @Override + public QueryPhaseRankCoordinatorContext buildQueryPhaseCoordinatorContext(int size, int from) { + return new RerankingQueryPhaseRankCoordinatorContext(rankWindowSize()); + } + + @Override + public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() { + return new RerankingRankFeaturePhaseRankShardContext(field); + } + + @Override + public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorContext(int size, int from, Client client) { + return new TextSimilarityRankFeaturePhaseRankCoordinatorContext( + size, + from, + rankWindowSize(), + client, + inferenceId, + inferenceText, + minScore + ); + } + + public String field() { + return field; + } + + public String inferenceId() { + return inferenceId; + } + + public String inferenceText() { + return inferenceText; + } + + public Float minScore() { + return minScore; + } + + @Override + protected boolean doEquals(RankBuilder other) { + TextSimilarityRankBuilder that = (TextSimilarityRankBuilder) other; + return Objects.equals(inferenceId, that.inferenceId) + && Objects.equals(inferenceText, that.inferenceText) + && Objects.equals(field, that.field) + && Objects.equals(minScore, that.minScore); + } + + @Override + protected int doHashCode() { + return Objects.hash(inferenceId, inferenceText, field, minScore); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContext.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContext.java new file mode 100644 index 0000000000000..a22126439e9e2 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContext.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
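// [Illustrative aside, not part of the change set] A minimal sketch of constructing the
// TextSimilarityRankBuilder defined above, via its public constructor
// (field, inferenceId, inferenceText, rankWindowSize, minScore). The argument values are
// hypothetical, and the surrounding search-request plumbing and imports
// (org.elasticsearch.search.rank.RankBuilder) are omitted:
RankBuilder reranker = new TextSimilarityRankBuilder(
    "content",                // field whose text is sent to the reranking endpoint
    "my-rerank-endpoint",     // id of the rerank inference endpoint (inference_id)
    "what is vector search?", // query text to rerank against (inference_text)
    100,                      // rank_window_size
    null                      // min_score; null means no minimum-score filtering
);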
+ */ + +package org.elasticsearch.xpack.inference.rank.textsimilarity; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.inference.InputType; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.search.rank.context.RankFeaturePhaseRankCoordinatorContext; +import org.elasticsearch.search.rank.feature.RankFeatureDoc; +import org.elasticsearch.xpack.core.inference.action.InferenceAction; +import org.elasticsearch.xpack.core.inference.results.RankedDocsResults; + +import java.util.Arrays; +import java.util.Comparator; +import java.util.List; +import java.util.Map; + +/** + * A {@code RankFeaturePhaseRankCoordinatorContext} that performs a rerank inference call to determine relevance scores for documents within + * the provided rank window. + */ +public class TextSimilarityRankFeaturePhaseRankCoordinatorContext extends RankFeaturePhaseRankCoordinatorContext { + + protected final Client client; + protected final String inferenceId; + protected final String inferenceText; + protected final Float minScore; + + public TextSimilarityRankFeaturePhaseRankCoordinatorContext( + int size, + int from, + int rankWindowSize, + Client client, + String inferenceId, + String inferenceText, + Float minScore + ) { + super(size, from, rankWindowSize); + this.client = client; + this.inferenceId = inferenceId; + this.inferenceText = inferenceText; + this.minScore = minScore; + } + + @Override + protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener scoreListener) { + // Wrap the provided rankListener to an ActionListener that would handle the response from the inference service + // and then pass the results + final ActionListener actionListener = scoreListener.delegateFailureAndWrap((l, r) -> { + float[] scores = extractScoresFromResponse(r); + if (scores.length != featureDocs.length) { + l.onFailure( + new IllegalStateException("Document and score count mismatch: [" + featureDocs.length + "] vs [" + scores.length + "]") + ); + } else { + l.onResponse(scores); + } + }); + + List featureData = Arrays.stream(featureDocs).map(x -> x.featureData).toList(); + InferenceAction.Request request = generateRequest(featureData); + try { + client.execute(InferenceAction.INSTANCE, request, actionListener); + } finally { + request.decRef(); + } + } + + protected InferenceAction.Request generateRequest(List docFeatures) { + return new InferenceAction.Request( + TaskType.RERANK, + inferenceId, + inferenceText, + docFeatures, + Map.of(), + InputType.SEARCH, + InferenceAction.Request.DEFAULT_TIMEOUT + ); + } + + private float[] extractScoresFromResponse(InferenceAction.Response response) { + InferenceServiceResults results = response.getResults(); + assert results instanceof RankedDocsResults; + + List rankedDocs = ((RankedDocsResults) results).getRankedDocs(); + float[] scores = new float[rankedDocs.size()]; + for (RankedDocsResults.RankedDoc rankedDoc : rankedDocs) { + scores[rankedDoc.index()] = rankedDoc.relevanceScore(); + } + + return scores; + } + + /** + * Sorts documents by score descending and discards those with a score less than minScore. 
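// [Illustrative aside, not part of the change set] A standalone sketch of the contract
// implemented in this class: extractScoresFromResponse (above) writes each ranked doc's
// relevance score back at rankedDoc.index(), so scores stay aligned with the original
// document order, and preprocess (described by this Javadoc and defined just below) drops
// documents under minScore and sorts the rest by score descending. Plain Java with
// hypothetical types only; it does not use any Elasticsearch classes.
import java.util.Comparator;
import java.util.List;

class RerankContractSketch {
    record ScoredDoc(int index, float score) {}

    // Mirrors extractScoresFromResponse: slot each score at the document's original position.
    static float[] alignScores(List<ScoredDoc> ranked, int docCount) {
        float[] scores = new float[docCount];
        for (ScoredDoc doc : ranked) {
            scores[doc.index()] = doc.score();
        }
        return scores;
    }

    // Mirrors preprocess: keep docs at or above minScore (if set), ordered by score descending.
    static List<ScoredDoc> topDocs(List<ScoredDoc> docs, Float minScore) {
        return docs.stream()
            .filter(d -> minScore == null || d.score() >= minScore)
            .sorted(Comparator.comparing(ScoredDoc::score).reversed())
            .toList();
    }

    public static void main(String[] args) {
        var ranked = List.of(new ScoredDoc(2, 0.9f), new ScoredDoc(0, 0.4f), new ScoredDoc(1, 0.1f));
        System.out.println(java.util.Arrays.toString(alignScores(ranked, 3))); // [0.4, 0.1, 0.9]
        System.out.println(topDocs(ranked, 0.2f)); // keeps index=2 (0.9) then index=0 (0.4)
    }
}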
+ * @param originalDocs documents to process + */ + @Override + protected RankFeatureDoc[] preprocess(RankFeatureDoc[] originalDocs) { + return Arrays.stream(originalDocs) + .filter(doc -> minScore == null || doc.score >= minScore) + .sorted(Comparator.comparing((RankFeatureDoc doc) -> doc.score).reversed()) + .toArray(RankFeatureDoc[]::new); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java new file mode 100644 index 0000000000000..a81fbb51f678d --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java @@ -0,0 +1,155 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.rank.textsimilarity; + +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.retriever.RetrieverBuilder; +import org.elasticsearch.search.retriever.RetrieverParserContext; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.XPackPlugin; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.search.rank.RankBuilder.DEFAULT_RANK_WINDOW_SIZE; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * A {@code RetrieverBuilder} for parsing and constructing a text similarity reranker retriever. + */ +public class TextSimilarityRankRetrieverBuilder extends RetrieverBuilder { + + public static final NodeFeature TEXT_SIMILARITY_RERANKER_RETRIEVER_SUPPORTED = new NodeFeature( + "text_similarity_reranker_retriever_supported" + ); + + public static final ParseField RETRIEVER_FIELD = new ParseField("retriever"); + public static final ParseField INFERENCE_ID_FIELD = new ParseField("inference_id"); + public static final ParseField INFERENCE_TEXT_FIELD = new ParseField("inference_text"); + public static final ParseField FIELD_FIELD = new ParseField("field"); + public static final ParseField RANK_WINDOW_SIZE_FIELD = new ParseField("rank_window_size"); + public static final ParseField MIN_SCORE_FIELD = new ParseField("min_score"); + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>(TextSimilarityRankBuilder.NAME, args -> { + RetrieverBuilder retrieverBuilder = (RetrieverBuilder) args[0]; + String inferenceId = (String) args[1]; + String inferenceText = (String) args[2]; + String field = (String) args[3]; + int rankWindowSize = args[4] == null ? 
DEFAULT_RANK_WINDOW_SIZE : (int) args[4]; + Float minScore = (Float) args[5]; + + return new TextSimilarityRankRetrieverBuilder(retrieverBuilder, inferenceId, inferenceText, field, rankWindowSize, minScore); + }); + + static { + PARSER.declareNamedObject(constructorArg(), (p, c, n) -> p.namedObject(RetrieverBuilder.class, n, c), RETRIEVER_FIELD); + PARSER.declareString(constructorArg(), INFERENCE_ID_FIELD); + PARSER.declareString(constructorArg(), INFERENCE_TEXT_FIELD); + PARSER.declareString(constructorArg(), FIELD_FIELD); + PARSER.declareInt(optionalConstructorArg(), RANK_WINDOW_SIZE_FIELD); + PARSER.declareFloat(optionalConstructorArg(), MIN_SCORE_FIELD); + + RetrieverBuilder.declareBaseParserFields(TextSimilarityRankBuilder.NAME, PARSER); + } + + public static TextSimilarityRankRetrieverBuilder fromXContent(XContentParser parser, RetrieverParserContext context) + throws IOException { + if (context.clusterSupportsFeature(TEXT_SIMILARITY_RERANKER_RETRIEVER_SUPPORTED) == false) { + throw new ParsingException(parser.getTokenLocation(), "unknown retriever [" + TextSimilarityRankBuilder.NAME + "]"); + } + if (TextSimilarityRankBuilder.TEXT_SIMILARITY_RERANKER_FEATURE.check(XPackPlugin.getSharedLicenseState()) == false) { + throw LicenseUtils.newComplianceException(TextSimilarityRankBuilder.NAME); + } + return PARSER.apply(parser, context); + } + + private final RetrieverBuilder retrieverBuilder; + private final String inferenceId; + private final String inferenceText; + private final String field; + private final int rankWindowSize; + private final Float minScore; + + public TextSimilarityRankRetrieverBuilder( + RetrieverBuilder retrieverBuilder, + String inferenceId, + String inferenceText, + String field, + int rankWindowSize, + Float minScore + ) { + this.retrieverBuilder = retrieverBuilder; + this.inferenceId = inferenceId; + this.inferenceText = inferenceText; + this.field = field; + this.rankWindowSize = rankWindowSize; + this.minScore = minScore; + } + + @Override + public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { + retrieverBuilder.extractToSearchSourceBuilder(searchSourceBuilder, compoundUsed); + + // Combining with other rank builder (such as RRF) is not supported yet + if (searchSourceBuilder.rankBuilder() != null) { + throw new IllegalArgumentException("text similarity rank builder cannot be combined with other rank builders"); + } + + searchSourceBuilder.rankBuilder( + new TextSimilarityRankBuilder(this.field, this.inferenceId, this.inferenceText, this.rankWindowSize, this.minScore) + ); + } + + @Override + public String getName() { + return TextSimilarityRankBuilder.NAME; + } + + public int rankWindowSize() { + return rankWindowSize; + } + + @Override + protected void doToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(RETRIEVER_FIELD.getPreferredName()); + builder.startObject(); + builder.field(retrieverBuilder.getName(), retrieverBuilder); + builder.endObject(); + builder.field(INFERENCE_ID_FIELD.getPreferredName(), inferenceId); + builder.field(INFERENCE_TEXT_FIELD.getPreferredName(), inferenceText); + builder.field(FIELD_FIELD.getPreferredName(), field); + builder.field(RANK_WINDOW_SIZE_FIELD.getPreferredName(), rankWindowSize); + if (minScore != null) { + builder.field(MIN_SCORE_FIELD.getPreferredName(), minScore); + } + } + + @Override + protected boolean doEquals(Object other) { + TextSimilarityRankRetrieverBuilder that = (TextSimilarityRankRetrieverBuilder) other; + return 
Objects.equals(retrieverBuilder, that.retrieverBuilder) + && Objects.equals(inferenceId, that.inferenceId) + && Objects.equals(inferenceText, that.inferenceText) + && Objects.equals(field, that.field) + && Objects.equals(rankWindowSize, that.rankWindowSize) + && Objects.equals(minScore, that.minScore); + } + + @Override + protected int doHashCode() { + return Objects.hash(retrieverBuilder, inferenceId, inferenceText, field, rankWindowSize, minScore); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceFields.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceFields.java index 8d83a8a81ec0d..1af79a69839ac 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceFields.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceFields.java @@ -14,9 +14,16 @@ public final class ServiceFields { public static final String SIMILARITY = "similarity"; public static final String DIMENSIONS = "dimensions"; + // Typically we use this to define the maximum tokens for the input text (text being sent to an integration) public static final String MAX_INPUT_TOKENS = "max_input_tokens"; public static final String URL = "url"; public static final String MODEL_ID = "model_id"; + /** + * Represents the field elasticsearch uses to determine the embedding type (e.g. float, byte). + * The value this field is normally set to would be one of the values in + * {@link org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.ElementType} + */ + public static final String ELEMENT_TYPE = "element_type"; private ServiceFields() { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java index f9aca89969614..3f3a61269f3e4 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java @@ -9,6 +9,7 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.core.Nullable; @@ -23,6 +24,8 @@ import org.elasticsearch.xpack.core.inference.action.InferenceAction; import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; import org.elasticsearch.xpack.core.inference.results.TextEmbedding; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsFeatureFlag; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings; import org.elasticsearch.xpack.inference.services.settings.ApiKeySecrets; import java.net.URI; @@ -37,6 +40,9 @@ import java.util.stream.Collectors; import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings.ENABLED; +import static org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings.MAX_NUMBER_OF_ALLOCATIONS; +import static org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings.MIN_NUMBER_OF_ALLOCATIONS; import static 
org.elasticsearch.xpack.inference.services.ServiceFields.SIMILARITY; public final class ServiceUtils { @@ -126,6 +132,33 @@ public static Object removeAsOneOfTypes( return null; } + public static AdaptiveAllocationsSettings removeAsAdaptiveAllocationsSettings( + Map sourceMap, + String key, + ValidationException validationException + ) { + if (AdaptiveAllocationsFeatureFlag.isEnabled() == false) { + return null; + } + Map settingsMap = ServiceUtils.removeFromMap(sourceMap, key); + if (settingsMap == null) { + return null; + } + AdaptiveAllocationsSettings settings = new AdaptiveAllocationsSettings( + ServiceUtils.removeAsType(settingsMap, ENABLED.getPreferredName(), Boolean.class, validationException), + ServiceUtils.removeAsType(settingsMap, MIN_NUMBER_OF_ALLOCATIONS.getPreferredName(), Integer.class, validationException), + ServiceUtils.removeAsType(settingsMap, MAX_NUMBER_OF_ALLOCATIONS.getPreferredName(), Integer.class, validationException) + ); + for (String settingName : settingsMap.keySet()) { + validationException.addValidationError(invalidSettingError(settingName, key)); + } + ActionRequestValidationException exception = settings.validate(); + if (exception != null) { + validationException.addValidationErrors(exception.validationErrors()); + } + return settings; + } + @SuppressWarnings("unchecked") public static Map removeFromMap(Map sourceMap, String fieldName) { return (Map) sourceMap.remove(fieldName); @@ -177,6 +210,10 @@ public static String missingSettingErrorMsg(String settingName, String scope) { return Strings.format("[%s] does not contain the required setting [%s]", scope, settingName); } + public static String missingOneOfSettingsErrorMsg(List settingNames, String scope) { + return Strings.format("[%s] does not contain one of the required settings [%s]", scope, String.join(", ", settingNames)); + } + public static String invalidTypeErrorMsg(String settingName, Object foundObject, String expectedType) { return Strings.format( "field [%s] is not of the expected type. 
The value [%s] cannot be converted to a [%s]", @@ -233,11 +270,7 @@ public static String invalidSettingError(String settingName, String scope) { public static URI convertToUri(@Nullable String url, String settingName, String settingScope, ValidationException validationException) { try { - if (url == null) { - return null; - } - - return createUri(url); + return createOptionalUri(url); } catch (IllegalArgumentException cause) { validationException.addValidationError(ServiceUtils.invalidUrlErrorMsg(url, settingName, settingScope, cause.getMessage())); return null; @@ -355,6 +388,32 @@ public static String extractOptionalString( return optionalField; } + public static Integer extractRequiredPositiveInteger( + Map map, + String settingName, + String scope, + ValidationException validationException + ) { + int initialValidationErrorCount = validationException.validationErrors().size(); + Integer field = ServiceUtils.removeAsType(map, settingName, Integer.class, validationException); + + if (validationException.validationErrors().size() > initialValidationErrorCount) { + return null; + } + + if (field == null) { + validationException.addValidationError(ServiceUtils.missingSettingErrorMsg(settingName, scope)); + } else if (field <= 0) { + validationException.addValidationError(ServiceUtils.mustBeAPositiveIntegerErrorMessage(settingName, scope, field)); + } + + if (validationException.validationErrors().size() > initialValidationErrorCount) { + return null; + } + + return field; + } + public static Integer extractOptionalPositiveInteger( Map map, String settingName, @@ -625,5 +684,9 @@ public static SecureString apiKey(@Nullable ApiKeySecrets secrets) { return secrets == null ? new SecureString(new char[0]) : secrets.apiKey(); } + public static T nonNullOrDefault(@Nullable T requestValue, @Nullable T originalSettingsValue) { + return requestValue == null ? originalSettingsValue : requestValue; + } + private ServiceUtils() {} } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockConstants.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockConstants.java new file mode 100644 index 0000000000000..1755dac2ac13f --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockConstants.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock; + +public class AmazonBedrockConstants { + public static final String ACCESS_KEY_FIELD = "access_key"; + public static final String SECRET_KEY_FIELD = "secret_key"; + public static final String REGION_FIELD = "region"; + public static final String MODEL_FIELD = "model"; + public static final String PROVIDER_FIELD = "provider"; + + public static final String TEMPERATURE_FIELD = "temperature"; + public static final String TOP_P_FIELD = "top_p"; + public static final String TOP_K_FIELD = "top_k"; + public static final String MAX_NEW_TOKENS_FIELD = "max_new_tokens"; + + public static final Double MIN_TEMPERATURE_TOP_P_TOP_K_VALUE = 0.0; + public static final Double MAX_TEMPERATURE_TOP_P_TOP_K_VALUE = 1.0; + + public static final int DEFAULT_MAX_CHUNK_SIZE = 2048; + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockModel.java new file mode 100644 index 0000000000000..13ca8bd7bd749 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockModel.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock; + +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.inference.TaskSettings; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.action.amazonbedrock.AmazonBedrockActionVisitor; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; + +import java.util.Map; + +public abstract class AmazonBedrockModel extends Model { + + protected String region; + protected String model; + protected AmazonBedrockProvider provider; + protected RateLimitSettings rateLimitSettings; + + protected AmazonBedrockModel(ModelConfigurations modelConfigurations, ModelSecrets secrets) { + super(modelConfigurations, secrets); + setPropertiesFromServiceSettings((AmazonBedrockServiceSettings) modelConfigurations.getServiceSettings()); + } + + protected AmazonBedrockModel(Model model, TaskSettings taskSettings) { + super(model, taskSettings); + + if (model instanceof AmazonBedrockModel bedrockModel) { + setPropertiesFromServiceSettings(bedrockModel.getServiceSettings()); + } + } + + protected AmazonBedrockModel(Model model, ServiceSettings serviceSettings) { + super(model, serviceSettings); + if (serviceSettings instanceof AmazonBedrockServiceSettings bedrockServiceSettings) { + setPropertiesFromServiceSettings(bedrockServiceSettings); + } + } + + protected AmazonBedrockModel(ModelConfigurations modelConfigurations) { + super(modelConfigurations); + setPropertiesFromServiceSettings((AmazonBedrockServiceSettings) modelConfigurations.getServiceSettings()); + } + + public String region() { + return region; + } + + public String model() { + return model; + } + + public AmazonBedrockProvider provider() { + return provider; + } + + public 
RateLimitSettings rateLimitSettings() { + return rateLimitSettings; + } + + private void setPropertiesFromServiceSettings(AmazonBedrockServiceSettings serviceSettings) { + this.region = serviceSettings.region(); + this.model = serviceSettings.model(); + this.provider = serviceSettings.provider(); + this.rateLimitSettings = serviceSettings.rateLimitSettings(); + } + + public abstract ExecutableAction accept(AmazonBedrockActionVisitor creator, Map taskSettings); + + @Override + public AmazonBedrockServiceSettings getServiceSettings() { + return (AmazonBedrockServiceSettings) super.getServiceSettings(); + } + + @Override + public AmazonBedrockSecretSettings getSecretSettings() { + return (AmazonBedrockSecretSettings) super.getSecretSettings(); + } + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockProvider.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockProvider.java new file mode 100644 index 0000000000000..340a5a65f0969 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockProvider.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock; + +import java.util.Locale; + +public enum AmazonBedrockProvider { + AMAZONTITAN, + ANTHROPIC, + AI21LABS, + COHERE, + META, + MISTRAL; + + public static String NAME = "amazon_bedrock_provider"; + + public static AmazonBedrockProvider fromString(String name) { + return valueOf(name.trim().toUpperCase(Locale.ROOT)); + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockProviderCapabilities.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockProviderCapabilities.java new file mode 100644 index 0000000000000..28b10ef294bda --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockProviderCapabilities.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock; + +import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.inference.TaskType; + +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.DEFAULT_MAX_CHUNK_SIZE; + +public final class AmazonBedrockProviderCapabilities { + private static final List embeddingProviders = List.of( + AmazonBedrockProvider.AMAZONTITAN, + AmazonBedrockProvider.COHERE + ); + + private static final List chatCompletionProviders = List.of( + AmazonBedrockProvider.AMAZONTITAN, + AmazonBedrockProvider.ANTHROPIC, + AmazonBedrockProvider.AI21LABS, + AmazonBedrockProvider.COHERE, + AmazonBedrockProvider.META, + AmazonBedrockProvider.MISTRAL + ); + + private static final List chatCompletionProvidersWithTopK = List.of( + AmazonBedrockProvider.ANTHROPIC, + AmazonBedrockProvider.COHERE, + AmazonBedrockProvider.MISTRAL + ); + + private static final Map embeddingsDefaultSimilarityMeasure = Map.of( + AmazonBedrockProvider.AMAZONTITAN, + SimilarityMeasure.COSINE, + AmazonBedrockProvider.COHERE, + SimilarityMeasure.DOT_PRODUCT + ); + + private static final Map embeddingsDefaultChunkSize = Map.of( + AmazonBedrockProvider.AMAZONTITAN, + 8192, + AmazonBedrockProvider.COHERE, + 2048 + ); + + private static final Map embeddingsMaxBatchSize = Map.of( + AmazonBedrockProvider.AMAZONTITAN, + 1, + AmazonBedrockProvider.COHERE, + 96 + ); + + public static boolean providerAllowsTaskType(AmazonBedrockProvider provider, TaskType taskType) { + switch (taskType) { + case COMPLETION -> { + return chatCompletionProviders.contains(provider); + } + case TEXT_EMBEDDING -> { + return embeddingProviders.contains(provider); + } + default -> { + return false; + } + } + } + + public static boolean chatCompletionProviderHasTopKParameter(AmazonBedrockProvider provider) { + return chatCompletionProvidersWithTopK.contains(provider); + } + + public static SimilarityMeasure getProviderDefaultSimilarityMeasure(AmazonBedrockProvider provider) { + if (embeddingsDefaultSimilarityMeasure.containsKey(provider)) { + return embeddingsDefaultSimilarityMeasure.get(provider); + } + + return SimilarityMeasure.COSINE; + } + + public static int getEmbeddingsProviderDefaultChunkSize(AmazonBedrockProvider provider) { + if (embeddingsDefaultChunkSize.containsKey(provider)) { + return embeddingsDefaultChunkSize.get(provider); + } + + return DEFAULT_MAX_CHUNK_SIZE; + } + + public static int getEmbeddingsMaxBatchSize(AmazonBedrockProvider provider) { + if (embeddingsMaxBatchSize.containsKey(provider)) { + return embeddingsMaxBatchSize.get(provider); + } + + return 1; + } + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockSecretSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockSecretSettings.java new file mode 100644 index 0000000000000..9e6328ce1c358 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockSecretSettings.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SecretSettings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.TransportVersions.ML_INFERENCE_AMAZON_BEDROCK_ADDED; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredSecureString; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.ACCESS_KEY_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.SECRET_KEY_FIELD; + +public class AmazonBedrockSecretSettings implements SecretSettings { + public static final String NAME = "amazon_bedrock_secret_settings"; + + public final SecureString accessKey; + public final SecureString secretKey; + + public static AmazonBedrockSecretSettings fromMap(@Nullable Map map) { + if (map == null) { + return null; + } + + ValidationException validationException = new ValidationException(); + SecureString secureAccessKey = extractRequiredSecureString( + map, + ACCESS_KEY_FIELD, + ModelSecrets.SECRET_SETTINGS, + validationException + ); + SecureString secureSecretKey = extractRequiredSecureString( + map, + SECRET_KEY_FIELD, + ModelSecrets.SECRET_SETTINGS, + validationException + ); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new AmazonBedrockSecretSettings(secureAccessKey, secureSecretKey); + } + + public AmazonBedrockSecretSettings(SecureString accessKey, SecureString secretKey) { + this.accessKey = Objects.requireNonNull(accessKey); + this.secretKey = Objects.requireNonNull(secretKey); + } + + public AmazonBedrockSecretSettings(StreamInput in) throws IOException { + this.accessKey = in.readSecureString(); + this.secretKey = in.readSecureString(); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return ML_INFERENCE_AMAZON_BEDROCK_ADDED; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeSecureString(accessKey); + out.writeSecureString(secretKey); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + builder.field(ACCESS_KEY_FIELD, accessKey.toString()); + builder.field(SECRET_KEY_FIELD, secretKey.toString()); + + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object object) { + if (this == object) return true; + if (object == null || getClass() != object.getClass()) return false; + AmazonBedrockSecretSettings that = (AmazonBedrockSecretSettings) object; + return Objects.equals(accessKey, that.accessKey) && Objects.equals(secretKey, that.secretKey); + } + + @Override + public int hashCode() { + return Objects.hash(accessKey, secretKey); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockService.java 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockService.java new file mode 100644 index 0000000000000..459ca367058f8 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockService.java @@ -0,0 +1,328 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Strings; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.ChunkedInferenceServiceResults; +import org.elasticsearch.inference.ChunkingOptions; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.inference.InputType; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.inference.chunking.EmbeddingRequestChunker; +import org.elasticsearch.xpack.inference.external.action.amazonbedrock.AmazonBedrockActionCreator; +import org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockRequestSender; +import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.SenderService; +import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.elasticsearch.xpack.inference.services.ServiceUtils; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionModel; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModel; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsServiceSettings; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.TransportVersions.ML_INFERENCE_AMAZON_BEDROCK_ADDED; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.parsePersistedConfigErrorMsg; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TOP_K_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProviderCapabilities.chatCompletionProviderHasTopKParameter; +import static 
org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProviderCapabilities.getEmbeddingsMaxBatchSize; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProviderCapabilities.getProviderDefaultSimilarityMeasure; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProviderCapabilities.providerAllowsTaskType; + +public class AmazonBedrockService extends SenderService { + public static final String NAME = "amazonbedrock"; + + private final Sender amazonBedrockSender; + + public AmazonBedrockService( + HttpRequestSender.Factory httpSenderFactory, + AmazonBedrockRequestSender.Factory amazonBedrockFactory, + ServiceComponents serviceComponents + ) { + super(httpSenderFactory, serviceComponents); + this.amazonBedrockSender = amazonBedrockFactory.createSender(); + } + + @Override + protected void doInfer( + Model model, + List input, + Map taskSettings, + InputType inputType, + TimeValue timeout, + ActionListener listener + ) { + var actionCreator = new AmazonBedrockActionCreator(amazonBedrockSender, this.getServiceComponents(), timeout); + if (model instanceof AmazonBedrockModel baseAmazonBedrockModel) { + var action = baseAmazonBedrockModel.accept(actionCreator, taskSettings); + action.execute(new DocumentsOnlyInput(input), timeout, listener); + } else { + listener.onFailure(createInvalidModelException(model)); + } + } + + @Override + protected void doInfer( + Model model, + String query, + List input, + Map taskSettings, + InputType inputType, + TimeValue timeout, + ActionListener listener + ) { + throw new UnsupportedOperationException("Amazon Bedrock service does not support inference with query input"); + } + + @Override + protected void doChunkedInfer( + Model model, + String query, + List input, + Map taskSettings, + InputType inputType, + ChunkingOptions chunkingOptions, + TimeValue timeout, + ActionListener> listener + ) { + var actionCreator = new AmazonBedrockActionCreator(amazonBedrockSender, this.getServiceComponents(), timeout); + if (model instanceof AmazonBedrockModel baseAmazonBedrockModel) { + var maxBatchSize = getEmbeddingsMaxBatchSize(baseAmazonBedrockModel.provider()); + var batchedRequests = new EmbeddingRequestChunker(input, maxBatchSize, EmbeddingRequestChunker.EmbeddingType.FLOAT) + .batchRequestsWithListeners(listener); + for (var request : batchedRequests) { + var action = baseAmazonBedrockModel.accept(actionCreator, taskSettings); + action.execute(new DocumentsOnlyInput(request.batch().inputs()), timeout, request.listener()); + } + } else { + listener.onFailure(createInvalidModelException(model)); + } + } + + @Override + public String name() { + return NAME; + } + + @Override + public void parseRequestConfig( + String modelId, + TaskType taskType, + Map config, + Set platformArchitectures, + ActionListener parsedModelListener + ) { + try { + Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); + Map taskSettingsMap = removeFromMapOrDefaultEmpty(config, ModelConfigurations.TASK_SETTINGS); + + AmazonBedrockModel model = createModel( + modelId, + taskType, + serviceSettingsMap, + taskSettingsMap, + serviceSettingsMap, + TaskType.unsupportedTaskTypeErrorMsg(taskType, NAME), + ConfigurationParseContext.REQUEST + ); + + throwIfNotEmptyMap(config, NAME); + throwIfNotEmptyMap(serviceSettingsMap, NAME); + throwIfNotEmptyMap(taskSettingsMap, NAME); + + parsedModelListener.onResponse(model); + } catch (Exception e) { + parsedModelListener.onFailure(e); 
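// [Illustrative aside, not part of the change set] parseRequestConfig above removes the
// service_settings and task_settings maps from the request body, and for request-time
// parsing the secret settings (access_key, secret_key) are read from that same
// service_settings map (note that serviceSettingsMap is also passed as the secrets argument
// in the createModel call above). Using only field names defined in this change
// (AmazonBedrockConstants, AmazonBedrockSecretSettings, NAME = "amazonbedrock"), a
// create-request body for a completion endpoint would look roughly like the sketch below;
// the surrounding REST request shape is assumed here, not shown in this diff:
//
//   {
//     "service": "amazonbedrock",
//     "service_settings": {
//       "access_key": "<aws-access-key>",
//       "secret_key": "<aws-secret-key>",
//       "region": "us-east-1",
//       "model": "<bedrock-model-id>",
//       "provider": "anthropic"
//     },
//     "task_settings": {
//       "temperature": 0.2,
//       "top_p": 0.9,
//       "max_new_tokens": 512
//     }
//   }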
+ } + } + + @Override + public Model parsePersistedConfigWithSecrets( + String modelId, + TaskType taskType, + Map config, + Map secrets + ) { + Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); + Map taskSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.TASK_SETTINGS); + Map secretSettingsMap = removeFromMapOrDefaultEmpty(secrets, ModelSecrets.SECRET_SETTINGS); + + return createModel( + modelId, + taskType, + serviceSettingsMap, + taskSettingsMap, + secretSettingsMap, + parsePersistedConfigErrorMsg(modelId, NAME), + ConfigurationParseContext.PERSISTENT + ); + } + + @Override + public Model parsePersistedConfig(String modelId, TaskType taskType, Map config) { + Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); + Map taskSettingsMap = removeFromMapOrDefaultEmpty(config, ModelConfigurations.TASK_SETTINGS); + + return createModel( + modelId, + taskType, + serviceSettingsMap, + taskSettingsMap, + null, + parsePersistedConfigErrorMsg(modelId, NAME), + ConfigurationParseContext.PERSISTENT + ); + } + + private static AmazonBedrockModel createModel( + String inferenceEntityId, + TaskType taskType, + Map serviceSettings, + Map taskSettings, + @Nullable Map secretSettings, + String failureMessage, + ConfigurationParseContext context + ) { + switch (taskType) { + case TEXT_EMBEDDING -> { + var model = new AmazonBedrockEmbeddingsModel( + inferenceEntityId, + taskType, + NAME, + serviceSettings, + taskSettings, + secretSettings, + context + ); + checkProviderForTask(TaskType.TEXT_EMBEDDING, model.provider()); + return model; + } + case COMPLETION -> { + var model = new AmazonBedrockChatCompletionModel( + inferenceEntityId, + taskType, + NAME, + serviceSettings, + taskSettings, + secretSettings, + context + ); + checkProviderForTask(TaskType.COMPLETION, model.provider()); + checkChatCompletionProviderForTopKParameter(model); + return model; + } + default -> throw new ElasticsearchStatusException(failureMessage, RestStatus.BAD_REQUEST); + } + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return ML_INFERENCE_AMAZON_BEDROCK_ADDED; + } + + /** + * For text embedding models get the embedding size and + * update the service settings. + * + * @param model The new model + * @param listener The listener + */ + @Override + public void checkModelConfig(Model model, ActionListener listener) { + if (model instanceof AmazonBedrockEmbeddingsModel embeddingsModel) { + ServiceUtils.getEmbeddingSize( + model, + this, + listener.delegateFailureAndWrap((l, size) -> l.onResponse(updateModelWithEmbeddingDetails(embeddingsModel, size))) + ); + } else { + listener.onResponse(model); + } + } + + private AmazonBedrockEmbeddingsModel updateModelWithEmbeddingDetails(AmazonBedrockEmbeddingsModel model, int embeddingSize) { + AmazonBedrockEmbeddingsServiceSettings serviceSettings = model.getServiceSettings(); + if (serviceSettings.dimensionsSetByUser() + && serviceSettings.dimensions() != null + && serviceSettings.dimensions() != embeddingSize) { + throw new ElasticsearchStatusException( + Strings.format( + "The retrieved embeddings size [%s] does not match the size specified in the settings [%s]. 
" + + "Please recreate the [%s] configuration with the correct dimensions", + embeddingSize, + serviceSettings.dimensions(), + model.getConfigurations().getInferenceEntityId() + ), + RestStatus.BAD_REQUEST + ); + } + + var similarityFromModel = serviceSettings.similarity(); + var similarityToUse = similarityFromModel == null ? getProviderDefaultSimilarityMeasure(model.provider()) : similarityFromModel; + + AmazonBedrockEmbeddingsServiceSettings settingsToUse = new AmazonBedrockEmbeddingsServiceSettings( + serviceSettings.region(), + serviceSettings.model(), + serviceSettings.provider(), + embeddingSize, + serviceSettings.dimensionsSetByUser(), + serviceSettings.maxInputTokens(), + similarityToUse, + serviceSettings.rateLimitSettings() + ); + + return new AmazonBedrockEmbeddingsModel(model, settingsToUse); + } + + private static void checkProviderForTask(TaskType taskType, AmazonBedrockProvider provider) { + if (providerAllowsTaskType(provider, taskType) == false) { + throw new ElasticsearchStatusException( + Strings.format("The [%s] task type for provider [%s] is not available", taskType, provider), + RestStatus.BAD_REQUEST + ); + } + } + + private static void checkChatCompletionProviderForTopKParameter(AmazonBedrockChatCompletionModel model) { + var taskSettings = model.getTaskSettings(); + if (taskSettings.topK() != null) { + if (chatCompletionProviderHasTopKParameter(model.provider()) == false) { + throw new ElasticsearchStatusException( + Strings.format("The [%s] task parameter is not available for provider [%s]", TOP_K_FIELD, model.provider()), + RestStatus.BAD_REQUEST + ); + } + } + } + + @Override + public void close() throws IOException { + super.close(); + IOUtils.closeWhileHandlingException(amazonBedrockSender); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceSettings.java new file mode 100644 index 0000000000000..13c7c0a8c5938 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceSettings.java @@ -0,0 +1,141 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.settings.FilteredXContentObject; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.TransportVersions.ML_INFERENCE_AMAZON_BEDROCK_ADDED; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredEnum; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredString; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MODEL_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.PROVIDER_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.REGION_FIELD; + +public abstract class AmazonBedrockServiceSettings extends FilteredXContentObject implements ServiceSettings { + + protected static final String AMAZON_BEDROCK_BASE_NAME = "amazon_bedrock"; + + protected final String region; + protected final String model; + protected final AmazonBedrockProvider provider; + protected final RateLimitSettings rateLimitSettings; + + // the default requests per minute are defined as per-model in the "Runtime quotas" on AWS + // see: https://docs.aws.amazon.com/bedrock/latest/userguide/quotas.html + // setting this to 240 requests per minute (4 requests / sec) is a sane default for us as it should be enough for + // decent throughput without exceeding the minimal for _most_ items. The user should consult + // the table above if using a model that might have a lesser limit (e.g. 
Anthropic Claude 3.5) + protected static final RateLimitSettings DEFAULT_RATE_LIMIT_SETTINGS = new RateLimitSettings(240); + + protected static AmazonBedrockServiceSettings.BaseAmazonBedrockCommonSettings fromMap( + Map map, + ValidationException validationException, + ConfigurationParseContext context + ) { + String model = extractRequiredString(map, MODEL_FIELD, ModelConfigurations.SERVICE_SETTINGS, validationException); + String region = extractRequiredString(map, REGION_FIELD, ModelConfigurations.SERVICE_SETTINGS, validationException); + AmazonBedrockProvider provider = extractRequiredEnum( + map, + PROVIDER_FIELD, + ModelConfigurations.SERVICE_SETTINGS, + AmazonBedrockProvider::fromString, + EnumSet.allOf(AmazonBedrockProvider.class), + validationException + ); + RateLimitSettings rateLimitSettings = RateLimitSettings.of( + map, + DEFAULT_RATE_LIMIT_SETTINGS, + validationException, + AMAZON_BEDROCK_BASE_NAME, + context + ); + + return new BaseAmazonBedrockCommonSettings(region, model, provider, rateLimitSettings); + } + + protected record BaseAmazonBedrockCommonSettings( + String region, + String model, + AmazonBedrockProvider provider, + @Nullable RateLimitSettings rateLimitSettings + ) {} + + protected AmazonBedrockServiceSettings(StreamInput in) throws IOException { + this.region = in.readString(); + this.model = in.readString(); + this.provider = in.readEnum(AmazonBedrockProvider.class); + this.rateLimitSettings = new RateLimitSettings(in); + } + + protected AmazonBedrockServiceSettings( + String region, + String model, + AmazonBedrockProvider provider, + @Nullable RateLimitSettings rateLimitSettings + ) { + this.region = Objects.requireNonNull(region); + this.model = Objects.requireNonNull(model); + this.provider = Objects.requireNonNull(provider); + this.rateLimitSettings = Objects.requireNonNullElse(rateLimitSettings, DEFAULT_RATE_LIMIT_SETTINGS); + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return ML_INFERENCE_AMAZON_BEDROCK_ADDED; + } + + public String region() { + return region; + } + + public String model() { + return model; + } + + public AmazonBedrockProvider provider() { + return provider; + } + + public RateLimitSettings rateLimitSettings() { + return rateLimitSettings; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(region); + out.writeString(model); + out.writeEnum(provider); + rateLimitSettings.writeTo(out); + } + + public void addBaseXContent(XContentBuilder builder, Params params) throws IOException { + toXContentFragmentOfExposedFields(builder, params); + } + + protected void addXContentFragmentOfExposedFields(XContentBuilder builder, Params params) throws IOException { + builder.field(REGION_FIELD, region); + builder.field(MODEL_FIELD, model); + builder.field(PROVIDER_FIELD, provider.name()); + rateLimitSettings.toXContent(builder, params); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionModel.java new file mode 100644 index 0000000000000..27dc607d671aa --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionModel.java @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
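The base fromMap above extracts three required service settings, the Bedrock model identifier, the AWS region and the provider enum, plus an optional rate limit that defaults to 240 requests per minute. The shape sketched below is an assumption for illustration: the AmazonBedrockConstants literals are taken to be region, model and provider, the rate_limit object is assumed to use the plugin's usual requests_per_minute field, and every value is a placeholder rather than a recommended setting.

import java.util.HashMap;
import java.util.Map;

public final class BedrockServiceSettingsExample {
    public static void main(String[] args) {
        // Hypothetical service_settings payload for an Amazon Bedrock endpoint.
        Map<String, Object> serviceSettings = new HashMap<>();
        serviceSettings.put("region", "us-east-1");                // AWS region (placeholder)
        serviceSettings.put("model", "example-bedrock-model-id");  // Bedrock model id (placeholder)
        serviceSettings.put("provider", "amazontitan");            // one of the AmazonBedrockProvider values (assumed spelling)
        serviceSettings.put("rate_limit", new HashMap<>(Map.of("requests_per_minute", 240)));
        System.out.println(serviceSettings);
    }
}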
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.completion; + +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.TaskSettings; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.action.amazonbedrock.AmazonBedrockActionVisitor; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockSecretSettings; + +import java.util.Map; + +public class AmazonBedrockChatCompletionModel extends AmazonBedrockModel { + + public static AmazonBedrockChatCompletionModel of(AmazonBedrockChatCompletionModel completionModel, Map taskSettings) { + if (taskSettings == null || taskSettings.isEmpty()) { + return completionModel; + } + + var requestTaskSettings = AmazonBedrockChatCompletionRequestTaskSettings.fromMap(taskSettings); + var taskSettingsToUse = AmazonBedrockChatCompletionTaskSettings.of(completionModel.getTaskSettings(), requestTaskSettings); + return new AmazonBedrockChatCompletionModel(completionModel, taskSettingsToUse); + } + + public AmazonBedrockChatCompletionModel( + String inferenceEntityId, + TaskType taskType, + String name, + Map serviceSettings, + Map taskSettings, + Map secretSettings, + ConfigurationParseContext context + ) { + this( + inferenceEntityId, + taskType, + name, + AmazonBedrockChatCompletionServiceSettings.fromMap(serviceSettings, context), + AmazonBedrockChatCompletionTaskSettings.fromMap(taskSettings), + AmazonBedrockSecretSettings.fromMap(secretSettings) + ); + } + + public AmazonBedrockChatCompletionModel( + String inferenceEntityId, + TaskType taskType, + String service, + AmazonBedrockChatCompletionServiceSettings serviceSettings, + AmazonBedrockChatCompletionTaskSettings taskSettings, + AmazonBedrockSecretSettings secrets + ) { + super(new ModelConfigurations(inferenceEntityId, taskType, service, serviceSettings, taskSettings), new ModelSecrets(secrets)); + } + + public AmazonBedrockChatCompletionModel(Model model, TaskSettings taskSettings) { + super(model, taskSettings); + } + + @Override + public ExecutableAction accept(AmazonBedrockActionVisitor creator, Map taskSettings) { + return creator.create(this, taskSettings); + } + + @Override + public AmazonBedrockChatCompletionServiceSettings getServiceSettings() { + return (AmazonBedrockChatCompletionServiceSettings) super.getServiceSettings(); + } + + @Override + public AmazonBedrockChatCompletionTaskSettings getTaskSettings() { + return (AmazonBedrockChatCompletionTaskSettings) super.getTaskSettings(); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionRequestTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionRequestTaskSettings.java new file mode 100644 index 0000000000000..5985dcd56c5d2 --- /dev/null +++ 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionRequestTaskSettings.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.completion; + +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; + +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalDoubleInRange; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveInteger; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MAX_NEW_TOKENS_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MAX_TEMPERATURE_TOP_P_TOP_K_VALUE; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MIN_TEMPERATURE_TOP_P_TOP_K_VALUE; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TEMPERATURE_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TOP_K_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TOP_P_FIELD; + +public record AmazonBedrockChatCompletionRequestTaskSettings( + @Nullable Double temperature, + @Nullable Double topP, + @Nullable Double topK, + @Nullable Integer maxNewTokens +) { + + public static final AmazonBedrockChatCompletionRequestTaskSettings EMPTY_SETTINGS = new AmazonBedrockChatCompletionRequestTaskSettings( + null, + null, + null, + null + ); + + /** + * Extracts the task settings from a map. All settings are considered optional and the absence of a setting + * does not throw an error. 
+ * + * @param map the settings received from a request + * @return a {@link AmazonBedrockChatCompletionRequestTaskSettings} + */ + public static AmazonBedrockChatCompletionRequestTaskSettings fromMap(Map map) { + if (map.isEmpty()) { + return AmazonBedrockChatCompletionRequestTaskSettings.EMPTY_SETTINGS; + } + + ValidationException validationException = new ValidationException(); + + var temperature = extractOptionalDoubleInRange( + map, + TEMPERATURE_FIELD, + MIN_TEMPERATURE_TOP_P_TOP_K_VALUE, + MAX_TEMPERATURE_TOP_P_TOP_K_VALUE, + ModelConfigurations.TASK_SETTINGS, + validationException + ); + var topP = extractOptionalDoubleInRange( + map, + TOP_P_FIELD, + MIN_TEMPERATURE_TOP_P_TOP_K_VALUE, + MAX_TEMPERATURE_TOP_P_TOP_K_VALUE, + ModelConfigurations.TASK_SETTINGS, + validationException + ); + var topK = extractOptionalDoubleInRange( + map, + TOP_K_FIELD, + MIN_TEMPERATURE_TOP_P_TOP_K_VALUE, + MAX_TEMPERATURE_TOP_P_TOP_K_VALUE, + ModelConfigurations.TASK_SETTINGS, + validationException + ); + Integer maxNewTokens = extractOptionalPositiveInteger( + map, + MAX_NEW_TOKENS_FIELD, + ModelConfigurations.TASK_SETTINGS, + validationException + ); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new AmazonBedrockChatCompletionRequestTaskSettings(temperature, topP, topK, maxNewTokens); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionServiceSettings.java new file mode 100644 index 0000000000000..fc3d09c6eea7a --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionServiceSettings.java @@ -0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
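As the javadoc above says, every field of the request-level task settings is optional: an empty map short-circuits to EMPTY_SETTINGS, and fields that are present are still checked (temperature, top_p and top_k against the shared min/max constants, max_new_tokens as a positive integer). A small usage sketch against the class as defined above; it assumes the AmazonBedrockConstants literals resolve to temperature and max_new_tokens, and RequestTaskSettingsSketch is just an illustrative wrapper.

import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionRequestTaskSettings;

public final class RequestTaskSettingsSketch {
    public static void main(String[] args) {
        // An empty request map leaves the persisted task settings untouched.
        var unchanged = AmazonBedrockChatCompletionRequestTaskSettings.fromMap(new HashMap<>());
        System.out.println(unchanged == AmazonBedrockChatCompletionRequestTaskSettings.EMPTY_SETTINGS); // true

        // A partial override: only the supplied fields are populated, the rest stay null.
        Map<String, Object> overrides = new HashMap<>();
        overrides.put("temperature", 0.2);
        overrides.put("max_new_tokens", 256);
        var parsed = AmazonBedrockChatCompletionRequestTaskSettings.fromMap(overrides);
        System.out.println(parsed.temperature() + " " + parsed.maxNewTokens() + " " + parsed.topP()); // 0.2 256 null
    }
}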
+ */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.completion; + +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockServiceSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +public class AmazonBedrockChatCompletionServiceSettings extends AmazonBedrockServiceSettings { + public static final String NAME = "amazon_bedrock_chat_completion_service_settings"; + + public static AmazonBedrockChatCompletionServiceSettings fromMap( + Map serviceSettings, + ConfigurationParseContext context + ) { + ValidationException validationException = new ValidationException(); + + var baseSettings = AmazonBedrockServiceSettings.fromMap(serviceSettings, validationException, context); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new AmazonBedrockChatCompletionServiceSettings( + baseSettings.region(), + baseSettings.model(), + baseSettings.provider(), + baseSettings.rateLimitSettings() + ); + } + + public AmazonBedrockChatCompletionServiceSettings( + String region, + String model, + AmazonBedrockProvider provider, + RateLimitSettings rateLimitSettings + ) { + super(region, model, provider, rateLimitSettings); + } + + public AmazonBedrockChatCompletionServiceSettings(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + super.addBaseXContent(builder, params); + builder.endObject(); + return builder; + } + + @Override + protected XContentBuilder toXContentFragmentOfExposedFields(XContentBuilder builder, Params params) throws IOException { + super.addXContentFragmentOfExposedFields(builder, params); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AmazonBedrockChatCompletionServiceSettings that = (AmazonBedrockChatCompletionServiceSettings) o; + + return Objects.equals(region, that.region) + && Objects.equals(provider, that.provider) + && Objects.equals(model, that.model) + && Objects.equals(rateLimitSettings, that.rateLimitSettings); + } + + @Override + public int hashCode() { + return Objects.hash(region, model, provider, rateLimitSettings); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionTaskSettings.java new file mode 100644 index 0000000000000..e689e68794e1f --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionTaskSettings.java @@ -0,0 +1,190 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.completion; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.TaskSettings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.TransportVersions.ML_INFERENCE_AMAZON_BEDROCK_ADDED; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalDoubleInRange; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveInteger; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MAX_NEW_TOKENS_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MAX_TEMPERATURE_TOP_P_TOP_K_VALUE; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MIN_TEMPERATURE_TOP_P_TOP_K_VALUE; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TEMPERATURE_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TOP_K_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TOP_P_FIELD; + +public class AmazonBedrockChatCompletionTaskSettings implements TaskSettings { + public static final String NAME = "amazon_bedrock_chat_completion_task_settings"; + + public static final AmazonBedrockChatCompletionRequestTaskSettings EMPTY_SETTINGS = new AmazonBedrockChatCompletionRequestTaskSettings( + null, + null, + null, + null + ); + + public static AmazonBedrockChatCompletionTaskSettings fromMap(Map settings) { + ValidationException validationException = new ValidationException(); + + Double temperature = extractOptionalDoubleInRange( + settings, + TEMPERATURE_FIELD, + MIN_TEMPERATURE_TOP_P_TOP_K_VALUE, + MAX_TEMPERATURE_TOP_P_TOP_K_VALUE, + ModelConfigurations.TASK_SETTINGS, + validationException + ); + Double topP = extractOptionalDoubleInRange( + settings, + TOP_P_FIELD, + MIN_TEMPERATURE_TOP_P_TOP_K_VALUE, + MAX_TEMPERATURE_TOP_P_TOP_K_VALUE, + ModelConfigurations.TASK_SETTINGS, + validationException + ); + Double topK = extractOptionalDoubleInRange( + settings, + TOP_K_FIELD, + MIN_TEMPERATURE_TOP_P_TOP_K_VALUE, + MAX_TEMPERATURE_TOP_P_TOP_K_VALUE, + ModelConfigurations.TASK_SETTINGS, + validationException + ); + Integer maxNewTokens = extractOptionalPositiveInteger( + settings, + MAX_NEW_TOKENS_FIELD, + ModelConfigurations.TASK_SETTINGS, + validationException + ); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new AmazonBedrockChatCompletionTaskSettings(temperature, topP, topK, maxNewTokens); + } + + public static AmazonBedrockChatCompletionTaskSettings of( + AmazonBedrockChatCompletionTaskSettings originalSettings, + AmazonBedrockChatCompletionRequestTaskSettings requestSettings + ) { + var temperature = requestSettings.temperature() == null ? 
originalSettings.temperature() : requestSettings.temperature(); + var topP = requestSettings.topP() == null ? originalSettings.topP() : requestSettings.topP(); + var topK = requestSettings.topK() == null ? originalSettings.topK() : requestSettings.topK(); + var maxNewTokens = requestSettings.maxNewTokens() == null ? originalSettings.maxNewTokens() : requestSettings.maxNewTokens(); + + return new AmazonBedrockChatCompletionTaskSettings(temperature, topP, topK, maxNewTokens); + } + + private final Double temperature; + private final Double topP; + private final Double topK; + private final Integer maxNewTokens; + + public AmazonBedrockChatCompletionTaskSettings( + @Nullable Double temperature, + @Nullable Double topP, + @Nullable Double topK, + @Nullable Integer maxNewTokens + ) { + this.temperature = temperature; + this.topP = topP; + this.topK = topK; + this.maxNewTokens = maxNewTokens; + } + + public AmazonBedrockChatCompletionTaskSettings(StreamInput in) throws IOException { + this.temperature = in.readOptionalDouble(); + this.topP = in.readOptionalDouble(); + this.topK = in.readOptionalDouble(); + this.maxNewTokens = in.readOptionalVInt(); + } + + public Double temperature() { + return temperature; + } + + public Double topP() { + return topP; + } + + public Double topK() { + return topK; + } + + public Integer maxNewTokens() { + return maxNewTokens; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return ML_INFERENCE_AMAZON_BEDROCK_ADDED; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalDouble(temperature); + out.writeOptionalDouble(topP); + out.writeOptionalDouble(topK); + out.writeOptionalVInt(maxNewTokens); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + if (temperature != null) { + builder.field(TEMPERATURE_FIELD, temperature); + } + if (topP != null) { + builder.field(TOP_P_FIELD, topP); + } + if (topK != null) { + builder.field(TOP_K_FIELD, topK); + } + if (maxNewTokens != null) { + builder.field(MAX_NEW_TOKENS_FIELD, maxNewTokens); + } + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AmazonBedrockChatCompletionTaskSettings that = (AmazonBedrockChatCompletionTaskSettings) o; + return Objects.equals(temperature, that.temperature) + && Objects.equals(topP, that.topP) + && Objects.equals(topK, that.topK) + && Objects.equals(maxNewTokens, that.maxNewTokens); + } + + @Override + public int hashCode() { + return Objects.hash(temperature, topP, topK, maxNewTokens); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsModel.java new file mode 100644 index 0000000000000..0e3a954a03279 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsModel.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
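AmazonBedrockChatCompletionTaskSettings.of above implements the usual per-request override pattern: any field carried by the request settings wins, everything else falls back to the persisted value. A short illustration using the two classes exactly as defined above; the numbers are arbitrary.

import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionRequestTaskSettings;
import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionTaskSettings;

public final class TaskSettingsMergeSketch {
    public static void main(String[] args) {
        // Persisted when the endpoint was created: temperature and max_new_tokens are set.
        var persisted = new AmazonBedrockChatCompletionTaskSettings(0.7, null, null, 128);
        // Sent with one inference request: only max_new_tokens is overridden.
        var request = new AmazonBedrockChatCompletionRequestTaskSettings(null, null, null, 512);

        var merged = AmazonBedrockChatCompletionTaskSettings.of(persisted, request);
        System.out.println(merged.temperature());  // 0.7, kept from the persisted settings
        System.out.println(merged.maxNewTokens()); // 512, taken from the request
    }
}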
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings; + +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.inference.EmptyTaskSettings; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.inference.TaskSettings; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.action.amazonbedrock.AmazonBedrockActionVisitor; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockSecretSettings; + +import java.util.Map; + +public class AmazonBedrockEmbeddingsModel extends AmazonBedrockModel { + + public static AmazonBedrockEmbeddingsModel of(AmazonBedrockEmbeddingsModel embeddingsModel, Map taskSettings) { + if (taskSettings != null && taskSettings.isEmpty() == false) { + // no task settings allowed + var validationException = new ValidationException(); + validationException.addValidationError("Amazon Bedrock embeddings model cannot have task settings"); + throw validationException; + } + + return embeddingsModel; + } + + public AmazonBedrockEmbeddingsModel( + String inferenceEntityId, + TaskType taskType, + String service, + Map serviceSettings, + Map taskSettings, + Map secretSettings, + ConfigurationParseContext context + ) { + this( + inferenceEntityId, + taskType, + service, + AmazonBedrockEmbeddingsServiceSettings.fromMap(serviceSettings, context), + new EmptyTaskSettings(), + AmazonBedrockSecretSettings.fromMap(secretSettings) + ); + } + + public AmazonBedrockEmbeddingsModel( + String inferenceEntityId, + TaskType taskType, + String service, + AmazonBedrockEmbeddingsServiceSettings serviceSettings, + TaskSettings taskSettings, + AmazonBedrockSecretSettings secrets + ) { + super( + new ModelConfigurations(inferenceEntityId, taskType, service, serviceSettings, new EmptyTaskSettings()), + new ModelSecrets(secrets) + ); + } + + public AmazonBedrockEmbeddingsModel(Model model, ServiceSettings serviceSettings) { + super(model, serviceSettings); + } + + @Override + public ExecutableAction accept(AmazonBedrockActionVisitor creator, Map taskSettings) { + return creator.create(this, taskSettings); + } + + @Override + public AmazonBedrockEmbeddingsServiceSettings getServiceSettings() { + return (AmazonBedrockEmbeddingsServiceSettings) super.getServiceSettings(); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsServiceSettings.java new file mode 100644 index 0000000000000..4bf037558c618 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsServiceSettings.java @@ -0,0 +1,220 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings; + +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.ServiceUtils; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockServiceSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.services.ServiceFields.DIMENSIONS; +import static org.elasticsearch.xpack.inference.services.ServiceFields.MAX_INPUT_TOKENS; +import static org.elasticsearch.xpack.inference.services.ServiceFields.SIMILARITY; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalBoolean; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveInteger; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractSimilarity; + +public class AmazonBedrockEmbeddingsServiceSettings extends AmazonBedrockServiceSettings { + public static final String NAME = "amazon_bedrock_embeddings_service_settings"; + static final String DIMENSIONS_SET_BY_USER = "dimensions_set_by_user"; + + private final Integer dimensions; + private final Boolean dimensionsSetByUser; + private final Integer maxInputTokens; + private final SimilarityMeasure similarity; + + public static AmazonBedrockEmbeddingsServiceSettings fromMap(Map map, ConfigurationParseContext context) { + ValidationException validationException = new ValidationException(); + + var settings = embeddingSettingsFromMap(map, validationException, context); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return settings; + } + + private static AmazonBedrockEmbeddingsServiceSettings embeddingSettingsFromMap( + Map map, + ValidationException validationException, + ConfigurationParseContext context + ) { + var baseSettings = AmazonBedrockServiceSettings.fromMap(map, validationException, context); + SimilarityMeasure similarity = extractSimilarity(map, ModelConfigurations.SERVICE_SETTINGS, validationException); + + Integer maxTokens = extractOptionalPositiveInteger( + map, + MAX_INPUT_TOKENS, + ModelConfigurations.SERVICE_SETTINGS, + validationException + ); + Integer dims = extractOptionalPositiveInteger(map, DIMENSIONS, ModelConfigurations.SERVICE_SETTINGS, validationException); + + Boolean dimensionsSetByUser = extractOptionalBoolean(map, DIMENSIONS_SET_BY_USER, validationException); + + switch (context) { + case REQUEST -> { + if (dimensionsSetByUser != null) { + validationException.addValidationError( + ServiceUtils.invalidSettingError(DIMENSIONS_SET_BY_USER, ModelConfigurations.SERVICE_SETTINGS) + ); + } + + if (dims != null) { + 
validationException.addValidationError( + ServiceUtils.invalidSettingError(DIMENSIONS, ModelConfigurations.SERVICE_SETTINGS) + ); + } + dimensionsSetByUser = false; + } + case PERSISTENT -> { + if (dimensionsSetByUser == null) { + validationException.addValidationError( + ServiceUtils.missingSettingErrorMsg(DIMENSIONS_SET_BY_USER, ModelConfigurations.SERVICE_SETTINGS) + ); + } + } + } + return new AmazonBedrockEmbeddingsServiceSettings( + baseSettings.region(), + baseSettings.model(), + baseSettings.provider(), + dims, + dimensionsSetByUser, + maxTokens, + similarity, + baseSettings.rateLimitSettings() + ); + } + + public AmazonBedrockEmbeddingsServiceSettings(StreamInput in) throws IOException { + super(in); + dimensions = in.readOptionalVInt(); + dimensionsSetByUser = in.readBoolean(); + maxInputTokens = in.readOptionalVInt(); + similarity = in.readOptionalEnum(SimilarityMeasure.class); + } + + public AmazonBedrockEmbeddingsServiceSettings( + String region, + String model, + AmazonBedrockProvider provider, + @Nullable Integer dimensions, + Boolean dimensionsSetByUser, + @Nullable Integer maxInputTokens, + @Nullable SimilarityMeasure similarity, + RateLimitSettings rateLimitSettings + ) { + super(region, model, provider, rateLimitSettings); + this.dimensions = dimensions; + this.dimensionsSetByUser = dimensionsSetByUser; + this.maxInputTokens = maxInputTokens; + this.similarity = similarity; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalVInt(dimensions); + out.writeBoolean(dimensionsSetByUser); + out.writeOptionalVInt(maxInputTokens); + out.writeOptionalEnum(similarity); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + super.addBaseXContent(builder, params); + builder.field(DIMENSIONS_SET_BY_USER, dimensionsSetByUser); + + builder.endObject(); + return builder; + } + + @Override + protected XContentBuilder toXContentFragmentOfExposedFields(XContentBuilder builder, Params params) throws IOException { + super.addXContentFragmentOfExposedFields(builder, params); + + if (dimensions != null) { + builder.field(DIMENSIONS, dimensions); + } + if (maxInputTokens != null) { + builder.field(MAX_INPUT_TOKENS, maxInputTokens); + } + if (similarity != null) { + builder.field(SIMILARITY, similarity); + } + + return builder; + } + + @Override + public SimilarityMeasure similarity() { + return similarity; + } + + @Override + public Integer dimensions() { + return dimensions; + } + + public boolean dimensionsSetByUser() { + return this.dimensionsSetByUser; + } + + public Integer maxInputTokens() { + return maxInputTokens; + } + + @Override + public DenseVectorFieldMapper.ElementType elementType() { + return DenseVectorFieldMapper.ElementType.FLOAT; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AmazonBedrockEmbeddingsServiceSettings that = (AmazonBedrockEmbeddingsServiceSettings) o; + + return Objects.equals(region, that.region) + && Objects.equals(provider, that.provider) + && Objects.equals(model, that.model) + && Objects.equals(dimensions, that.dimensions) + && Objects.equals(dimensionsSetByUser, that.dimensionsSetByUser) + && Objects.equals(maxInputTokens, that.maxInputTokens) + && Objects.equals(similarity, that.similarity) + && Objects.equals(rateLimitSettings, 
that.rateLimitSettings); + } + + @Override + public int hashCode() { + return Objects.hash(region, model, provider, dimensions, dimensionsSetByUser, maxInputTokens, similarity, rateLimitSettings); + } + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicModel.java new file mode 100644 index 0000000000000..88d4e3e0d0c82 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicModel.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.anthropic; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.inference.TaskSettings; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.action.anthropic.AnthropicActionVisitor; +import org.elasticsearch.xpack.inference.services.ServiceUtils; +import org.elasticsearch.xpack.inference.services.settings.ApiKeySecrets; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.Map; +import java.util.Objects; + +public abstract class AnthropicModel extends Model { + + private final AnthropicRateLimitServiceSettings rateLimitServiceSettings; + private final SecureString apiKey; + private final URI uri; + + public AnthropicModel( + ModelConfigurations configurations, + ModelSecrets secrets, + AnthropicRateLimitServiceSettings rateLimitServiceSettings, + CheckedSupplier uriSupplier, + @Nullable ApiKeySecrets apiKeySecrets + ) { + super(configurations, secrets); + + this.rateLimitServiceSettings = Objects.requireNonNull(rateLimitServiceSettings); + apiKey = ServiceUtils.apiKey(apiKeySecrets); + + try { + uri = uriSupplier.get(); + } catch (URISyntaxException e) { + throw new ElasticsearchStatusException( + Strings.format("Failed to construct %s URL", configurations.getService()), + RestStatus.BAD_REQUEST, + e + ); + } + } + + protected AnthropicModel(AnthropicModel model, TaskSettings taskSettings) { + super(model, taskSettings); + + rateLimitServiceSettings = model.rateLimitServiceSettings(); + apiKey = model.apiKey(); + uri = model.getUri(); + } + + protected AnthropicModel(AnthropicModel model, ServiceSettings serviceSettings) { + super(model, serviceSettings); + + rateLimitServiceSettings = model.rateLimitServiceSettings(); + apiKey = model.apiKey(); + uri = model.getUri(); + } + + public URI getUri() { + return uri; + } + + public SecureString apiKey() { + return apiKey; + } + + public AnthropicRateLimitServiceSettings rateLimitServiceSettings() { + return rateLimitServiceSettings; + } + + public abstract ExecutableAction accept(AnthropicActionVisitor creator, Map taskSettings); +} diff --git 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicRateLimitServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicRateLimitServiceSettings.java new file mode 100644 index 0000000000000..1d452e6415bc9 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicRateLimitServiceSettings.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.anthropic; + +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; + +/** + * The service setting fields for anthropic that determine how to rate limit requests. + */ +public interface AnthropicRateLimitServiceSettings { + String modelId(); + + RateLimitSettings rateLimitSettings(); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicService.java new file mode 100644 index 0000000000000..d1db6f260351b --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicService.java @@ -0,0 +1,217 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.anthropic; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.ChunkedInferenceServiceResults; +import org.elasticsearch.inference.ChunkingOptions; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.inference.InputType; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.inference.external.action.anthropic.AnthropicActionCreator; +import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.SenderService; +import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionModel; + +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.parsePersistedConfigErrorMsg; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; + +public class AnthropicService extends SenderService { + public static final String NAME = "anthropic"; + + public AnthropicService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { + super(factory, serviceComponents); + } + + @Override + public String name() { + return NAME; + } + + @Override + public void parseRequestConfig( + String inferenceEntityId, + TaskType taskType, + Map config, + Set platformArchitectures, + ActionListener parsedModelListener + ) { + try { + Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); + Map taskSettingsMap = removeFromMapOrDefaultEmpty(config, ModelConfigurations.TASK_SETTINGS); + + AnthropicModel model = createModel( + inferenceEntityId, + taskType, + serviceSettingsMap, + taskSettingsMap, + serviceSettingsMap, + TaskType.unsupportedTaskTypeErrorMsg(taskType, NAME), + ConfigurationParseContext.REQUEST + ); + + throwIfNotEmptyMap(config, NAME); + throwIfNotEmptyMap(serviceSettingsMap, NAME); + throwIfNotEmptyMap(taskSettingsMap, NAME); + + parsedModelListener.onResponse(model); + } catch (Exception e) { + parsedModelListener.onFailure(e); + } + } + + private static AnthropicModel createModelFromPersistent( + String inferenceEntityId, + TaskType taskType, + Map serviceSettings, + Map taskSettings, + @Nullable Map secretSettings, + String failureMessage + ) { + return createModel( + inferenceEntityId, + taskType, + serviceSettings, + taskSettings, + secretSettings, + failureMessage, + ConfigurationParseContext.PERSISTENT + ); + } + + private static AnthropicModel 
createModel( + String inferenceEntityId, + TaskType taskType, + Map serviceSettings, + Map taskSettings, + @Nullable Map secretSettings, + String failureMessage, + ConfigurationParseContext context + ) { + return switch (taskType) { + case COMPLETION -> new AnthropicChatCompletionModel( + inferenceEntityId, + taskType, + NAME, + serviceSettings, + taskSettings, + secretSettings, + context + ); + default -> throw new ElasticsearchStatusException(failureMessage, RestStatus.BAD_REQUEST); + }; + } + + @Override + public AnthropicModel parsePersistedConfigWithSecrets( + String inferenceEntityId, + TaskType taskType, + Map config, + Map secrets + ) { + Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); + Map taskSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.TASK_SETTINGS); + Map secretSettingsMap = removeFromMapOrDefaultEmpty(secrets, ModelSecrets.SECRET_SETTINGS); + + return createModelFromPersistent( + inferenceEntityId, + taskType, + serviceSettingsMap, + taskSettingsMap, + secretSettingsMap, + parsePersistedConfigErrorMsg(inferenceEntityId, NAME) + ); + } + + @Override + public AnthropicModel parsePersistedConfig(String inferenceEntityId, TaskType taskType, Map config) { + Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); + Map taskSettingsMap = removeFromMapOrDefaultEmpty(config, ModelConfigurations.TASK_SETTINGS); + + return createModelFromPersistent( + inferenceEntityId, + taskType, + serviceSettingsMap, + taskSettingsMap, + null, + parsePersistedConfigErrorMsg(inferenceEntityId, NAME) + ); + } + + @Override + public void doInfer( + Model model, + List input, + Map taskSettings, + InputType inputType, + TimeValue timeout, + ActionListener listener + ) { + if (model instanceof AnthropicModel == false) { + listener.onFailure(createInvalidModelException(model)); + return; + } + + AnthropicModel anthropicModel = (AnthropicModel) model; + var actionCreator = new AnthropicActionCreator(getSender(), getServiceComponents()); + + var action = anthropicModel.accept(actionCreator, taskSettings); + action.execute(new DocumentsOnlyInput(input), timeout, listener); + } + + @Override + protected void doInfer( + Model model, + String query, + List input, + Map taskSettings, + InputType inputType, + TimeValue timeout, + ActionListener listener + ) { + throw new UnsupportedOperationException("Anthropic service does not support inference with query input"); + } + + @Override + protected void doChunkedInfer( + Model model, + @Nullable String query, + List input, + Map taskSettings, + InputType inputType, + ChunkingOptions chunkingOptions, + TimeValue timeout, + ActionListener> listener + ) { + throw new UnsupportedOperationException("Anthropic service does not support chunked inference"); + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ML_ANTHROPIC_INTEGRATION_ADDED; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceFields.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceFields.java new file mode 100644 index 0000000000000..f633df963a098 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceFields.java @@ -0,0 +1,16 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.anthropic; + +public class AnthropicServiceFields { + + public static final String MAX_TOKENS = "max_tokens"; + public static final String TEMPERATURE_FIELD = "temperature"; + public static final String TOP_P_FIELD = "top_p"; + public static final String TOP_K_FIELD = "top_k"; +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionModel.java new file mode 100644 index 0000000000000..942cae8960daf --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionModel.java @@ -0,0 +1,126 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.anthropic.completion; + +import org.apache.http.client.utils.URIBuilder; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.action.anthropic.AnthropicActionVisitor; +import org.elasticsearch.xpack.inference.external.request.anthropic.AnthropicRequestUtils; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.ServiceUtils; +import org.elasticsearch.xpack.inference.services.anthropic.AnthropicModel; +import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.Map; + +public class AnthropicChatCompletionModel extends AnthropicModel { + + public static AnthropicChatCompletionModel of(AnthropicChatCompletionModel model, Map taskSettings) { + if (taskSettings == null || taskSettings.isEmpty()) { + return model; + } + + var requestTaskSettings = AnthropicChatCompletionRequestTaskSettings.fromMap(taskSettings); + return new AnthropicChatCompletionModel( + model, + AnthropicChatCompletionTaskSettings.of(model.getTaskSettings(), requestTaskSettings) + ); + } + + public AnthropicChatCompletionModel( + String inferenceEntityId, + TaskType taskType, + String service, + Map serviceSettings, + Map taskSettings, + @Nullable Map secrets, + ConfigurationParseContext context + ) { + this( + inferenceEntityId, + taskType, + service, + AnthropicChatCompletionServiceSettings.fromMap(serviceSettings, context), + AnthropicChatCompletionTaskSettings.fromMap(taskSettings, context), + DefaultSecretSettings.fromMap(secrets) + ); + } + + AnthropicChatCompletionModel( + String inferenceEntityId, + TaskType taskType, + String service, + AnthropicChatCompletionServiceSettings serviceSettings, + AnthropicChatCompletionTaskSettings taskSettings, + @Nullable DefaultSecretSettings secrets + ) { + super( + new ModelConfigurations(inferenceEntityId, taskType, 
service, serviceSettings, taskSettings), + new ModelSecrets(secrets), + serviceSettings, + AnthropicChatCompletionModel::buildDefaultUri, + secrets + ); + } + + // This should only be used for testing + AnthropicChatCompletionModel( + String inferenceEntityId, + TaskType taskType, + String service, + String url, + AnthropicChatCompletionServiceSettings serviceSettings, + AnthropicChatCompletionTaskSettings taskSettings, + @Nullable DefaultSecretSettings secrets + ) { + super( + new ModelConfigurations(inferenceEntityId, taskType, service, serviceSettings, taskSettings), + new ModelSecrets(secrets), + serviceSettings, + () -> ServiceUtils.createUri(url), + secrets + ); + } + + private AnthropicChatCompletionModel(AnthropicChatCompletionModel originalModel, AnthropicChatCompletionTaskSettings taskSettings) { + super(originalModel, taskSettings); + } + + @Override + public AnthropicChatCompletionServiceSettings getServiceSettings() { + return (AnthropicChatCompletionServiceSettings) super.getServiceSettings(); + } + + @Override + public AnthropicChatCompletionTaskSettings getTaskSettings() { + return (AnthropicChatCompletionTaskSettings) super.getTaskSettings(); + } + + @Override + public DefaultSecretSettings getSecretSettings() { + return (DefaultSecretSettings) super.getSecretSettings(); + } + + @Override + public ExecutableAction accept(AnthropicActionVisitor creator, Map taskSettings) { + return creator.create(this, taskSettings); + } + + private static URI buildDefaultUri() throws URISyntaxException { + return new URIBuilder().setScheme("https") + .setHost(AnthropicRequestUtils.HOST) + .setPathSegments(AnthropicRequestUtils.API_VERSION_1, AnthropicRequestUtils.MESSAGES_PATH) + .build(); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionRequestTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionRequestTaskSettings.java new file mode 100644 index 0000000000000..85fdc12685fde --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionRequestTaskSettings.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.anthropic.completion; + +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; + +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveInteger; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeAsType; +import static org.elasticsearch.xpack.inference.services.anthropic.AnthropicServiceFields.MAX_TOKENS; +import static org.elasticsearch.xpack.inference.services.anthropic.AnthropicServiceFields.TEMPERATURE_FIELD; +import static org.elasticsearch.xpack.inference.services.anthropic.AnthropicServiceFields.TOP_K_FIELD; +import static org.elasticsearch.xpack.inference.services.anthropic.AnthropicServiceFields.TOP_P_FIELD; + +/** + * This class handles extracting Anthropic task settings from a request. 
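One detail worth calling out from AnthropicChatCompletionModel above: the default endpoint is assembled lazily through a URI supplier, and any URISyntaxException is surfaced as a 400 by the AnthropicModel base class. The snippet below simply evaluates that builder with literal values; the real constants live in AnthropicRequestUtils, and the resulting https://api.anthropic.com/v1/messages address is an assumption about what they resolve to, not something shown in this diff.

import java.net.URI;
import java.net.URISyntaxException;

import org.apache.http.client.utils.URIBuilder;

public final class AnthropicUriSketch {
    public static void main(String[] args) throws URISyntaxException {
        // Mirrors buildDefaultUri above with assumed literal values.
        URI uri = new URIBuilder().setScheme("https")
            .setHost("api.anthropic.com")
            .setPathSegments("v1", "messages")
            .build();
        System.out.println(uri); // https://api.anthropic.com/v1/messages
    }
}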
The difference between this class and + * {@link AnthropicChatCompletionTaskSettings} is that this class considers all fields as optional. It will not throw an error if a field + * is missing. This allows overriding persistent task settings. + * @param maxTokens the number of tokens to generate before stopping + */ +public record AnthropicChatCompletionRequestTaskSettings( + @Nullable Integer maxTokens, + @Nullable Double temperature, + @Nullable Double topP, + @Nullable Integer topK +) { + + public static final AnthropicChatCompletionRequestTaskSettings EMPTY_SETTINGS = new AnthropicChatCompletionRequestTaskSettings( + null, + null, + null, + null + ); + + /** + * Extracts the task settings from a map. All settings are considered optional and the absence of a setting + * does not throw an error. + * + * @param map the settings received from a request + * @return a {@link AnthropicChatCompletionRequestTaskSettings} + */ + public static AnthropicChatCompletionRequestTaskSettings fromMap(Map map) { + if (map.isEmpty()) { + return AnthropicChatCompletionRequestTaskSettings.EMPTY_SETTINGS; + } + + ValidationException validationException = new ValidationException(); + + Integer maxTokens = extractOptionalPositiveInteger(map, MAX_TOKENS, ModelConfigurations.SERVICE_SETTINGS, validationException); + // At the time of writing the allowed values are -1, and range 0-1. I'm intentionally not validating the values here, we'll let + // Anthropic return an error when we send it instead. + Double temperature = removeAsType(map, TEMPERATURE_FIELD, Double.class); + Double topP = removeAsType(map, TOP_P_FIELD, Double.class); + Integer topK = removeAsType(map, TOP_K_FIELD, Integer.class); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new AnthropicChatCompletionRequestTaskSettings(maxTokens, temperature, topP, topK); + } + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionServiceSettings.java new file mode 100644 index 0000000000000..3a70a26a82387 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionServiceSettings.java @@ -0,0 +1,137 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
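Per the inline comment above, the Anthropic request-level task settings only validate max_tokens locally; temperature, top_p and top_k are removed as raw values and any out-of-range numbers are left for the Anthropic API to reject. A small usage sketch against the record as defined above; the key literals come from AnthropicServiceFields earlier in this diff, and the values are arbitrary.

import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionRequestTaskSettings;

public final class AnthropicRequestTaskSettingsSketch {
    public static void main(String[] args) {
        Map<String, Object> requestTaskSettings = new HashMap<>();
        requestTaskSettings.put("max_tokens", 1024); // validated as a positive integer
        requestTaskSettings.put("temperature", 0.7); // passed through without client-side validation
        requestTaskSettings.put("top_k", 40);        // passed through without client-side validation

        var parsed = AnthropicChatCompletionRequestTaskSettings.fromMap(requestTaskSettings);
        System.out.println(parsed.maxTokens() + " " + parsed.temperature() + " " + parsed.topK()); // 1024 0.7 40
        System.out.println(parsed.topP()); // null, absent fields stay unset and fall back on merge
    }
}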
+ */ + +package org.elasticsearch.xpack.inference.services.anthropic.completion; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.anthropic.AnthropicRateLimitServiceSettings; +import org.elasticsearch.xpack.inference.services.anthropic.AnthropicService; +import org.elasticsearch.xpack.inference.services.settings.FilteredXContentObject; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.services.ServiceFields.MODEL_ID; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredString; + +/** + * Defines the service settings for interacting with Anthropic's chat completion models. + */ +public class AnthropicChatCompletionServiceSettings extends FilteredXContentObject + implements + ServiceSettings, + AnthropicRateLimitServiceSettings { + + public static final String NAME = "anthropic_completion_service_settings"; + + // The rate limit for build tier 1 is 50 request per minute + // Details are here https://docs.anthropic.com/en/api/rate-limits + private static final RateLimitSettings DEFAULT_RATE_LIMIT_SETTINGS = new RateLimitSettings(50); + + public static AnthropicChatCompletionServiceSettings fromMap(Map map, ConfigurationParseContext context) { + ValidationException validationException = new ValidationException(); + + String modelId = extractRequiredString(map, MODEL_ID, ModelConfigurations.SERVICE_SETTINGS, validationException); + + RateLimitSettings rateLimitSettings = RateLimitSettings.of( + map, + DEFAULT_RATE_LIMIT_SETTINGS, + validationException, + AnthropicService.NAME, + context + ); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new AnthropicChatCompletionServiceSettings(modelId, rateLimitSettings); + } + + private final String modelId; + + private final RateLimitSettings rateLimitSettings; + + public AnthropicChatCompletionServiceSettings(String modelId, @Nullable RateLimitSettings ratelimitSettings) { + this.modelId = Objects.requireNonNull(modelId); + this.rateLimitSettings = Objects.requireNonNullElse(ratelimitSettings, DEFAULT_RATE_LIMIT_SETTINGS); + } + + public AnthropicChatCompletionServiceSettings(StreamInput in) throws IOException { + this.modelId = in.readString(); + rateLimitSettings = new RateLimitSettings(in); + } + + @Override + public RateLimitSettings rateLimitSettings() { + return rateLimitSettings; + } + + @Override + public String modelId() { + return modelId; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + toXContentFragmentOfExposedFields(builder, params); + + builder.endObject(); + return builder; + } + + @Override + protected XContentBuilder toXContentFragmentOfExposedFields(XContentBuilder builder, Params params) throws IOException { + builder.field(MODEL_ID, modelId); + + 
rateLimitSettings.toXContent(builder, params); + + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ML_COMPLETION_INFERENCE_SERVICE_ADDED; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(modelId); + rateLimitSettings.writeTo(out); + } + + @Override + public boolean equals(Object object) { + if (this == object) return true; + if (object == null || getClass() != object.getClass()) return false; + AnthropicChatCompletionServiceSettings that = (AnthropicChatCompletionServiceSettings) object; + return Objects.equals(modelId, that.modelId) && Objects.equals(rateLimitSettings, that.rateLimitSettings); + } + + @Override + public int hashCode() { + return Objects.hash(modelId, rateLimitSettings); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionTaskSettings.java new file mode 100644 index 0000000000000..a1457dda64e40 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionTaskSettings.java @@ -0,0 +1,185 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.anthropic.completion; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.TaskSettings; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredPositiveInteger; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.nonNullOrDefault; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeAsType; +import static org.elasticsearch.xpack.inference.services.anthropic.AnthropicServiceFields.MAX_TOKENS; +import static org.elasticsearch.xpack.inference.services.anthropic.AnthropicServiceFields.TEMPERATURE_FIELD; +import static org.elasticsearch.xpack.inference.services.anthropic.AnthropicServiceFields.TOP_K_FIELD; +import static org.elasticsearch.xpack.inference.services.anthropic.AnthropicServiceFields.TOP_P_FIELD; + +public class AnthropicChatCompletionTaskSettings implements TaskSettings { + + public static final String NAME = "anthropic_completion_task_settings"; + + public static AnthropicChatCompletionTaskSettings fromMap(Map map, ConfigurationParseContext context) { + return switch (context) { + case REQUEST -> fromRequestMap(map); + case PERSISTENT -> fromPersistedMap(map); + }; + } + + private static AnthropicChatCompletionTaskSettings fromRequestMap(Map map) { + 
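// Unlike AnthropicChatCompletionRequestTaskSettings, this request path treats the max_tokens setting as required; any
+ // validation errors collected while parsing the common fields are thrown here, whereas fromPersistedMap below ignores them.
+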
ValidationException validationException = new ValidationException();
+
+ var commonFields = fromMap(map, validationException);
+
+ if (validationException.validationErrors().isEmpty() == false) {
+ throw validationException;
+ }
+
+ return new AnthropicChatCompletionTaskSettings(commonFields);
+ }
+
+ private static AnthropicChatCompletionTaskSettings fromPersistedMap(Map map) {
+ var commonFields = fromMap(map, new ValidationException());
+
+ return new AnthropicChatCompletionTaskSettings(commonFields);
+ }
+
+ private record CommonFields(int maxTokens, Double temperature, Double topP, Integer topK) {}
+
+ private static CommonFields fromMap(Map map, ValidationException validationException) {
+ Integer maxTokens = extractRequiredPositiveInteger(map, MAX_TOKENS, ModelConfigurations.TASK_SETTINGS, validationException);
+
+ // At the time of writing the allowed values for the temperature field are -1, and range 0-1.
+ // I'm intentionally not validating the values here, we'll let Anthropic return an error when we send it instead.
+ Double temperature = removeAsType(map, TEMPERATURE_FIELD, Double.class);
+
+ // I'm intentionally not validating these so that Anthropic will return an error if they aren't in the correct range
+ Double topP = removeAsType(map, TOP_P_FIELD, Double.class);
+ Integer topK = removeAsType(map, TOP_K_FIELD, Integer.class);
+
+ return new CommonFields(Objects.requireNonNullElse(maxTokens, -1), temperature, topP, topK);
+ }
+
+ public static AnthropicChatCompletionTaskSettings of(
+ AnthropicChatCompletionTaskSettings originalSettings,
+ AnthropicChatCompletionRequestTaskSettings requestSettings
+ ) {
+ return new AnthropicChatCompletionTaskSettings(
+ Objects.requireNonNullElse(requestSettings.maxTokens(), originalSettings.maxTokens),
+ nonNullOrDefault(requestSettings.temperature(), originalSettings.temperature),
+ nonNullOrDefault(requestSettings.topP(), originalSettings.topP),
+ nonNullOrDefault(requestSettings.topK(), originalSettings.topK)
+ );
+ }
+
+ private final int maxTokens;
+ private final Double temperature;
+ private final Double topP;
+ private final Integer topK;
+
+ public AnthropicChatCompletionTaskSettings(int maxTokens, @Nullable Double temperature, @Nullable Double topP, @Nullable Integer topK) {
+ this.maxTokens = maxTokens;
+ this.temperature = temperature;
+ this.topP = topP;
+ this.topK = topK;
+ }
+
+ public AnthropicChatCompletionTaskSettings(StreamInput in) throws IOException {
+ this.maxTokens = in.readVInt();
+ this.temperature = in.readOptionalDouble();
+ this.topP = in.readOptionalDouble();
+ this.topK = in.readOptionalInt();
+ }
+
+ private AnthropicChatCompletionTaskSettings(CommonFields commonFields) {
+ this(commonFields.maxTokens, commonFields.temperature, commonFields.topP, commonFields.topK);
+ }
+
+ public int maxTokens() {
+ return maxTokens;
+ }
+
+ public Double temperature() {
+ return temperature;
+ }
+
+ public Double topP() {
+ return topP;
+ }
+
+ public Integer topK() {
+ return topK;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+
+ builder.field(MAX_TOKENS, maxTokens);
+
+ if (temperature != null) {
+ builder.field(TEMPERATURE_FIELD, temperature);
+ }
+
+ if (topP != null) {
+ builder.field(TOP_P_FIELD, topP);
+ }
+
+ if (topK != null) {
+ builder.field(TOP_K_FIELD, topK);
+ }
+
+ builder.endObject();
+
+ return builder;
+ }
+
+ @Override
+ public String getWriteableName() {
+ return NAME;
+ }
+
+ @Override
+ public
TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ML_ANTHROPIC_INTEGRATION_ADDED; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(maxTokens); + out.writeOptionalDouble(temperature); + out.writeOptionalDouble(topP); + out.writeOptionalInt(topK); + } + + @Override + public boolean equals(Object object) { + if (this == object) return true; + if (object == null || getClass() != object.getClass()) return false; + AnthropicChatCompletionTaskSettings that = (AnthropicChatCompletionTaskSettings) object; + return Objects.equals(maxTokens, that.maxTokens) + && Objects.equals(temperature, that.temperature) + && Objects.equals(topP, that.topP) + && Objects.equals(topK, that.topK); + } + + @Override + public int hashCode() { + return Objects.hash(maxTokens, temperature, topP, topK); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioService.java index 65c3db4093249..c4ef5faf8e667 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioService.java @@ -24,7 +24,7 @@ import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xpack.inference.common.EmbeddingRequestChunker; +import org.elasticsearch.xpack.inference.chunking.EmbeddingRequestChunker; import org.elasticsearch.xpack.inference.external.action.azureaistudio.AzureAiStudioActionCreator; import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsServiceSettings.java index 1a39cd67a70f3..d4a1fd938625e 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsServiceSettings.java @@ -33,8 +33,8 @@ import static org.elasticsearch.xpack.inference.services.ServiceFields.MAX_INPUT_TOKENS; import static org.elasticsearch.xpack.inference.services.ServiceFields.SIMILARITY; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalBoolean; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveInteger; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractSimilarity; -import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeAsType; public class AzureAiStudioEmbeddingsServiceSettings extends AzureAiStudioServiceSettings { @@ -59,10 +59,15 @@ private static AzureAiStudioEmbeddingCommonFields embeddingSettingsFromMap( ConfigurationParseContext context ) { var baseSettings = AzureAiStudioServiceSettings.fromMap(map, validationException, context); - SimilarityMeasure 
similarity = extractSimilarity(map, ModelConfigurations.SERVICE_SETTINGS, validationException); - Integer dims = removeAsType(map, DIMENSIONS, Integer.class); - Integer maxTokens = removeAsType(map, MAX_INPUT_TOKENS, Integer.class); + SimilarityMeasure similarity = extractSimilarity(map, ModelConfigurations.SERVICE_SETTINGS, validationException); + Integer dims = extractOptionalPositiveInteger(map, DIMENSIONS, ModelConfigurations.SERVICE_SETTINGS, validationException); + Integer maxTokens = extractOptionalPositiveInteger( + map, + MAX_INPUT_TOKENS, + ModelConfigurations.SERVICE_SETTINGS, + validationException + ); Boolean dimensionsSetByUser = extractOptionalBoolean(map, DIMENSIONS_SET_BY_USER, validationException); switch (context) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java index 5c25ae62517dd..3c75243770f97 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java @@ -24,11 +24,7 @@ import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xpack.core.inference.results.ErrorChunkedInferenceResults; -import org.elasticsearch.xpack.core.inference.results.InferenceChunkedTextEmbeddingFloatResults; -import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; -import org.elasticsearch.xpack.core.ml.inference.results.ErrorInferenceResults; -import org.elasticsearch.xpack.inference.common.EmbeddingRequestChunker; +import org.elasticsearch.xpack.inference.chunking.EmbeddingRequestChunker; import org.elasticsearch.xpack.inference.external.action.azureopenai.AzureOpenAiActionCreator; import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; @@ -44,7 +40,6 @@ import java.util.Map; import java.util.Set; -import static org.elasticsearch.xpack.core.inference.results.ResultUtils.createInvalidChunkedResultException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.parsePersistedConfigErrorMsg; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; @@ -246,19 +241,6 @@ protected void doChunkedInfer( } } - private static List translateToChunkedResults( - List inputs, - InferenceServiceResults inferenceResults - ) { - if (inferenceResults instanceof InferenceTextEmbeddingFloatResults textEmbeddingResults) { - return InferenceChunkedTextEmbeddingFloatResults.listOf(inputs, textEmbeddingResults); - } else if (inferenceResults instanceof ErrorInferenceResults error) { - return List.of(new ErrorChunkedInferenceResults(error.getException())); - } else { - throw createInvalidChunkedResultException(InferenceTextEmbeddingFloatResults.NAME, inferenceResults.getWriteableName()); - } - } - /** * For text embedding models get the embedding size and * update the service settings. 
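Note on the validation changes above and below: both the Azure AI Studio and the Azure OpenAI embeddings service settings replace the lenient removeAsType(map, ..., Integer.class) calls with extractOptionalPositiveInteger, so bad values for dimensions and max_input_tokens are reported through the shared ValidationException instead of being accepted silently. The sketch below is illustrative only; it assumes extractOptionalPositiveInteger returns null when the key is absent and records a validation error (rather than returning the value) when the key holds a zero, negative, or non-integer value. The ParseLimitsSketch class and its ParsedLimits record are hypothetical and exist only for this example.

import org.elasticsearch.common.ValidationException;
import org.elasticsearch.inference.ModelConfigurations;

import java.util.Map;

import static org.elasticsearch.xpack.inference.services.ServiceFields.DIMENSIONS;
import static org.elasticsearch.xpack.inference.services.ServiceFields.MAX_INPUT_TOKENS;
import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveInteger;

class ParseLimitsSketch {

    // Hypothetical holder for the two optional integer settings parsed below.
    record ParsedLimits(Integer dimensions, Integer maxInputTokens) {}

    static ParsedLimits parse(Map<String, Object> serviceSettingsMap) {
        ValidationException validationException = new ValidationException();

        // Each helper removes its key from the map; a present but non-positive value is
        // assumed to add an error to validationException instead of being returned.
        Integer dims = extractOptionalPositiveInteger(serviceSettingsMap, DIMENSIONS, ModelConfigurations.SERVICE_SETTINGS, validationException);
        Integer maxTokens = extractOptionalPositiveInteger(serviceSettingsMap, MAX_INPUT_TOKENS, ModelConfigurations.SERVICE_SETTINGS, validationException);

        if (validationException.validationErrors().isEmpty() == false) {
            // With the old removeAsType calls a value such as "dimensions": -1 slipped through;
            // now the request fails fast with a clear validation message.
            throw validationException;
        }

        return new ParsedLimits(dims, maxTokens);
    }
}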
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/embeddings/AzureOpenAiEmbeddingsServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/embeddings/AzureOpenAiEmbeddingsServiceSettings.java index 1c426815a83c0..a9e40569d4e7a 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/embeddings/AzureOpenAiEmbeddingsServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/embeddings/AzureOpenAiEmbeddingsServiceSettings.java @@ -33,9 +33,9 @@ import static org.elasticsearch.xpack.inference.services.ServiceFields.MAX_INPUT_TOKENS; import static org.elasticsearch.xpack.inference.services.ServiceFields.SIMILARITY; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalBoolean; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveInteger; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredString; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractSimilarity; -import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeAsType; import static org.elasticsearch.xpack.inference.services.azureopenai.AzureOpenAiServiceFields.API_VERSION; import static org.elasticsearch.xpack.inference.services.azureopenai.AzureOpenAiServiceFields.DEPLOYMENT_ID; import static org.elasticsearch.xpack.inference.services.azureopenai.AzureOpenAiServiceFields.RESOURCE_NAME; @@ -88,8 +88,13 @@ private static CommonFields fromMap( String resourceName = extractRequiredString(map, RESOURCE_NAME, ModelConfigurations.SERVICE_SETTINGS, validationException); String deploymentId = extractRequiredString(map, DEPLOYMENT_ID, ModelConfigurations.SERVICE_SETTINGS, validationException); String apiVersion = extractRequiredString(map, API_VERSION, ModelConfigurations.SERVICE_SETTINGS, validationException); - Integer dims = removeAsType(map, DIMENSIONS, Integer.class); - Integer maxTokens = removeAsType(map, MAX_INPUT_TOKENS, Integer.class); + Integer dims = extractOptionalPositiveInteger(map, DIMENSIONS, ModelConfigurations.SERVICE_SETTINGS, validationException); + Integer maxTokens = extractOptionalPositiveInteger( + map, + MAX_INPUT_TOKENS, + ModelConfigurations.SERVICE_SETTINGS, + validationException + ); SimilarityMeasure similarity = extractSimilarity(map, ModelConfigurations.SERVICE_SETTINGS, validationException); RateLimitSettings rateLimitSettings = RateLimitSettings.of( map, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java index 76ef15568d448..2feb1428c4508 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java @@ -23,7 +23,7 @@ import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xpack.inference.common.EmbeddingRequestChunker; +import org.elasticsearch.xpack.inference.chunking.EmbeddingRequestChunker; import org.elasticsearch.xpack.inference.external.action.cohere.CohereActionCreator; import 
org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput;
import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender;
@@ -33,7 +33,6 @@ import org.elasticsearch.xpack.inference.services.ServiceComponents;
import org.elasticsearch.xpack.inference.services.ServiceUtils;
import org.elasticsearch.xpack.inference.services.cohere.completion.CohereCompletionModel;
-import org.elasticsearch.xpack.inference.services.cohere.embeddings.CohereEmbeddingType;
import org.elasticsearch.xpack.inference.services.cohere.embeddings.CohereEmbeddingsModel;
import org.elasticsearch.xpack.inference.services.cohere.embeddings.CohereEmbeddingsServiceSettings;
import org.elasticsearch.xpack.inference.services.cohere.rerank.CohereRerankModel;
@@ -280,10 +279,8 @@ public void checkModelConfig(Model model, ActionListener listener) { }
private CohereEmbeddingsModel updateModelWithEmbeddingDetails(CohereEmbeddingsModel model, int embeddingSize) {
- var similarityFromModel = model.getServiceSettings().similarity();
- var similarityToUse = similarityFromModel == null
- ? defaultSimilarity(model.getServiceSettings().getEmbeddingType())
- : similarityFromModel;
+ var userDefinedSimilarity = model.getServiceSettings().similarity();
+ var similarityToUse = userDefinedSimilarity == null ? defaultSimilarity() : userDefinedSimilarity;
CohereEmbeddingsServiceSettings serviceSettings = new CohereEmbeddingsServiceSettings(
new CohereServiceSettings(
@@ -302,25 +299,14 @@ private CohereEmbeddingsModel updateModelWithEmbeddingDetails(CohereEmbeddingsMo
/**
* Return the default similarity measure for the embedding type.
- * Cohere embeddings are normalized to unit vectors so Dot Product
- * can be used. However, Elasticsearch rejects the byte vectors with
- * Dot Product similarity complaining they are not normalized so
- * Cosine is used for bytes.
- * TODO investigate why the byte vectors are not normalized.
+ * Cohere embeddings are normalized to unit vectors, therefore Dot
+ * Product similarity can be used and is the default for all Cohere
+ * models.
+ *
- * @param embeddingType The embedding type (can be null)
+ * @return The default similarity.
*/ - static SimilarityMeasure defaultSimilarity(@Nullable CohereEmbeddingType embeddingType) { - if (embeddingType == null) { - return SimilarityMeasure.DOT_PRODUCT; - } - - return switch (embeddingType) { - case FLOAT -> SimilarityMeasure.DOT_PRODUCT; - case BYTE -> SimilarityMeasure.COSINE; - case INT8 -> SimilarityMeasure.COSINE; - }; + static SimilarityMeasure defaultSimilarity() { + return SimilarityMeasure.DOT_PRODUCT; } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettings.java index d477a8c5a5f55..9e7adef7814c5 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettings.java @@ -46,7 +46,7 @@ public class CohereServiceSettings extends FilteredXContentObject implements Ser private static final Logger logger = LogManager.getLogger(CohereServiceSettings.class); // Production key rate limits for all endpoints: https://docs.cohere.com/docs/going-live#production-key-specifications // 10K requests a minute - private static final RateLimitSettings DEFAULT_RATE_LIMIT_SETTINGS = new RateLimitSettings(10_000); + public static final RateLimitSettings DEFAULT_RATE_LIMIT_SETTINGS = new RateLimitSettings(10_000); public static CohereServiceSettings fromMap(Map map, ConfigurationParseContext context) { ValidationException validationException = new ValidationException(); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankModel.java index cc8116a70bcc8..b84b98973bbe5 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankModel.java @@ -59,7 +59,7 @@ public CohereRerankModel( new ModelConfigurations(modelId, taskType, service, serviceSettings, taskSettings), new ModelSecrets(secretSettings), secretSettings, - serviceSettings.getCommonSettings() + serviceSettings ); } @@ -100,6 +100,6 @@ public ExecutableAction accept(CohereActionVisitor visitor, Map @Override public URI uri() { - return getServiceSettings().getCommonSettings().uri(); + return getServiceSettings().uri(); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankServiceSettings.java index 6a74fe533e3db..f7b9a5288b997 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankServiceSettings.java @@ -7,43 +7,118 @@ package org.elasticsearch.xpack.inference.services.cohere.rerank; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ValidationException; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; -import org.elasticsearch.xpack.inference.services.cohere.CohereServiceSettings; +import org.elasticsearch.xpack.inference.services.cohere.CohereRateLimitServiceSettings; +import org.elasticsearch.xpack.inference.services.cohere.CohereService; import org.elasticsearch.xpack.inference.services.settings.FilteredXContentObject; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; import java.io.IOException; +import java.net.URI; import java.util.Map; import java.util.Objects; -public class CohereRerankServiceSettings extends FilteredXContentObject implements ServiceSettings { +import static org.elasticsearch.xpack.inference.services.ServiceFields.DIMENSIONS; +import static org.elasticsearch.xpack.inference.services.ServiceFields.MAX_INPUT_TOKENS; +import static org.elasticsearch.xpack.inference.services.ServiceFields.MODEL_ID; +import static org.elasticsearch.xpack.inference.services.ServiceFields.URL; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.convertToUri; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.createOptionalUri; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalString; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractSimilarity; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeAsType; +import static org.elasticsearch.xpack.inference.services.cohere.CohereServiceSettings.DEFAULT_RATE_LIMIT_SETTINGS; + +public class CohereRerankServiceSettings extends FilteredXContentObject implements ServiceSettings, CohereRateLimitServiceSettings { public static final String NAME = "cohere_rerank_service_settings"; - public static CohereRerankServiceSettings fromMap(Map map, ConfigurationParseContext parseContext) { + private static final Logger logger = LogManager.getLogger(CohereRerankServiceSettings.class); + + public static CohereRerankServiceSettings fromMap(Map map, ConfigurationParseContext context) { ValidationException validationException = new ValidationException(); - var commonServiceSettings = CohereServiceSettings.fromMap(map, parseContext); + + String url = extractOptionalString(map, URL, ModelConfigurations.SERVICE_SETTINGS, validationException); + + // We need to extract/remove those fields to avoid unknown service settings errors + extractSimilarity(map, ModelConfigurations.SERVICE_SETTINGS, validationException); + removeAsType(map, DIMENSIONS, Integer.class); + removeAsType(map, MAX_INPUT_TOKENS, Integer.class); + + URI uri = convertToUri(url, URL, ModelConfigurations.SERVICE_SETTINGS, validationException); + String modelId = extractOptionalString(map, MODEL_ID, ModelConfigurations.SERVICE_SETTINGS, validationException); + RateLimitSettings rateLimitSettings = RateLimitSettings.of( + map, + DEFAULT_RATE_LIMIT_SETTINGS, + validationException, + CohereService.NAME, + context + ); if (validationException.validationErrors().isEmpty() == false) { throw validationException; } - return new CohereRerankServiceSettings(commonServiceSettings); + return new CohereRerankServiceSettings(uri, 
modelId, rateLimitSettings); } - private final CohereServiceSettings commonSettings; + private final URI uri; + + private final String modelId; + + private final RateLimitSettings rateLimitSettings; + + public CohereRerankServiceSettings(@Nullable URI uri, @Nullable String modelId, @Nullable RateLimitSettings rateLimitSettings) { + this.uri = uri; + this.modelId = modelId; + this.rateLimitSettings = Objects.requireNonNullElse(rateLimitSettings, DEFAULT_RATE_LIMIT_SETTINGS); + } - public CohereRerankServiceSettings(CohereServiceSettings commonSettings) { - this.commonSettings = commonSettings; + public CohereRerankServiceSettings(@Nullable String url, @Nullable String modelId, @Nullable RateLimitSettings rateLimitSettings) { + this(createOptionalUri(url), modelId, rateLimitSettings); } public CohereRerankServiceSettings(StreamInput in) throws IOException { - commonSettings = new CohereServiceSettings(in); + this.uri = createOptionalUri(in.readOptionalString()); + + if (in.getTransportVersion().before(TransportVersions.ML_INFERENCE_COHERE_UNUSED_RERANK_SETTINGS_REMOVED)) { + // An older node sends these fields, so we need to skip them to progress through the serialized data + in.readOptionalEnum(SimilarityMeasure.class); + in.readOptionalVInt(); + in.readOptionalVInt(); + } + + this.modelId = in.readOptionalString(); + + if (in.getTransportVersion().onOrAfter(TransportVersions.ML_INFERENCE_RATE_LIMIT_SETTINGS_ADDED)) { + this.rateLimitSettings = new RateLimitSettings(in); + } else { + this.rateLimitSettings = DEFAULT_RATE_LIMIT_SETTINGS; + } + } + + public URI uri() { + return uri; + } + + public String modelId() { + return modelId; + } + + @Override + public RateLimitSettings rateLimitSettings() { + return rateLimitSettings; } @Override @@ -55,7 +130,7 @@ public String getWriteableName() { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - commonSettings.toXContentFragment(builder, params); + toXContentFragmentOfExposedFields(builder, params); builder.endObject(); return builder; @@ -63,7 +138,15 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override protected XContentBuilder toXContentFragmentOfExposedFields(XContentBuilder builder, Params params) throws IOException { - commonSettings.toXContentFragmentOfExposedFields(builder, params); + if (uri != null) { + builder.field(URL, uri.toString()); + } + + if (modelId != null) { + builder.field(MODEL_ID, modelId); + } + + rateLimitSettings.toXContent(builder, params); return builder; } @@ -75,23 +158,36 @@ public TransportVersion getMinimalSupportedVersion() { @Override public void writeTo(StreamOutput out) throws IOException { - commonSettings.writeTo(out); + var uriToWrite = uri != null ? 
uri.toString() : null; + out.writeOptionalString(uriToWrite); + + if (out.getTransportVersion().before(TransportVersions.ML_INFERENCE_COHERE_UNUSED_RERANK_SETTINGS_REMOVED)) { + // An old node expects this data to be present, so we need to send at least the booleans + // indicating that the fields are not set + out.writeOptionalEnum(null); + out.writeOptionalVInt(null); + out.writeOptionalVInt(null); + } + + out.writeOptionalString(modelId); + + if (out.getTransportVersion().onOrAfter(TransportVersions.ML_INFERENCE_RATE_LIMIT_SETTINGS_ADDED)) { + rateLimitSettings.writeTo(out); + } } @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - CohereRerankServiceSettings that = (CohereRerankServiceSettings) o; - return Objects.equals(commonSettings, that.commonSettings); + public boolean equals(Object object) { + if (this == object) return true; + if (object == null || getClass() != object.getClass()) return false; + CohereRerankServiceSettings that = (CohereRerankServiceSettings) object; + return Objects.equals(uri, that.uri) + && Objects.equals(modelId, that.modelId) + && Objects.equals(rateLimitSettings, that.rateLimitSettings); } @Override public int hashCode() { - return Objects.hash(commonSettings); - } - - public CohereServiceSettings getCommonSettings() { - return commonSettings; + return Objects.hash(uri, modelId, rateLimitSettings); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandEmbeddingModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandEmbeddingModel.java new file mode 100644 index 0000000000000..bb4e0c2c513ac --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandEmbeddingModel.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.elasticsearch; + +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; + +import java.util.Map; + +public class CustomElandEmbeddingModel extends CustomElandModel { + + public CustomElandEmbeddingModel( + String inferenceEntityId, + TaskType taskType, + String service, + Map serviceSettings, + ConfigurationParseContext context + ) { + this(inferenceEntityId, taskType, service, CustomElandInternalTextEmbeddingServiceSettings.fromMap(serviceSettings, context)); + } + + public CustomElandEmbeddingModel( + String inferenceEntityId, + TaskType taskType, + String service, + CustomElandInternalTextEmbeddingServiceSettings serviceSettings + ) { + super( + new ModelConfigurations(inferenceEntityId, taskType, service, serviceSettings), + serviceSettings.getElasticsearchInternalServiceSettings() + ); + } + + @Override + public CustomElandInternalTextEmbeddingServiceSettings getServiceSettings() { + return (CustomElandInternalTextEmbeddingServiceSettings) super.getServiceSettings(); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandInternalServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandInternalServiceSettings.java index c62855c09cff2..0bb45a6c77a8c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandInternalServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandInternalServiceSettings.java @@ -14,17 +14,27 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings; import org.elasticsearch.xpack.inference.services.ServiceUtils; import java.io.IOException; import java.util.Map; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveInteger; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredPositiveInteger; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredString; + public class CustomElandInternalServiceSettings extends ElasticsearchInternalServiceSettings { public static final String NAME = "custom_eland_model_internal_service_settings"; - public CustomElandInternalServiceSettings(int numAllocations, int numThreads, String modelId) { - super(numAllocations, numThreads, modelId); + public CustomElandInternalServiceSettings( + Integer numAllocations, + int numThreads, + String modelId, + AdaptiveAllocationsSettings adaptiveAllocationsSettings + ) { + super(numAllocations, numThreads, modelId, adaptiveAllocationsSettings); } /** @@ -38,15 +48,22 @@ public CustomElandInternalServiceSettings(int numAllocations, int numThreads, St * @param map Source map containing the config * @return The {@code CustomElandServiceSettings} builder */ - public static Builder fromMap(Map map) { - + public static CustomElandInternalServiceSettings fromMap(Map map) { ValidationException validationException = new ValidationException(); - Integer numAllocations = ServiceUtils.removeAsType(map, NUM_ALLOCATIONS, Integer.class); - Integer numThreads = 
ServiceUtils.removeAsType(map, NUM_THREADS, Integer.class); - - validateParameters(numAllocations, validationException, numThreads); - String modelId = ServiceUtils.extractRequiredString(map, MODEL_ID, ModelConfigurations.SERVICE_SETTINGS, validationException); + Integer numAllocations = extractOptionalPositiveInteger( + map, + NUM_ALLOCATIONS, + ModelConfigurations.SERVICE_SETTINGS, + validationException + ); + Integer numThreads = extractRequiredPositiveInteger(map, NUM_THREADS, ModelConfigurations.SERVICE_SETTINGS, validationException); + AdaptiveAllocationsSettings adaptiveAllocationsSettings = ServiceUtils.removeAsAdaptiveAllocationsSettings( + map, + ADAPTIVE_ALLOCATIONS, + validationException + ); + String modelId = extractRequiredString(map, MODEL_ID, ModelConfigurations.SERVICE_SETTINGS, validationException); if (validationException.validationErrors().isEmpty() == false) { throw validationException; @@ -55,13 +72,19 @@ public static Builder fromMap(Map map) { var builder = new Builder() { @Override public CustomElandInternalServiceSettings build() { - return new CustomElandInternalServiceSettings(getNumAllocations(), getNumThreads(), getModelId()); + return new CustomElandInternalServiceSettings( + getNumAllocations(), + getNumThreads(), + getModelId(), + getAdaptiveAllocationsSettings() + ); } }; builder.setNumAllocations(numAllocations); builder.setNumThreads(numThreads); builder.setModelId(modelId); - return builder; + builder.setAdaptiveAllocationsSettings(adaptiveAllocationsSettings); + return builder.build(); } @Override @@ -70,7 +93,14 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } public CustomElandInternalServiceSettings(StreamInput in) throws IOException { - super(in.readVInt(), in.readVInt(), in.readString()); + super( + in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS) ? in.readOptionalVInt() : in.readVInt(), + in.readVInt(), + in.readString(), + in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS) + ? in.readOptionalWriteable(AdaptiveAllocationsSettings::new) + : null + ); } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandInternalTextEmbeddingServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandInternalTextEmbeddingServiceSettings.java new file mode 100644 index 0000000000000..8413d06045601 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandInternalTextEmbeddingServiceSettings.java @@ -0,0 +1,255 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.elasticsearch; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.services.ServiceFields.DIMENSIONS; +import static org.elasticsearch.xpack.inference.services.ServiceFields.ELEMENT_TYPE; +import static org.elasticsearch.xpack.inference.services.ServiceFields.SIMILARITY; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalEnum; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveInteger; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractSimilarity; + +public class CustomElandInternalTextEmbeddingServiceSettings implements ServiceSettings { + + public static final String NAME = "custom_eland_model_internal_text_embedding_service_settings"; + + /** + * Parse the CustomElandServiceSettings from map and validate the setting values. + * + * This method does not verify the model variant + * + * If required setting are missing or the values are invalid an + * {@link ValidationException} is thrown. 
+ * + * @param map Source map containing the config + * @param context The parser context, whether it is from an HTTP request or from persistent storage + * @return The {@code CustomElandServiceSettings} builder + */ + public static CustomElandInternalTextEmbeddingServiceSettings fromMap(Map map, ConfigurationParseContext context) { + return switch (context) { + case REQUEST -> fromRequestMap(map); + case PERSISTENT -> fromPersistedMap(map); + }; + } + + private static CustomElandInternalTextEmbeddingServiceSettings fromRequestMap(Map map) { + ValidationException validationException = new ValidationException(); + var commonFields = commonFieldsFromMap(map, validationException); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new CustomElandInternalTextEmbeddingServiceSettings(commonFields); + } + + private static CustomElandInternalTextEmbeddingServiceSettings fromPersistedMap(Map map) { + var commonFields = commonFieldsFromMap(map); + Integer dims = extractOptionalPositiveInteger(map, DIMENSIONS, ModelConfigurations.SERVICE_SETTINGS, new ValidationException()); + + return new CustomElandInternalTextEmbeddingServiceSettings(commonFields, dims); + } + + private record CommonFields( + ElasticsearchInternalServiceSettings internalServiceSettings, + SimilarityMeasure similarityMeasure, + DenseVectorFieldMapper.ElementType elementType + ) {} + + private static CommonFields commonFieldsFromMap(Map map) { + return commonFieldsFromMap(map, new ValidationException()); + } + + private static CommonFields commonFieldsFromMap(Map map, ValidationException validationException) { + var internalSettings = ElasticsearchInternalServiceSettings.fromMap(map, validationException); + SimilarityMeasure similarity = extractSimilarity(map, ModelConfigurations.SERVICE_SETTINGS, validationException); + DenseVectorFieldMapper.ElementType elementType = extractOptionalEnum( + map, + ELEMENT_TYPE, + ModelConfigurations.SERVICE_SETTINGS, + DenseVectorFieldMapper.ElementType::fromString, + EnumSet.of(DenseVectorFieldMapper.ElementType.BYTE, DenseVectorFieldMapper.ElementType.FLOAT), + validationException + ); + + return new CommonFields( + internalSettings, + Objects.requireNonNullElse(similarity, SimilarityMeasure.COSINE), + Objects.requireNonNullElse(elementType, DenseVectorFieldMapper.ElementType.FLOAT) + ); + } + + private final ElasticsearchInternalServiceSettings internalServiceSettings; + private final Integer dimensions; + private final SimilarityMeasure similarityMeasure; + private final DenseVectorFieldMapper.ElementType elementType; + + public CustomElandInternalTextEmbeddingServiceSettings( + int numAllocations, + int numThreads, + String modelId, + AdaptiveAllocationsSettings adaptiveAllocationsSettings + ) { + this( + numAllocations, + numThreads, + modelId, + adaptiveAllocationsSettings, + null, + SimilarityMeasure.COSINE, + DenseVectorFieldMapper.ElementType.FLOAT + ); + } + + public CustomElandInternalTextEmbeddingServiceSettings( + int numAllocations, + int numThreads, + String modelId, + AdaptiveAllocationsSettings adaptiveAllocationsSettings, + Integer dimensions, + SimilarityMeasure similarityMeasure, + DenseVectorFieldMapper.ElementType elementType + ) { + internalServiceSettings = new ElasticsearchInternalServiceSettings( + numAllocations, + numThreads, + modelId, + adaptiveAllocationsSettings + ); + this.dimensions = dimensions; + this.similarityMeasure = Objects.requireNonNull(similarityMeasure); + this.elementType = 
Objects.requireNonNull(elementType); + } + + public CustomElandInternalTextEmbeddingServiceSettings(StreamInput in) throws IOException { + internalServiceSettings = new ElasticsearchInternalServiceSettings(in); + if (in.getTransportVersion().onOrAfter(TransportVersions.ML_INFERENCE_ELAND_SETTINGS_ADDED)) { + dimensions = in.readOptionalVInt(); + similarityMeasure = in.readEnum(SimilarityMeasure.class); + elementType = in.readEnum(DenseVectorFieldMapper.ElementType.class); + } else { + dimensions = null; + similarityMeasure = SimilarityMeasure.COSINE; + elementType = DenseVectorFieldMapper.ElementType.FLOAT; + } + } + + private CustomElandInternalTextEmbeddingServiceSettings(CommonFields commonFields) { + this(commonFields, null); + } + + private CustomElandInternalTextEmbeddingServiceSettings(CommonFields commonFields, Integer dimensions) { + internalServiceSettings = commonFields.internalServiceSettings; + this.dimensions = dimensions; + similarityMeasure = commonFields.similarityMeasure; + elementType = commonFields.elementType; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + internalServiceSettings.addXContentFragment(builder, params); + + if (dimensions != null) { + builder.field(DIMENSIONS, dimensions); + } + + if (similarityMeasure != null) { + builder.field(SIMILARITY, similarityMeasure); + } + + if (elementType != null) { + builder.field(ELEMENT_TYPE, elementType); + } + + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return CustomElandInternalTextEmbeddingServiceSettings.NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.V_8_13_0; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + internalServiceSettings.writeTo(out); + + if (out.getTransportVersion().onOrAfter(TransportVersions.ML_INFERENCE_ELAND_SETTINGS_ADDED)) { + out.writeOptionalVInt(dimensions); + out.writeEnum(similarityMeasure); + out.writeEnum(elementType); + } + } + + public ElasticsearchInternalServiceSettings getElasticsearchInternalServiceSettings() { + return internalServiceSettings; + } + + @Override + public DenseVectorFieldMapper.ElementType elementType() { + return elementType; + } + + @Override + public SimilarityMeasure similarity() { + return similarityMeasure; + } + + @Override + public Integer dimensions() { + return dimensions; + } + + @Override + public ToXContentObject getFilteredXContentObject() { + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CustomElandInternalTextEmbeddingServiceSettings that = (CustomElandInternalTextEmbeddingServiceSettings) o; + return Objects.equals(internalServiceSettings, that.internalServiceSettings) + && Objects.equals(dimensions, that.dimensions) + && Objects.equals(similarityMeasure, that.similarityMeasure) + && Objects.equals(elementType, that.elementType); + } + + @Override + public int hashCode() { + return Objects.hash(internalServiceSettings, dimensions, similarityMeasure, elementType); + } + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandModel.java index 1f9ec163aa546..703fca8c74c31 100644 --- 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandModel.java @@ -9,71 +9,42 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.Model; -import org.elasticsearch.inference.TaskSettings; -import org.elasticsearch.inference.TaskType; +import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.inference.services.settings.InternalServiceSettings; -import java.util.Map; +import java.util.Objects; import static org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus.State.STARTED; -public class CustomElandModel extends ElasticsearchModel { +public class CustomElandModel extends Model implements ElasticsearchModel { + private final InternalServiceSettings internalServiceSettings; - public static CustomElandModel build( - String inferenceEntityId, - TaskType taskType, - String service, - CustomElandInternalServiceSettings serviceSettings, - @Nullable TaskSettings taskSettings - ) { - return taskSettings == null - ? new CustomElandModel(inferenceEntityId, taskType, service, serviceSettings) - : new CustomElandModel(inferenceEntityId, taskType, service, serviceSettings, taskSettings); - } - - public CustomElandModel( - String inferenceEntityId, - TaskType taskType, - String service, - CustomElandInternalServiceSettings serviceSettings - ) { - super(inferenceEntityId, taskType, service, serviceSettings); + public CustomElandModel(ModelConfigurations configurations, InternalServiceSettings internalServiceSettings) { + super(configurations); + this.internalServiceSettings = Objects.requireNonNull(internalServiceSettings); } - private CustomElandModel( - String inferenceEntityId, - TaskType taskType, - String service, - CustomElandInternalServiceSettings serviceSettings, - TaskSettings taskSettings - ) { - super(inferenceEntityId, taskType, service, serviceSettings, taskSettings); + public String getModelId() { + return internalServiceSettings.getModelId(); } @Override - public CustomElandInternalServiceSettings getServiceSettings() { - return (CustomElandInternalServiceSettings) super.getServiceSettings(); - } - - @Override - StartTrainedModelDeploymentAction.Request getStartTrainedModelDeploymentActionRequest() { - var startRequest = new StartTrainedModelDeploymentAction.Request( - this.getServiceSettings().getModelId(), - this.getInferenceEntityId() - ); - startRequest.setNumberOfAllocations(this.getServiceSettings().getNumAllocations()); - startRequest.setThreadsPerAllocation(this.getServiceSettings().getNumThreads()); + public StartTrainedModelDeploymentAction.Request getStartTrainedModelDeploymentActionRequest() { + var startRequest = new StartTrainedModelDeploymentAction.Request(internalServiceSettings.getModelId(), this.getInferenceEntityId()); + startRequest.setNumberOfAllocations(internalServiceSettings.getNumAllocations()); + startRequest.setThreadsPerAllocation(internalServiceSettings.getNumThreads()); + startRequest.setAdaptiveAllocationsSettings(internalServiceSettings.getAdaptiveAllocationsSettings()); 
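+ // The adaptive allocations settings, when configured, let the deployment scale its number of allocations automatically rather than keeping it fixed.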
startRequest.setWaitForState(STARTED); return startRequest; } @Override - ActionListener getCreateTrainedModelAssignmentActionListener( + public ActionListener getCreateTrainedModelAssignmentActionListener( Model model, ActionListener listener ) { @@ -92,7 +63,7 @@ public void onFailure(Exception e) { "Could not start the TextEmbeddingService service as the " + "custom eland model [{0}] for this platform cannot be found." + " Custom models need to be loaded into the cluster with eland before they can be started.", - getServiceSettings().getModelId() + getModelId() ) ); return; @@ -101,12 +72,4 @@ public void onFailure(Exception e) { } }; } - - public static TaskSettings taskSettingsFromMap(TaskType taskType, Map taskSettingsMap) { - if (TaskType.RERANK.equals(taskType)) { - return CustomElandRerankTaskSettings.defaultsFromMap(taskSettingsMap); - } - - return null; - } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandRerankModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandRerankModel.java new file mode 100644 index 0000000000000..d880450739319 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandRerankModel.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.elasticsearch; + +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; + +import java.util.Map; + +public class CustomElandRerankModel extends CustomElandModel { + + public CustomElandRerankModel( + String inferenceEntityId, + TaskType taskType, + String service, + Map serviceSettings, + Map taskSettings, + ConfigurationParseContext context + ) { + this( + inferenceEntityId, + taskType, + service, + CustomElandInternalServiceSettings.fromMap(serviceSettings), + CustomElandRerankTaskSettings.defaultsFromMap(taskSettings) + ); + } + + // default for testing + CustomElandRerankModel( + String inferenceEntityId, + TaskType taskType, + String service, + CustomElandInternalServiceSettings serviceSettings, + CustomElandRerankTaskSettings taskSettings + ) { + super(new ModelConfigurations(inferenceEntityId, taskType, service, serviceSettings, taskSettings), serviceSettings); + } + + @Override + public CustomElandInternalServiceSettings getServiceSettings() { + return (CustomElandInternalServiceSettings) super.getServiceSettings(); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java index dbc36960a8231..9dc88be16ddbb 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java @@ -46,6 +46,8 @@ import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextEmbeddingConfigUpdate; import 
org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextSimilarityConfigUpdate; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TokenizationConfigUpdate; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.ServiceUtils; import org.elasticsearch.xpack.inference.services.settings.InternalServiceSettings; import java.io.IOException; @@ -127,15 +129,17 @@ private void customElandCase( + "]. You may need to load it into the cluster using eland." ); } else { - var customElandInternalServiceSettings = (CustomElandInternalServiceSettings) CustomElandInternalServiceSettings.fromMap( - serviceSettingsMap - ).build(); - throwIfNotEmptyMap(serviceSettingsMap, name()); + var model = createCustomElandModel( + inferenceEntityId, + taskType, + serviceSettingsMap, + taskSettingsMap, + ConfigurationParseContext.REQUEST + ); - var taskSettings = CustomElandModel.taskSettingsFromMap(taskType, taskSettingsMap); + throwIfNotEmptyMap(serviceSettingsMap, name()); throwIfNotEmptyMap(taskSettingsMap, name()); - var model = CustomElandModel.build(inferenceEntityId, taskType, name(), customElandInternalServiceSettings, taskSettings); delegate.onResponse(model); } }); @@ -143,6 +147,20 @@ private void customElandCase( client.execute(GetTrainedModelsAction.INSTANCE, request, getModelsListener); } + private static CustomElandModel createCustomElandModel( + String inferenceEntityId, + TaskType taskType, + Map serviceSettings, + Map taskSettings, + ConfigurationParseContext context + ) { + return switch (taskType) { + case TEXT_EMBEDDING -> new CustomElandEmbeddingModel(inferenceEntityId, taskType, NAME, serviceSettings, context); + case RERANK -> new CustomElandRerankModel(inferenceEntityId, taskType, NAME, serviceSettings, taskSettings, context); + default -> throw new ElasticsearchStatusException(TaskType.unsupportedTaskTypeErrorMsg(taskType, NAME), RestStatus.BAD_REQUEST); + }; + } + private void e5Case( String inferenceEntityId, TaskType taskType, @@ -187,7 +205,7 @@ private static boolean modelVariantDoesNotMatchArchitecturesAndIsNotPlatformAgno } @Override - public ElasticsearchModel parsePersistedConfigWithSecrets( + public Model parsePersistedConfigWithSecrets( String inferenceEntityId, TaskType taskType, Map config, @@ -197,7 +215,7 @@ public ElasticsearchModel parsePersistedConfigWithSecrets( } @Override - public ElasticsearchModel parsePersistedConfig(String inferenceEntityId, TaskType taskType, Map config) { + public Model parsePersistedConfig(String inferenceEntityId, TaskType taskType, Map config) { Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); Map taskSettingsMap = removeFromMap(config, ModelConfigurations.TASK_SETTINGS); @@ -214,14 +232,59 @@ public ElasticsearchModel parsePersistedConfig(String inferenceEntityId, TaskTyp (MultilingualE5SmallInternalServiceSettings) MultilingualE5SmallInternalServiceSettings.fromMap(serviceSettingsMap).build() ); } else { - var serviceSettings = (CustomElandInternalServiceSettings) CustomElandInternalServiceSettings.fromMap(serviceSettingsMap) - .build(); - var taskSettings = CustomElandModel.taskSettingsFromMap(taskType, taskSettingsMap); + return createCustomElandModel( + inferenceEntityId, + taskType, + serviceSettingsMap, + taskSettingsMap, + ConfigurationParseContext.PERSISTENT + ); + } + } + + @Override + public void checkModelConfig(Model model, ActionListener listener) { + if (model instanceof CustomElandEmbeddingModel 
elandModel && elandModel.getTaskType() == TaskType.TEXT_EMBEDDING) { + // At this point the inference endpoint configuration has not been persisted yet, if we attempt to do inference using the + // inference id we'll get an error because the trained model code needs to use the persisted inference endpoint to retrieve the + // model id. To get around this we'll have the getEmbeddingSize() method use the model id instead of inference id. So we need + // to create a temporary model that overrides the inference id with the model id. + var temporaryModelWithModelId = new CustomElandEmbeddingModel( + elandModel.getModelId(), + elandModel.getTaskType(), + elandModel.getConfigurations().getService(), + elandModel.getServiceSettings() + ); - return CustomElandModel.build(inferenceEntityId, taskType, name(), serviceSettings, taskSettings); + ServiceUtils.getEmbeddingSize( + temporaryModelWithModelId, + this, + listener.delegateFailureAndWrap((l, size) -> l.onResponse(updateModelWithEmbeddingDetails(elandModel, size))) + ); + } else { + listener.onResponse(model); } } + private static CustomElandEmbeddingModel updateModelWithEmbeddingDetails(CustomElandEmbeddingModel model, int embeddingSize) { + CustomElandInternalTextEmbeddingServiceSettings serviceSettings = new CustomElandInternalTextEmbeddingServiceSettings( + model.getServiceSettings().getElasticsearchInternalServiceSettings().getNumAllocations(), + model.getServiceSettings().getElasticsearchInternalServiceSettings().getNumThreads(), + model.getServiceSettings().getElasticsearchInternalServiceSettings().getModelId(), + model.getServiceSettings().getElasticsearchInternalServiceSettings().getAdaptiveAllocationsSettings(), + embeddingSize, + model.getServiceSettings().similarity(), + model.getServiceSettings().elementType() + ); + + return new CustomElandEmbeddingModel( + model.getInferenceEntityId(), + model.getTaskType(), + model.getConfigurations().getService(), + serviceSettings + ); + } + @Override public void infer( Model model, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java index a384dfe9a2c90..f1255519590cb 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java @@ -9,21 +9,69 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings; +import org.elasticsearch.xpack.inference.services.ServiceUtils; import org.elasticsearch.xpack.inference.services.settings.InternalServiceSettings; import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveInteger; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredPositiveInteger; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredString; public class 
ElasticsearchInternalServiceSettings extends InternalServiceSettings { public static final String NAME = "text_embedding_internal_service_settings"; + private static final int FAILED_INT_PARSE_VALUE = -1; + + public static ElasticsearchInternalServiceSettings fromMap(Map map, ValidationException validationException) { + Integer numAllocations = extractOptionalPositiveInteger( + map, + NUM_ALLOCATIONS, + ModelConfigurations.SERVICE_SETTINGS, + validationException + ); + Integer numThreads = extractRequiredPositiveInteger(map, NUM_THREADS, ModelConfigurations.SERVICE_SETTINGS, validationException); + AdaptiveAllocationsSettings adaptiveAllocationsSettings = ServiceUtils.removeAsAdaptiveAllocationsSettings( + map, + ADAPTIVE_ALLOCATIONS, + validationException + ); + String modelId = extractRequiredString(map, MODEL_ID, ModelConfigurations.SERVICE_SETTINGS, validationException); + + // if an error occurred while parsing, we'll set these to an invalid value, so we don't accidentally get a + // null pointer when doing unboxing + return new ElasticsearchInternalServiceSettings( + numAllocations, + Objects.requireNonNullElse(numThreads, FAILED_INT_PARSE_VALUE), + modelId, + adaptiveAllocationsSettings + ); + } - public ElasticsearchInternalServiceSettings(int numAllocations, int numThreads, String modelVariant) { - super(numAllocations, numThreads, modelVariant); + public ElasticsearchInternalServiceSettings( + Integer numAllocations, + int numThreads, + String modelVariant, + AdaptiveAllocationsSettings adaptiveAllocationsSettings + ) { + super(numAllocations, numThreads, modelVariant, adaptiveAllocationsSettings); } public ElasticsearchInternalServiceSettings(StreamInput in) throws IOException { - super(in.readVInt(), in.readVInt(), in.readString()); + super( + in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS) ? in.readOptionalVInt() : in.readVInt(), + in.readVInt(), + in.readString(), + in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS) + ? 
in.readOptionalWriteable(AdaptiveAllocationsSettings::new) + : null + ); } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchModel.java index dc6561ba992fe..627e570b24163 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchModel.java @@ -9,41 +9,15 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.inference.Model; -import org.elasticsearch.inference.ModelConfigurations; -import org.elasticsearch.inference.TaskSettings; -import org.elasticsearch.inference.TaskType; import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; -public abstract class ElasticsearchModel extends Model { +public interface ElasticsearchModel { + String getModelId(); - public ElasticsearchModel( - String inferenceEntityId, - TaskType taskType, - String service, - ElasticsearchInternalServiceSettings serviceSettings - ) { - super(new ModelConfigurations(inferenceEntityId, taskType, service, serviceSettings)); - } + StartTrainedModelDeploymentAction.Request getStartTrainedModelDeploymentActionRequest(); - public ElasticsearchModel( - String inferenceEntityId, - TaskType taskType, - String service, - ElasticsearchInternalServiceSettings serviceSettings, - TaskSettings taskSettings - ) { - super(new ModelConfigurations(inferenceEntityId, taskType, service, serviceSettings, taskSettings)); - } - - @Override - public ElasticsearchInternalServiceSettings getServiceSettings() { - return (ElasticsearchInternalServiceSettings) super.getServiceSettings(); - } - - abstract StartTrainedModelDeploymentAction.Request getStartTrainedModelDeploymentActionRequest(); - - abstract ActionListener getCreateTrainedModelAssignmentActionListener( + ActionListener getCreateTrainedModelAssignmentActionListener( Model model, ActionListener listener ); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/MultilingualE5SmallInternalServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/MultilingualE5SmallInternalServiceSettings.java index d514ca6a917d4..169082b3d4a9d 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/MultilingualE5SmallInternalServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/MultilingualE5SmallInternalServiceSettings.java @@ -7,19 +7,26 @@ package org.elasticsearch.xpack.inference.services.elasticsearch; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; +import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings; import 
org.elasticsearch.xpack.inference.services.ServiceUtils; import org.elasticsearch.xpack.inference.services.settings.InternalServiceSettings; import java.io.IOException; import java.util.Arrays; +import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveInteger; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredPositiveInteger; + public class MultilingualE5SmallInternalServiceSettings extends ElasticsearchInternalServiceSettings { public static final String NAME = "multilingual_e5_small_service_settings"; @@ -27,12 +34,24 @@ public class MultilingualE5SmallInternalServiceSettings extends ElasticsearchInt static final int DIMENSIONS = 384; static final SimilarityMeasure SIMILARITY = SimilarityMeasure.COSINE; - public MultilingualE5SmallInternalServiceSettings(int numAllocations, int numThreads, String modelId) { - super(numAllocations, numThreads, modelId); + public MultilingualE5SmallInternalServiceSettings( + Integer numAllocations, + int numThreads, + String modelId, + AdaptiveAllocationsSettings adaptiveAllocationsSettings + ) { + super(numAllocations, numThreads, modelId, adaptiveAllocationsSettings); } public MultilingualE5SmallInternalServiceSettings(StreamInput in) throws IOException { - super(in.readVInt(), in.readVInt(), in.readString()); + super( + in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS) ? in.readOptionalVInt() : in.readVInt(), + in.readVInt(), + in.readString(), + in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS) + ? in.readOptionalWriteable(AdaptiveAllocationsSettings::new) + : null + ); } /** @@ -56,11 +75,26 @@ public static MultilingualE5SmallInternalServiceSettings.Builder fromMap(Map map, ValidationException validationException) { - Integer numAllocations = ServiceUtils.removeAsType(map, NUM_ALLOCATIONS, Integer.class); - Integer numThreads = ServiceUtils.removeAsType(map, NUM_THREADS, Integer.class); - - validateParameters(numAllocations, validationException, numThreads); - + Integer numAllocations = extractOptionalPositiveInteger( + map, + NUM_ALLOCATIONS, + ModelConfigurations.SERVICE_SETTINGS, + validationException + ); + Integer numThreads = extractRequiredPositiveInteger(map, NUM_THREADS, ModelConfigurations.SERVICE_SETTINGS, validationException); + AdaptiveAllocationsSettings adaptiveAllocationsSettings = ServiceUtils.removeAsAdaptiveAllocationsSettings( + map, + ADAPTIVE_ALLOCATIONS, + validationException + ); + if (numAllocations == null && adaptiveAllocationsSettings == null) { + validationException.addValidationError( + ServiceUtils.missingOneOfSettingsErrorMsg( + List.of(NUM_ALLOCATIONS, ADAPTIVE_ALLOCATIONS), + ModelConfigurations.SERVICE_SETTINGS + ) + ); + } String modelId = ServiceUtils.removeAsType(map, MODEL_ID, String.class); if (modelId != null) { if (ElasticsearchInternalService.MULTILINGUAL_E5_SMALL_VALID_IDS.contains(modelId) == false) { @@ -73,23 +107,34 @@ private static RequestFields extractRequestFields(Map map, Valid } } - return new RequestFields(numAllocations, numThreads, modelId); + return new RequestFields(numAllocations, numThreads, modelId, adaptiveAllocationsSettings); } private static MultilingualE5SmallInternalServiceSettings.Builder createBuilder(RequestFields requestFields) { var builder = new InternalServiceSettings.Builder() { @Override public MultilingualE5SmallInternalServiceSettings build() { - return new 
MultilingualE5SmallInternalServiceSettings(getNumAllocations(), getNumThreads(), getModelId()); + return new MultilingualE5SmallInternalServiceSettings( + getNumAllocations(), + getNumThreads(), + getModelId(), + getAdaptiveAllocationsSettings() + ); } }; builder.setNumAllocations(requestFields.numAllocations); builder.setNumThreads(requestFields.numThreads); builder.setModelId(requestFields.modelId); + builder.setAdaptiveAllocationsSettings(requestFields.adaptiveAllocationsSettings); return builder; } - private record RequestFields(@Nullable Integer numAllocations, @Nullable Integer numThreads, @Nullable String modelId) {} + private record RequestFields( + @Nullable Integer numAllocations, + @Nullable Integer numThreads, + @Nullable String modelId, + @Nullable AdaptiveAllocationsSettings adaptiveAllocationsSettings + ) {} @Override public boolean isFragment() { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/MultilingualE5SmallModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/MultilingualE5SmallModel.java index 86be6a04b213d..f22118d00cc29 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/MultilingualE5SmallModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/MultilingualE5SmallModel.java @@ -10,6 +10,7 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.TaskType; import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; @@ -17,7 +18,7 @@ import static org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus.State.STARTED; -public class MultilingualE5SmallModel extends ElasticsearchModel { +public class MultilingualE5SmallModel extends Model implements ElasticsearchModel { public MultilingualE5SmallModel( String inferenceEntityId, @@ -25,7 +26,7 @@ public MultilingualE5SmallModel( String service, MultilingualE5SmallInternalServiceSettings serviceSettings ) { - super(inferenceEntityId, taskType, service, serviceSettings); + super(new ModelConfigurations(inferenceEntityId, taskType, service, serviceSettings)); } @Override @@ -34,20 +35,26 @@ public MultilingualE5SmallInternalServiceSettings getServiceSettings() { } @Override - StartTrainedModelDeploymentAction.Request getStartTrainedModelDeploymentActionRequest() { + public String getModelId() { + return getServiceSettings().getModelId(); + } + + @Override + public StartTrainedModelDeploymentAction.Request getStartTrainedModelDeploymentActionRequest() { var startRequest = new StartTrainedModelDeploymentAction.Request( this.getServiceSettings().getModelId(), this.getInferenceEntityId() ); startRequest.setNumberOfAllocations(this.getServiceSettings().getNumAllocations()); startRequest.setThreadsPerAllocation(this.getServiceSettings().getNumThreads()); + startRequest.setAdaptiveAllocationsSettings(this.getServiceSettings().getAdaptiveAllocationsSettings()); startRequest.setWaitForState(STARTED); return startRequest; } @Override - ActionListener getCreateTrainedModelAssignmentActionListener( + public ActionListener getCreateTrainedModelAssignmentActionListener( Model model, ActionListener listener ) { diff --git 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalService.java index 11c97f8b8e37e..54434a7563dab 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalService.java @@ -216,6 +216,7 @@ private static StartTrainedModelDeploymentAction.Request startDeploymentRequest( ); startRequest.setNumberOfAllocations(serviceSettings.getNumAllocations()); startRequest.setThreadsPerAllocation(serviceSettings.getNumThreads()); + startRequest.setAdaptiveAllocationsSettings(serviceSettings.getAdaptiveAllocationsSettings()); startRequest.setWaitForState(STARTED); return startRequest; } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalServiceSettings.java index 21fef51eed077..e0196d378b267 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalServiceSettings.java @@ -12,13 +12,20 @@ import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings; import org.elasticsearch.xpack.inference.services.ServiceUtils; import org.elasticsearch.xpack.inference.services.settings.InternalServiceSettings; import java.io.IOException; +import java.util.List; import java.util.Map; import java.util.Objects; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveInteger; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalString; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredPositiveInteger; + public class ElserInternalServiceSettings extends InternalServiceSettings { public static final String NAME = "elser_mlnode_service_settings"; @@ -34,14 +41,31 @@ public class ElserInternalServiceSettings extends InternalServiceSettings { */ public static ElserInternalServiceSettings.Builder fromMap(Map map) { ValidationException validationException = new ValidationException(); - Integer numAllocations = ServiceUtils.removeAsType(map, NUM_ALLOCATIONS, Integer.class); - Integer numThreads = ServiceUtils.removeAsType(map, NUM_THREADS, Integer.class); - validateParameters(numAllocations, validationException, numThreads); + Integer numAllocations = extractOptionalPositiveInteger( + map, + NUM_ALLOCATIONS, + ModelConfigurations.SERVICE_SETTINGS, + validationException + ); + Integer numThreads = extractRequiredPositiveInteger(map, NUM_THREADS, ModelConfigurations.SERVICE_SETTINGS, validationException); + AdaptiveAllocationsSettings adaptiveAllocationsSettings = ServiceUtils.removeAsAdaptiveAllocationsSettings( + map, + ADAPTIVE_ALLOCATIONS, + validationException + ); + if (numAllocations == null && adaptiveAllocationsSettings == null) { + validationException.addValidationError( + 
ServiceUtils.missingOneOfSettingsErrorMsg( + List.of(NUM_ALLOCATIONS, ADAPTIVE_ALLOCATIONS), + ModelConfigurations.SERVICE_SETTINGS + ) + ); + } + String modelId = extractOptionalString(map, MODEL_ID, ModelConfigurations.SERVICE_SETTINGS, validationException); - String model_id = ServiceUtils.removeAsType(map, MODEL_ID, String.class); - if (model_id != null && ElserInternalService.VALID_ELSER_MODEL_IDS.contains(model_id) == false) { - validationException.addValidationError("unknown ELSER model id [" + model_id + "]"); + if (modelId != null && ElserInternalService.VALID_ELSER_MODEL_IDS.contains(modelId) == false) { + validationException.addValidationError("unknown ELSER model id [" + modelId + "]"); } if (validationException.validationErrors().isEmpty() == false) { @@ -51,25 +75,39 @@ public static ElserInternalServiceSettings.Builder fromMap(Map m var builder = new InternalServiceSettings.Builder() { @Override public ElserInternalServiceSettings build() { - return new ElserInternalServiceSettings(getNumAllocations(), getNumThreads(), getModelId()); + return new ElserInternalServiceSettings( + getNumAllocations(), + getNumThreads(), + getModelId(), + getAdaptiveAllocationsSettings() + ); } }; builder.setNumAllocations(numAllocations); builder.setNumThreads(numThreads); - builder.setModelId(model_id); + builder.setAdaptiveAllocationsSettings(adaptiveAllocationsSettings); + builder.setModelId(modelId); return builder; } - public ElserInternalServiceSettings(int numAllocations, int numThreads, String modelId) { - super(numAllocations, numThreads, modelId); + public ElserInternalServiceSettings( + Integer numAllocations, + int numThreads, + String modelId, + AdaptiveAllocationsSettings adaptiveAllocationsSettings + ) { + super(numAllocations, numThreads, modelId, adaptiveAllocationsSettings); Objects.requireNonNull(modelId); } public ElserInternalServiceSettings(StreamInput in) throws IOException { super( + in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS) ? in.readOptionalVInt() : in.readVInt(), in.readVInt(), - in.readVInt(), - in.getTransportVersion().onOrAfter(TransportVersions.V_8_11_X) ? in.readString() : ElserInternalService.ELSER_V2_MODEL + in.getTransportVersion().onOrAfter(TransportVersions.V_8_11_X) ? in.readString() : ElserInternalService.ELSER_V2_MODEL, + in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS) + ? 
in.readOptionalWriteable(AdaptiveAllocationsSettings::new) + : null ); } @@ -85,16 +123,23 @@ public TransportVersion getMinimalSupportedVersion() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(getNumAllocations()); + if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) { + out.writeOptionalVInt(getNumAllocations()); + } else { + out.writeVInt(getNumAllocations()); + } out.writeVInt(getNumThreads()); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_11_X)) { out.writeString(getModelId()); } + if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) { + out.writeOptionalWriteable(getAdaptiveAllocationsSettings()); + } } @Override public int hashCode() { - return Objects.hash(NAME, getNumAllocations(), getNumThreads(), getModelId()); + return Objects.hash(NAME, getNumAllocations(), getNumThreads(), getModelId(), getAdaptiveAllocationsSettings()); } @Override @@ -104,6 +149,7 @@ public boolean equals(Object o) { ElserInternalServiceSettings that = (ElserInternalServiceSettings) o; return getNumAllocations() == that.getNumAllocations() && getNumThreads() == that.getNumThreads() - && Objects.equals(getModelId(), that.getModelId()); + && Objects.equals(getModelId(), that.getModelId()) + && Objects.equals(getAdaptiveAllocationsSettings(), that.getAdaptiveAllocationsSettings()); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioService.java index 19d0a5fe0a317..911ccd33690d4 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioService.java @@ -23,7 +23,7 @@ import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xpack.inference.common.EmbeddingRequestChunker; +import org.elasticsearch.xpack.inference.chunking.EmbeddingRequestChunker; import org.elasticsearch.xpack.inference.external.action.googleaistudio.GoogleAiStudioActionCreator; import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiModel.java new file mode 100644 index 0000000000000..17e6ec2152e7e --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiModel.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.googlevertexai; + +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.action.googlevertexai.GoogleVertexAiActionVisitor; + +import java.util.Map; +import java.util.Objects; + +public abstract class GoogleVertexAiModel extends Model { + + private final GoogleVertexAiRateLimitServiceSettings rateLimitServiceSettings; + + public GoogleVertexAiModel( + ModelConfigurations configurations, + ModelSecrets secrets, + GoogleVertexAiRateLimitServiceSettings rateLimitServiceSettings + ) { + super(configurations, secrets); + + this.rateLimitServiceSettings = Objects.requireNonNull(rateLimitServiceSettings); + } + + public GoogleVertexAiModel(GoogleVertexAiModel model, ServiceSettings serviceSettings) { + super(model, serviceSettings); + + rateLimitServiceSettings = model.rateLimitServiceSettings(); + } + + public abstract ExecutableAction accept(GoogleVertexAiActionVisitor creator, Map taskSettings); + + public GoogleVertexAiRateLimitServiceSettings rateLimitServiceSettings() { + return rateLimitServiceSettings; + } + +} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/IntervalScripting.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiRateLimitServiceSettings.java similarity index 51% rename from x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/IntervalScripting.java rename to x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiRateLimitServiceSettings.java index 121696f1df4f9..f7c7865a5a8f4 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/scalar/IntervalScripting.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiRateLimitServiceSettings.java @@ -5,15 +5,12 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.core.expression.function.scalar; +package org.elasticsearch.xpack.inference.services.googlevertexai; -// FIXME: accessor interface until making script generation pluggable -public interface IntervalScripting { +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; - String script(); +public interface GoogleVertexAiRateLimitServiceSettings { - String value(); - - String typeName(); + RateLimitSettings rateLimitSettings(); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiSecretSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiSecretSettings.java new file mode 100644 index 0000000000000..57c8d61f9f9a5 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiSecretSettings.java @@ -0,0 +1,104 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.googlevertexai; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SecretSettings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredSecureString; + +public class GoogleVertexAiSecretSettings implements SecretSettings { + + public static final String NAME = "google_vertex_ai_secret_settings"; + + public static final String SERVICE_ACCOUNT_JSON = "service_account_json"; + + private final SecureString serviceAccountJson; + + public static GoogleVertexAiSecretSettings fromMap(@Nullable Map map) { + if (map == null) { + return null; + } + + ValidationException validationException = new ValidationException(); + SecureString secureServiceAccountJson = extractRequiredSecureString( + map, + SERVICE_ACCOUNT_JSON, + ModelSecrets.SECRET_SETTINGS, + validationException + ); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new GoogleVertexAiSecretSettings(secureServiceAccountJson); + } + + public GoogleVertexAiSecretSettings(SecureString serviceAccountJson) { + this.serviceAccountJson = Objects.requireNonNull(serviceAccountJson); + } + + public GoogleVertexAiSecretSettings(StreamInput in) throws IOException { + this(in.readSecureString()); + } + + public SecureString serviceAccountJson() { + return serviceAccountJson; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + builder.field(SERVICE_ACCOUNT_JSON, serviceAccountJson.toString()); + + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ML_INFERENCE_GOOGLE_VERTEX_AI_EMBEDDINGS_ADDED; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeSecureString(serviceAccountJson); + } + + @Override + public boolean equals(Object object) { + if (this == object) return true; + if (object == null || getClass() != object.getClass()) return false; + GoogleVertexAiSecretSettings that = (GoogleVertexAiSecretSettings) object; + return Objects.equals(serviceAccountJson, that.serviceAccountJson); + } + + @Override + public int hashCode() { + return Objects.hash(serviceAccountJson); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java new file mode 100644 index 0000000000000..f7a8055a90abb --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java @@ -0,0 +1,295 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.googlevertexai; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Strings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.ChunkedInferenceServiceResults; +import org.elasticsearch.inference.ChunkingOptions; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.inference.InputType; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.inference.chunking.EmbeddingRequestChunker; +import org.elasticsearch.xpack.inference.external.action.googlevertexai.GoogleVertexAiActionCreator; +import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; +import org.elasticsearch.xpack.inference.external.http.sender.QueryAndDocsInputs; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.SenderService; +import org.elasticsearch.xpack.inference.services.ServiceComponents; +import org.elasticsearch.xpack.inference.services.ServiceUtils; +import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsModel; +import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsServiceSettings; +import org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankModel; + +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.parsePersistedConfigErrorMsg; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrDefaultEmpty; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; +import static org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiServiceFields.EMBEDDING_MAX_BATCH_SIZE; + +public class GoogleVertexAiService extends SenderService { + + public static final String NAME = "googlevertexai"; + + public GoogleVertexAiService(HttpRequestSender.Factory factory, ServiceComponents serviceComponents) { + super(factory, serviceComponents); + } + + @Override + public String name() { + return NAME; + } + + @Override + public void parseRequestConfig( + String inferenceEntityId, + TaskType taskType, + Map config, + Set platformArchitectures, + ActionListener parseModelListener + ) { + try { + Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); + Map taskSettingsMap = removeFromMapOrDefaultEmpty(config, ModelConfigurations.TASK_SETTINGS); + + GoogleVertexAiModel model = createModel( + inferenceEntityId, + taskType, + serviceSettingsMap, + taskSettingsMap, + 
serviceSettingsMap, + TaskType.unsupportedTaskTypeErrorMsg(taskType, NAME), + ConfigurationParseContext.REQUEST + ); + + throwIfNotEmptyMap(config, NAME); + throwIfNotEmptyMap(serviceSettingsMap, NAME); + throwIfNotEmptyMap(taskSettingsMap, NAME); + + parseModelListener.onResponse(model); + } catch (Exception e) { + parseModelListener.onFailure(e); + } + } + + @Override + public Model parsePersistedConfigWithSecrets( + String inferenceEntityId, + TaskType taskType, + Map config, + Map secrets + ) { + Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); + Map taskSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.TASK_SETTINGS); + Map secretSettingsMap = removeFromMapOrDefaultEmpty(secrets, ModelSecrets.SECRET_SETTINGS); + + return createModelFromPersistent( + inferenceEntityId, + taskType, + serviceSettingsMap, + taskSettingsMap, + secretSettingsMap, + parsePersistedConfigErrorMsg(inferenceEntityId, NAME) + ); + } + + @Override + public Model parsePersistedConfig(String inferenceEntityId, TaskType taskType, Map config) { + Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); + Map taskSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.TASK_SETTINGS); + + return createModelFromPersistent( + inferenceEntityId, + taskType, + serviceSettingsMap, + taskSettingsMap, + null, + parsePersistedConfigErrorMsg(inferenceEntityId, NAME) + ); + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ML_INFERENCE_GOOGLE_VERTEX_AI_EMBEDDINGS_ADDED; + } + + @Override + public void checkModelConfig(Model model, ActionListener listener) { + if (model instanceof GoogleVertexAiEmbeddingsModel embeddingsModel) { + ServiceUtils.getEmbeddingSize( + model, + this, + listener.delegateFailureAndWrap((l, size) -> l.onResponse(updateModelWithEmbeddingDetails(embeddingsModel, size))) + ); + } else { + listener.onResponse(model); + } + } + + @Override + protected void doInfer( + Model model, + List input, + Map taskSettings, + InputType inputType, + TimeValue timeout, + ActionListener listener + ) { + if (model instanceof GoogleVertexAiModel == false) { + listener.onFailure(createInvalidModelException(model)); + return; + } + + GoogleVertexAiModel googleVertexAiModel = (GoogleVertexAiModel) model; + + var actionCreator = new GoogleVertexAiActionCreator(getSender(), getServiceComponents()); + + var action = googleVertexAiModel.accept(actionCreator, taskSettings); + action.execute(new DocumentsOnlyInput(input), timeout, listener); + } + + @Override + protected void doInfer( + Model model, + String query, + List input, + Map taskSettings, + InputType inputType, + TimeValue timeout, + ActionListener listener + ) { + if (model instanceof GoogleVertexAiModel == false) { + listener.onFailure(createInvalidModelException(model)); + return; + } + + GoogleVertexAiModel googleVertexAiModel = (GoogleVertexAiModel) model; + var actionCreator = new GoogleVertexAiActionCreator(getSender(), getServiceComponents()); + + var action = googleVertexAiModel.accept(actionCreator, taskSettings); + action.execute(new QueryAndDocsInputs(query, input), timeout, listener); + } + + @Override + protected void doChunkedInfer( + Model model, + String query, + List input, + Map taskSettings, + InputType inputType, + ChunkingOptions chunkingOptions, + TimeValue timeout, + ActionListener> listener + ) { + GoogleVertexAiModel googleVertexAiModel = (GoogleVertexAiModel) model; + var 
actionCreator = new GoogleVertexAiActionCreator(getSender(), getServiceComponents()); + + var batchedRequests = new EmbeddingRequestChunker(input, EMBEDDING_MAX_BATCH_SIZE, EmbeddingRequestChunker.EmbeddingType.FLOAT) + .batchRequestsWithListeners(listener); + for (var request : batchedRequests) { + var action = googleVertexAiModel.accept(actionCreator, taskSettings); + action.execute(new DocumentsOnlyInput(request.batch().inputs()), timeout, request.listener()); + } + } + + private GoogleVertexAiEmbeddingsModel updateModelWithEmbeddingDetails(GoogleVertexAiEmbeddingsModel model, int embeddingSize) { + if (model.getServiceSettings().dimensionsSetByUser() + && model.getServiceSettings().dimensions() != null + && model.getServiceSettings().dimensions() != embeddingSize) { + throw new ElasticsearchStatusException( + Strings.format( + "The retrieved embeddings size [%s] does not match the size specified in the settings [%s]. " + + "Please recreate the [%s] configuration with the correct dimensions", + embeddingSize, + model.getServiceSettings().dimensions(), + model.getConfigurations().getInferenceEntityId() + ), + RestStatus.BAD_REQUEST + ); + } + + GoogleVertexAiEmbeddingsServiceSettings serviceSettings = new GoogleVertexAiEmbeddingsServiceSettings( + model.getServiceSettings().location(), + model.getServiceSettings().projectId(), + model.getServiceSettings().modelId(), + model.getServiceSettings().dimensionsSetByUser(), + model.getServiceSettings().maxInputTokens(), + embeddingSize, + model.getServiceSettings().similarity(), + model.getServiceSettings().rateLimitSettings() + ); + + return new GoogleVertexAiEmbeddingsModel(model, serviceSettings); + } + + private static GoogleVertexAiModel createModelFromPersistent( + String inferenceEntityId, + TaskType taskType, + Map serviceSettings, + Map taskSettings, + Map secretSettings, + String failureMessage + ) { + return createModel( + inferenceEntityId, + taskType, + serviceSettings, + taskSettings, + secretSettings, + failureMessage, + ConfigurationParseContext.PERSISTENT + ); + } + + private static GoogleVertexAiModel createModel( + String inferenceEntityId, + TaskType taskType, + Map serviceSettings, + Map taskSettings, + @Nullable Map secretSettings, + String failureMessage, + ConfigurationParseContext context + ) { + return switch (taskType) { + case TEXT_EMBEDDING -> new GoogleVertexAiEmbeddingsModel( + inferenceEntityId, + taskType, + NAME, + serviceSettings, + taskSettings, + secretSettings, + context + ); + case RERANK -> new GoogleVertexAiRerankModel( + inferenceEntityId, + taskType, + NAME, + serviceSettings, + taskSettings, + secretSettings, + context + ); + default -> throw new ElasticsearchStatusException(failureMessage, RestStatus.BAD_REQUEST); + }; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceFields.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceFields.java new file mode 100644 index 0000000000000..c669155a6cf2c --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceFields.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.googlevertexai; + +public class GoogleVertexAiServiceFields { + + public static final String LOCATION = "location"; + + public static final String PROJECT_ID = "project_id"; + + /** + * In `us-central-1` the max input size is `250`, but in every other region it's `5` according + * to these docs: https://cloud.google.com/vertex-ai/generative-ai/docs/embeddings/get-text-embeddings. + * + * Therefore, being conservative and setting it to `5`. + */ + static final int EMBEDDING_MAX_BATCH_SIZE = 5; + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModel.java new file mode 100644 index 0000000000000..99110045fc3da --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModel.java @@ -0,0 +1,143 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.googlevertexai.embeddings; + +import org.apache.http.client.utils.URIBuilder; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.action.googlevertexai.GoogleVertexAiActionVisitor; +import org.elasticsearch.xpack.inference.external.request.googlevertexai.GoogleVertexAiUtils; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiModel; +import org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiSecretSettings; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.Map; + +import static org.elasticsearch.core.Strings.format; + +public class GoogleVertexAiEmbeddingsModel extends GoogleVertexAiModel { + + private URI uri; + + public GoogleVertexAiEmbeddingsModel( + String inferenceEntityId, + TaskType taskType, + String service, + Map serviceSettings, + Map taskSettings, + Map secrets, + ConfigurationParseContext context + ) { + this( + inferenceEntityId, + taskType, + service, + GoogleVertexAiEmbeddingsServiceSettings.fromMap(serviceSettings, context), + GoogleVertexAiEmbeddingsTaskSettings.fromMap(taskSettings), + GoogleVertexAiSecretSettings.fromMap(secrets) + ); + } + + public GoogleVertexAiEmbeddingsModel(GoogleVertexAiEmbeddingsModel model, GoogleVertexAiEmbeddingsServiceSettings serviceSettings) { + super(model, serviceSettings); + } + + // Should only be used directly for testing + GoogleVertexAiEmbeddingsModel( + String inferenceEntityId, + TaskType taskType, + String service, + GoogleVertexAiEmbeddingsServiceSettings serviceSettings, + GoogleVertexAiEmbeddingsTaskSettings taskSettings, + @Nullable GoogleVertexAiSecretSettings secrets + ) { + super( + new ModelConfigurations(inferenceEntityId, taskType, service, serviceSettings, taskSettings), + new ModelSecrets(secrets), + serviceSettings + 
); + try { + this.uri = buildUri(serviceSettings.location(), serviceSettings.projectId(), serviceSettings.modelId()); + } catch (URISyntaxException e) { + throw new RuntimeException(e); + } + } + + // Should only be used directly for testing + protected GoogleVertexAiEmbeddingsModel( + String inferenceEntityId, + TaskType taskType, + String service, + String uri, + GoogleVertexAiEmbeddingsServiceSettings serviceSettings, + GoogleVertexAiEmbeddingsTaskSettings taskSettings, + @Nullable GoogleVertexAiSecretSettings secrets + ) { + super( + new ModelConfigurations(inferenceEntityId, taskType, service, serviceSettings, taskSettings), + new ModelSecrets(secrets), + serviceSettings + ); + try { + this.uri = new URI(uri); + } catch (URISyntaxException e) { + throw new RuntimeException(e); + } + } + + @Override + public GoogleVertexAiEmbeddingsServiceSettings getServiceSettings() { + return (GoogleVertexAiEmbeddingsServiceSettings) super.getServiceSettings(); + } + + @Override + public GoogleVertexAiEmbeddingsTaskSettings getTaskSettings() { + return (GoogleVertexAiEmbeddingsTaskSettings) super.getTaskSettings(); + } + + @Override + public GoogleVertexAiSecretSettings getSecretSettings() { + return (GoogleVertexAiSecretSettings) super.getSecretSettings(); + } + + @Override + public GoogleVertexAiEmbeddingsRateLimitServiceSettings rateLimitServiceSettings() { + return (GoogleVertexAiEmbeddingsRateLimitServiceSettings) super.rateLimitServiceSettings(); + } + + public URI uri() { + return uri; + } + + @Override + public ExecutableAction accept(GoogleVertexAiActionVisitor visitor, Map taskSettings) { + return visitor.create(this, taskSettings); + } + + public static URI buildUri(String location, String projectId, String modelId) throws URISyntaxException { + return new URIBuilder().setScheme("https") + .setHost(format("%s%s", location, GoogleVertexAiUtils.GOOGLE_VERTEX_AI_HOST_SUFFIX)) + .setPathSegments( + GoogleVertexAiUtils.V1, + GoogleVertexAiUtils.PROJECTS, + projectId, + GoogleVertexAiUtils.LOCATIONS, + location, + GoogleVertexAiUtils.PUBLISHERS, + GoogleVertexAiUtils.PUBLISHER_GOOGLE, + GoogleVertexAiUtils.MODELS, + format("%s:%s", modelId, GoogleVertexAiUtils.PREDICT) + ) + .build(); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRateLimitServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRateLimitServiceSettings.java new file mode 100644 index 0000000000000..a95860b1793d5 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRateLimitServiceSettings.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
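The buildUri helper in GoogleVertexAiEmbeddingsModel above assembles the Vertex AI prediction endpoint from the configured location, project id and model id. A minimal usage sketch follows; the host suffix value and the example ids are assumptions for illustration (GoogleVertexAiUtils is not part of this hunk), not values taken from this change.

// Sketch only. Assumes GOOGLE_VERTEX_AI_HOST_SUFFIX resolves to "-aiplatform.googleapis.com"
// and uses hypothetical project and model ids; buildUri declares URISyntaxException,
// which the constructors above wrap in a RuntimeException.
URI uri = GoogleVertexAiEmbeddingsModel.buildUri("us-central1", "my-gcp-project", "text-embedding-004");
// Expected shape:
// https://us-central1-aiplatform.googleapis.com/v1/projects/my-gcp-project/locations/us-central1
//     /publishers/google/models/text-embedding-004:predict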
+ */ + +package org.elasticsearch.xpack.inference.services.googlevertexai.embeddings; + +import org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiRateLimitServiceSettings; + +public interface GoogleVertexAiEmbeddingsRateLimitServiceSettings extends GoogleVertexAiRateLimitServiceSettings { + + String projectId(); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettings.java new file mode 100644 index 0000000000000..14a67a64377e2 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettings.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.googlevertexai.embeddings; + +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.core.Nullable; + +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalBoolean; + +public record GoogleVertexAiEmbeddingsRequestTaskSettings(@Nullable Boolean autoTruncate) { + + public static final GoogleVertexAiEmbeddingsRequestTaskSettings EMPTY_SETTINGS = new GoogleVertexAiEmbeddingsRequestTaskSettings(null); + + public static GoogleVertexAiEmbeddingsRequestTaskSettings fromMap(Map map) { + if (map.isEmpty()) { + return GoogleVertexAiEmbeddingsRequestTaskSettings.EMPTY_SETTINGS; + } + + ValidationException validationException = new ValidationException(); + + Boolean autoTruncate = extractOptionalBoolean(map, GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE, validationException); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new GoogleVertexAiEmbeddingsRequestTaskSettings(autoTruncate); + } + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsServiceSettings.java new file mode 100644 index 0000000000000..f4bf40d290399 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsServiceSettings.java @@ -0,0 +1,274 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.googlevertexai.embeddings; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.ServiceUtils; +import org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiService; +import org.elasticsearch.xpack.inference.services.settings.FilteredXContentObject; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.services.ServiceFields.DIMENSIONS; +import static org.elasticsearch.xpack.inference.services.ServiceFields.MAX_INPUT_TOKENS; +import static org.elasticsearch.xpack.inference.services.ServiceFields.MODEL_ID; +import static org.elasticsearch.xpack.inference.services.ServiceFields.SIMILARITY; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalBoolean; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveInteger; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredString; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractSimilarity; +import static org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiServiceFields.LOCATION; +import static org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiServiceFields.PROJECT_ID; + +public class GoogleVertexAiEmbeddingsServiceSettings extends FilteredXContentObject + implements + ServiceSettings, + GoogleVertexAiEmbeddingsRateLimitServiceSettings { + + public static final String NAME = "google_vertex_ai_embeddings_service_settings"; + + public static final String DIMENSIONS_SET_BY_USER = "dimensions_set_by_user"; + + // See online prediction requests per minute: https://cloud.google.com/vertex-ai/docs/quotas. 
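That quota comment motivates the 30,000-requests-per-minute default declared immediately below. As a minimal sketch of how a caller-supplied override would reach RateLimitSettings.of further down, assuming the public field names are "rate_limit" and "requests_per_minute" and using hypothetical ids (neither is shown in this hunk):

// Sketch only: a service_settings map with an explicit rate limit override.
// Mutable HashMap copies are used because parsing removes the keys it consumes.
// Requires java.util.Map and java.util.HashMap.
Map<String, Object> serviceSettingsMap = new HashMap<>(Map.of(
    "location", "us-central1",
    "project_id", "my-gcp-project",
    "model_id", "text-embedding-004",
    "rate_limit", new HashMap<>(Map.of("requests_per_minute", 120))
));
// fromMap(serviceSettingsMap, ConfigurationParseContext.REQUEST) would pick up the override;
// when no "rate_limit" entry is present, the 30,000-per-minute default below applies.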
+ private static final RateLimitSettings DEFAULT_RATE_LIMIT_SETTINGS = new RateLimitSettings(30_000); + + public static GoogleVertexAiEmbeddingsServiceSettings fromMap(Map map, ConfigurationParseContext context) { + ValidationException validationException = new ValidationException(); + + String location = extractRequiredString(map, LOCATION, ModelConfigurations.SERVICE_SETTINGS, validationException); + String projectId = extractRequiredString(map, PROJECT_ID, ModelConfigurations.SERVICE_SETTINGS, validationException); + String model = extractRequiredString(map, MODEL_ID, ModelConfigurations.SERVICE_SETTINGS, validationException); + Integer maxInputTokens = extractOptionalPositiveInteger( + map, + MAX_INPUT_TOKENS, + ModelConfigurations.SERVICE_SETTINGS, + validationException + ); + SimilarityMeasure similarityMeasure = extractSimilarity(map, ModelConfigurations.SERVICE_SETTINGS, validationException); + Integer dims = extractOptionalPositiveInteger(map, DIMENSIONS, ModelConfigurations.SERVICE_SETTINGS, validationException); + RateLimitSettings rateLimitSettings = RateLimitSettings.of( + map, + DEFAULT_RATE_LIMIT_SETTINGS, + validationException, + GoogleVertexAiService.NAME, + context + ); + + Boolean dimensionsSetByUser = extractOptionalBoolean(map, DIMENSIONS_SET_BY_USER, validationException); + + switch (context) { + case REQUEST -> { + if (dimensionsSetByUser != null) { + validationException.addValidationError( + ServiceUtils.invalidSettingError(DIMENSIONS_SET_BY_USER, ModelConfigurations.SERVICE_SETTINGS) + ); + } + dimensionsSetByUser = dims != null; + } + case PERSISTENT -> { + if (dimensionsSetByUser == null) { + validationException.addValidationError( + ServiceUtils.missingSettingErrorMsg(DIMENSIONS_SET_BY_USER, ModelConfigurations.SERVICE_SETTINGS) + ); + } + } + } + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new GoogleVertexAiEmbeddingsServiceSettings( + location, + projectId, + model, + dimensionsSetByUser, + maxInputTokens, + dims, + similarityMeasure, + rateLimitSettings + ); + } + + private final String location; + + private final String projectId; + + private final String modelId; + + private final Integer dims; + + private final SimilarityMeasure similarity; + private final Integer maxInputTokens; + + private final RateLimitSettings rateLimitSettings; + + private final Boolean dimensionsSetByUser; + + public GoogleVertexAiEmbeddingsServiceSettings( + String location, + String projectId, + String modelId, + Boolean dimensionsSetByUser, + @Nullable Integer maxInputTokens, + @Nullable Integer dims, + @Nullable SimilarityMeasure similarity, + @Nullable RateLimitSettings rateLimitSettings + ) { + this.location = location; + this.projectId = projectId; + this.modelId = modelId; + this.dimensionsSetByUser = dimensionsSetByUser; + this.maxInputTokens = maxInputTokens; + this.dims = dims; + this.similarity = Objects.requireNonNullElse(similarity, SimilarityMeasure.DOT_PRODUCT); + this.rateLimitSettings = Objects.requireNonNullElse(rateLimitSettings, DEFAULT_RATE_LIMIT_SETTINGS); + } + + public GoogleVertexAiEmbeddingsServiceSettings(StreamInput in) throws IOException { + this.location = in.readString(); + this.projectId = in.readString(); + this.modelId = in.readString(); + this.dimensionsSetByUser = in.readBoolean(); + this.maxInputTokens = in.readOptionalVInt(); + this.dims = in.readOptionalVInt(); + this.similarity = in.readOptionalEnum(SimilarityMeasure.class); + this.rateLimitSettings = new 
RateLimitSettings(in); + } + + @Override + public String projectId() { + return projectId; + } + + public String location() { + return location; + } + + public String modelId() { + return modelId; + } + + public Boolean dimensionsSetByUser() { + return dimensionsSetByUser; + } + + @Override + public RateLimitSettings rateLimitSettings() { + return rateLimitSettings; + } + + public Integer maxInputTokens() { + return maxInputTokens; + } + + @Override + public Integer dimensions() { + return dims; + } + + @Override + public SimilarityMeasure similarity() { + return similarity; + } + + @Override + public DenseVectorFieldMapper.ElementType elementType() { + return DenseVectorFieldMapper.ElementType.FLOAT; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + toXContentFragmentOfExposedFields(builder, params); + builder.field(DIMENSIONS_SET_BY_USER, dimensionsSetByUser); + + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ML_INFERENCE_GOOGLE_VERTEX_AI_EMBEDDINGS_ADDED; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(location); + out.writeString(projectId); + out.writeString(modelId); + out.writeBoolean(dimensionsSetByUser); + out.writeOptionalVInt(maxInputTokens); + out.writeOptionalVInt(dims); + out.writeOptionalEnum(similarity); + rateLimitSettings.writeTo(out); + } + + @Override + protected XContentBuilder toXContentFragmentOfExposedFields(XContentBuilder builder, Params params) throws IOException { + builder.field(LOCATION, location); + builder.field(PROJECT_ID, projectId); + builder.field(MODEL_ID, modelId); + + if (maxInputTokens != null) { + builder.field(MAX_INPUT_TOKENS, maxInputTokens); + } + + if (dims != null) { + builder.field(DIMENSIONS, dims); + } + + if (similarity != null) { + builder.field(SIMILARITY, similarity); + } + + rateLimitSettings.toXContent(builder, params); + + return builder; + } + + @Override + public boolean equals(Object object) { + if (this == object) return true; + if (object == null || getClass() != object.getClass()) return false; + GoogleVertexAiEmbeddingsServiceSettings that = (GoogleVertexAiEmbeddingsServiceSettings) object; + return Objects.equals(location, that.location) + && Objects.equals(projectId, that.projectId) + && Objects.equals(modelId, that.modelId) + && Objects.equals(dims, that.dims) + && similarity == that.similarity + && Objects.equals(maxInputTokens, that.maxInputTokens) + && Objects.equals(rateLimitSettings, that.rateLimitSettings) + && Objects.equals(dimensionsSetByUser, that.dimensionsSetByUser); + } + + @Override + public int hashCode() { + return Objects.hash(location, projectId, modelId, dims, similarity, maxInputTokens, rateLimitSettings, dimensionsSetByUser); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettings.java new file mode 100644 index 0000000000000..6de44fe470a2f --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettings.java @@ -0,0 +1,105 @@ +/* + * Copyright 
Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.googlevertexai.embeddings; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.TaskSettings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalBoolean; + +public class GoogleVertexAiEmbeddingsTaskSettings implements TaskSettings { + + public static final String NAME = "google_vertex_ai_embeddings_task_settings"; + + public static final String AUTO_TRUNCATE = "auto_truncate"; + + public static final GoogleVertexAiEmbeddingsTaskSettings EMPTY_SETTINGS = new GoogleVertexAiEmbeddingsTaskSettings( + (Boolean) null + ); + + public static GoogleVertexAiEmbeddingsTaskSettings fromMap(Map<String, Object> map) { + ValidationException validationException = new ValidationException(); + + Boolean autoTruncate = extractOptionalBoolean(map, AUTO_TRUNCATE, validationException); + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate); + } + + public static GoogleVertexAiEmbeddingsTaskSettings of( + GoogleVertexAiEmbeddingsTaskSettings originalSettings, + GoogleVertexAiEmbeddingsRequestTaskSettings requestSettings + ) { + var autoTruncate = requestSettings.autoTruncate() == null ?
originalSettings.autoTruncate : requestSettings.autoTruncate(); + return new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate); + } + + private final Boolean autoTruncate; + + public GoogleVertexAiEmbeddingsTaskSettings(@Nullable Boolean autoTruncate) { + this.autoTruncate = autoTruncate; + } + + public GoogleVertexAiEmbeddingsTaskSettings(StreamInput in) throws IOException { + this.autoTruncate = in.readOptionalBoolean(); + } + + public Boolean autoTruncate() { + return autoTruncate; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ML_INFERENCE_GOOGLE_VERTEX_AI_EMBEDDINGS_ADDED; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalBoolean(this.autoTruncate); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (autoTruncate != null) { + builder.field(AUTO_TRUNCATE, autoTruncate); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object object) { + if (this == object) return true; + if (object == null || getClass() != object.getClass()) return false; + GoogleVertexAiEmbeddingsTaskSettings that = (GoogleVertexAiEmbeddingsTaskSettings) object; + return Objects.equals(autoTruncate, that.autoTruncate); + } + + @Override + public int hashCode() { + return Objects.hash(autoTruncate); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleDiscoveryEngineRateLimitServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleDiscoveryEngineRateLimitServiceSettings.java new file mode 100644 index 0000000000000..850942602248f --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleDiscoveryEngineRateLimitServiceSettings.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.googlevertexai.rerank; + +import org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiRateLimitServiceSettings; + +public interface GoogleDiscoveryEngineRateLimitServiceSettings extends GoogleVertexAiRateLimitServiceSettings { + String projectId(); +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankModel.java new file mode 100644 index 0000000000000..45fad977a2b6b --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankModel.java @@ -0,0 +1,141 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.googlevertexai.rerank; + +import org.apache.http.client.utils.URIBuilder; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xpack.inference.external.action.ExecutableAction; +import org.elasticsearch.xpack.inference.external.action.googlevertexai.GoogleVertexAiActionVisitor; +import org.elasticsearch.xpack.inference.external.request.googlevertexai.GoogleVertexAiUtils; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiModel; +import org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiSecretSettings; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.Map; + +import static org.elasticsearch.core.Strings.format; + +public class GoogleVertexAiRerankModel extends GoogleVertexAiModel { + + private URI uri; + + public GoogleVertexAiRerankModel( + String inferenceEntityId, + TaskType taskType, + String service, + Map serviceSettings, + Map taskSettings, + Map secrets, + ConfigurationParseContext context + ) { + this( + inferenceEntityId, + taskType, + service, + GoogleVertexAiRerankServiceSettings.fromMap(serviceSettings, context), + GoogleVertexAiRerankTaskSettings.fromMap(taskSettings), + GoogleVertexAiSecretSettings.fromMap(secrets) + ); + } + + public GoogleVertexAiRerankModel(GoogleVertexAiRerankModel model, GoogleVertexAiRerankServiceSettings serviceSettings) { + super(model, serviceSettings); + } + + // Should only be used directly for testing + GoogleVertexAiRerankModel( + String inferenceEntityId, + TaskType taskType, + String service, + GoogleVertexAiRerankServiceSettings serviceSettings, + GoogleVertexAiRerankTaskSettings taskSettings, + @Nullable GoogleVertexAiSecretSettings secrets + ) { + super( + new ModelConfigurations(inferenceEntityId, taskType, service, serviceSettings, taskSettings), + new ModelSecrets(secrets), + serviceSettings + ); + try { + this.uri = buildUri(serviceSettings.projectId()); + } catch (URISyntaxException e) { + throw new RuntimeException(e); + } + } + + // Should only be used directly for testing + protected GoogleVertexAiRerankModel( + String inferenceEntityId, + TaskType taskType, + String service, + String uri, + GoogleVertexAiRerankServiceSettings serviceSettings, + GoogleVertexAiRerankTaskSettings taskSettings, + @Nullable GoogleVertexAiSecretSettings secrets + ) { + super( + new ModelConfigurations(inferenceEntityId, taskType, service, serviceSettings, taskSettings), + new ModelSecrets(secrets), + serviceSettings + ); + try { + this.uri = new URI(uri); + } catch (URISyntaxException e) { + throw new RuntimeException(e); + } + } + + @Override + public GoogleVertexAiRerankServiceSettings getServiceSettings() { + return (GoogleVertexAiRerankServiceSettings) super.getServiceSettings(); + } + + @Override + public GoogleVertexAiRerankTaskSettings getTaskSettings() { + return (GoogleVertexAiRerankTaskSettings) super.getTaskSettings(); + } + + @Override + public GoogleVertexAiSecretSettings getSecretSettings() { + return (GoogleVertexAiSecretSettings) super.getSecretSettings(); + } + + @Override + public GoogleDiscoveryEngineRateLimitServiceSettings rateLimitServiceSettings() { + return (GoogleDiscoveryEngineRateLimitServiceSettings) super.rateLimitServiceSettings(); + } + + public URI 
uri() { + return uri; + } + + @Override + public ExecutableAction accept(GoogleVertexAiActionVisitor visitor, Map taskSettings) { + return visitor.create(this, taskSettings); + } + + public static URI buildUri(String projectId) throws URISyntaxException { + return new URIBuilder().setScheme("https") + .setHost(GoogleVertexAiUtils.GOOGLE_DISCOVERY_ENGINE_HOST) + .setPathSegments( + GoogleVertexAiUtils.V1, + GoogleVertexAiUtils.PROJECTS, + projectId, + GoogleVertexAiUtils.LOCATIONS, + GoogleVertexAiUtils.GLOBAL, + GoogleVertexAiUtils.RANKING_CONFIGS, + format("%s:%s", GoogleVertexAiUtils.DEFAULT_RANKING_CONFIG, GoogleVertexAiUtils.RANK) + ) + .build(); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankRequestTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankRequestTaskSettings.java new file mode 100644 index 0000000000000..5cb1acd8038f7 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankRequestTaskSettings.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.googlevertexai.rerank; + +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; + +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveInteger; + +public record GoogleVertexAiRerankRequestTaskSettings(@Nullable Integer topN) { + + public static final GoogleVertexAiRerankRequestTaskSettings EMPTY_SETTINGS = new GoogleVertexAiRerankRequestTaskSettings(null); + + public static GoogleVertexAiRerankRequestTaskSettings fromMap(Map map) { + if (map.isEmpty()) { + return GoogleVertexAiRerankRequestTaskSettings.EMPTY_SETTINGS; + } + + ValidationException validationException = new ValidationException(); + + Integer topN = extractOptionalPositiveInteger( + map, + GoogleVertexAiRerankTaskSettings.TOP_N, + ModelConfigurations.TASK_SETTINGS, + validationException + ); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new GoogleVertexAiRerankRequestTaskSettings(topN); + } + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankServiceSettings.java new file mode 100644 index 0000000000000..0a0271d611a71 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankServiceSettings.java @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.googlevertexai.rerank; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiService; +import org.elasticsearch.xpack.inference.services.settings.FilteredXContentObject; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.services.ServiceFields.MODEL_ID; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalString; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredString; +import static org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiServiceFields.PROJECT_ID; + +public class GoogleVertexAiRerankServiceSettings extends FilteredXContentObject + implements + ServiceSettings, + GoogleDiscoveryEngineRateLimitServiceSettings { + + public static final String NAME = "google_vertex_ai_rerank_service_settings"; + + // Query requests per project per minute: https://cloud.google.com/generative-ai-app-builder/quotas#request_quotas + private static final RateLimitSettings DEFAULT_RATE_LIMIT_SETTINGS = new RateLimitSettings(300); + + public static GoogleVertexAiRerankServiceSettings fromMap(Map map, ConfigurationParseContext context) { + ValidationException validationException = new ValidationException(); + + String projectId = extractRequiredString(map, PROJECT_ID, ModelConfigurations.SERVICE_SETTINGS, validationException); + String model = extractOptionalString(map, MODEL_ID, ModelConfigurations.SERVICE_SETTINGS, validationException); + RateLimitSettings rateLimitSettings = RateLimitSettings.of( + map, + DEFAULT_RATE_LIMIT_SETTINGS, + validationException, + GoogleVertexAiService.NAME, + context + ); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new GoogleVertexAiRerankServiceSettings(projectId, model, rateLimitSettings); + } + + private final String projectId; + + private final String modelId; + + private final RateLimitSettings rateLimitSettings; + + public GoogleVertexAiRerankServiceSettings(String projectId, @Nullable String modelId, @Nullable RateLimitSettings rateLimitSettings) { + this.projectId = Objects.requireNonNull(projectId); + this.modelId = modelId; + this.rateLimitSettings = Objects.requireNonNullElse(rateLimitSettings, DEFAULT_RATE_LIMIT_SETTINGS); + } + + public GoogleVertexAiRerankServiceSettings(StreamInput in) throws IOException { + this.projectId = in.readString(); + this.modelId = in.readOptionalString(); + this.rateLimitSettings = new RateLimitSettings(in); + } + + public String projectId() { + return projectId; + } + + public String modelId() { + return modelId; + } + + @Override + public RateLimitSettings rateLimitSettings() { + return rateLimitSettings; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public 
TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ML_INFERENCE_GOOGLE_VERTEX_AI_RERANKING_ADDED; + } + + @Override + protected XContentBuilder toXContentFragmentOfExposedFields(XContentBuilder builder, Params params) throws IOException { + builder.field(PROJECT_ID, projectId); + + if (modelId != null) { + builder.field(MODEL_ID, modelId); + } + + rateLimitSettings.toXContent(builder, params); + + return builder; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + toXContentFragmentOfExposedFields(builder, params); + + builder.endObject(); + + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(projectId); + out.writeOptionalString(modelId); + rateLimitSettings.writeTo(out); + } + + @Override + public boolean equals(Object object) { + if (this == object) return true; + if (object == null || getClass() != object.getClass()) return false; + GoogleVertexAiRerankServiceSettings that = (GoogleVertexAiRerankServiceSettings) object; + return Objects.equals(projectId, that.projectId) + && Objects.equals(modelId, that.modelId) + && Objects.equals(rateLimitSettings, that.rateLimitSettings); + } + + @Override + public int hashCode() { + return Objects.hash(projectId, modelId, rateLimitSettings); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankTaskSettings.java new file mode 100644 index 0000000000000..e7c20cc0b4fea --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankTaskSettings.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.googlevertexai.rerank; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.TaskSettings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveInteger; + +public class GoogleVertexAiRerankTaskSettings implements TaskSettings { + + public static final String NAME = "google_vertex_ai_rerank_task_settings"; + + public static final String TOP_N = "top_n"; + + public static GoogleVertexAiRerankTaskSettings fromMap(Map map) { + ValidationException validationException = new ValidationException(); + + Integer topN = extractOptionalPositiveInteger(map, TOP_N, ModelConfigurations.TASK_SETTINGS, validationException); + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return new GoogleVertexAiRerankTaskSettings(topN); + } + + public static GoogleVertexAiRerankTaskSettings of( + GoogleVertexAiRerankTaskSettings originalSettings, + GoogleVertexAiRerankRequestTaskSettings requestSettings + ) { + var topN = requestSettings.topN() == null ? originalSettings.topN() : requestSettings.topN(); + return new GoogleVertexAiRerankTaskSettings(topN); + } + + private final Integer topN; + + public GoogleVertexAiRerankTaskSettings(@Nullable Integer topN) { + this.topN = topN; + } + + public GoogleVertexAiRerankTaskSettings(StreamInput in) throws IOException { + this.topN = in.readOptionalVInt(); + } + + public Integer topN() { + return topN; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ML_INFERENCE_GOOGLE_VERTEX_AI_RERANKING_ADDED; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalVInt(topN); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + if (topN != null) { + builder.field(TOP_N, topN); + } + + builder.endObject(); + + return builder; + } + + @Override + public boolean equals(Object object) { + if (this == object) return true; + if (object == null || getClass() != object.getClass()) return false; + GoogleVertexAiRerankTaskSettings that = (GoogleVertexAiRerankTaskSettings) object; + return Objects.equals(topN, that.topN); + } + + @Override + public int hashCode() { + return Objects.hash(topN); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java index 161ab6c47bfeb..7a591f094982d 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java @@ -17,9 +17,10 @@ import org.elasticsearch.inference.ChunkingOptions; import 
org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xpack.inference.common.EmbeddingRequestChunker; +import org.elasticsearch.xpack.inference.chunking.EmbeddingRequestChunker; import org.elasticsearch.xpack.inference.external.action.huggingface.HuggingFaceActionCreator; import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; @@ -78,9 +79,14 @@ public void checkModelConfig(Model model, ActionListener listener) { } private static HuggingFaceEmbeddingsModel updateModelWithEmbeddingDetails(HuggingFaceEmbeddingsModel model, int embeddingSize) { + // default to cosine similarity + var similarity = model.getServiceSettings().similarity() == null + ? SimilarityMeasure.COSINE + : model.getServiceSettings().similarity(); + var serviceSettings = new HuggingFaceServiceSettings( model.getServiceSettings().uri(), - model.getServiceSettings().similarity(), // we don't know the similarity but use whatever the user specified + similarity, embeddingSize, model.getTokenLimit(), model.getServiceSettings().rateLimitSettings() diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceSettings.java index fc31b1e518dd9..eb9c99f5bfd91 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceSettings.java @@ -33,9 +33,9 @@ import static org.elasticsearch.xpack.inference.services.ServiceFields.URL; import static org.elasticsearch.xpack.inference.services.ServiceUtils.convertToUri; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createUri; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveInteger; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredString; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractSimilarity; -import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeAsType; public class HuggingFaceServiceSettings extends FilteredXContentObject implements ServiceSettings, HuggingFaceRateLimitServiceSettings { public static final String NAME = "hugging_face_service_settings"; @@ -49,8 +49,13 @@ public static HuggingFaceServiceSettings fromMap(Map map, Config var uri = extractUri(map, URL, validationException); SimilarityMeasure similarityMeasure = extractSimilarity(map, ModelConfigurations.SERVICE_SETTINGS, validationException); - Integer dims = removeAsType(map, DIMENSIONS, Integer.class); - Integer maxInputTokens = removeAsType(map, MAX_INPUT_TOKENS, Integer.class); + Integer dims = extractOptionalPositiveInteger(map, DIMENSIONS, ModelConfigurations.SERVICE_SETTINGS, validationException); + Integer maxInputTokens = extractOptionalPositiveInteger( + map, + MAX_INPUT_TOKENS, + ModelConfigurations.SERVICE_SETTINGS, + validationException + ); RateLimitSettings rateLimitSettings = RateLimitSettings.of( map, DEFAULT_RATE_LIMIT_SETTINGS, diff --git 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/MistralService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/MistralService.java index bcef31031cb0c..d85b2b095ba2c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/MistralService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/MistralService.java @@ -22,7 +22,7 @@ import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xpack.inference.common.EmbeddingRequestChunker; +import org.elasticsearch.xpack.inference.chunking.EmbeddingRequestChunker; import org.elasticsearch.xpack.inference.external.action.mistral.MistralActionCreator; import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/embeddings/MistralEmbeddingsServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/embeddings/MistralEmbeddingsServiceSettings.java index 62d06a4e0029c..2e4d546e1dc4c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/embeddings/MistralEmbeddingsServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/embeddings/MistralEmbeddingsServiceSettings.java @@ -33,7 +33,6 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveInteger; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredString; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractSimilarity; -import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeAsType; import static org.elasticsearch.xpack.inference.services.mistral.MistralConstants.MODEL_FIELD; public class MistralEmbeddingsServiceSettings extends FilteredXContentObject implements ServiceSettings { @@ -67,7 +66,7 @@ public static MistralEmbeddingsServiceSettings fromMap(Map map, MistralService.NAME, context ); - Integer dims = removeAsType(map, DIMENSIONS, Integer.class); + Integer dims = extractOptionalPositiveInteger(map, DIMENSIONS, ModelConfigurations.SERVICE_SETTINGS, validationException); if (validationException.validationErrors().isEmpty() == false) { throw validationException; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java index 8e25d4a8936ab..d2264ce5cd881 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java @@ -24,7 +24,7 @@ import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xpack.inference.common.EmbeddingRequestChunker; +import org.elasticsearch.xpack.inference.chunking.EmbeddingRequestChunker; import org.elasticsearch.xpack.inference.external.action.openai.OpenAiActionCreator; import 
org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettings.java index 04f77da1b1463..c4ab8bd99b8b0 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettings.java @@ -32,9 +32,9 @@ import static org.elasticsearch.xpack.inference.services.ServiceFields.URL; import static org.elasticsearch.xpack.inference.services.ServiceUtils.convertToUri; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createOptionalUri; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveInteger; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalString; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredString; -import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeAsType; import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields.ORGANIZATION; /** @@ -58,7 +58,12 @@ public static OpenAiChatCompletionServiceSettings fromMap(Map ma String url = extractOptionalString(map, URL, ModelConfigurations.SERVICE_SETTINGS, validationException); URI uri = convertToUri(url, URL, ModelConfigurations.SERVICE_SETTINGS, validationException); - Integer maxInputTokens = removeAsType(map, MAX_INPUT_TOKENS, Integer.class); + Integer maxInputTokens = extractOptionalPositiveInteger( + map, + MAX_INPUT_TOKENS, + ModelConfigurations.SERVICE_SETTINGS, + validationException + ); RateLimitSettings rateLimitSettings = RateLimitSettings.of( map, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java index 080251bf1ba3a..d474e935fbda7 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java @@ -36,6 +36,7 @@ import static org.elasticsearch.xpack.inference.services.ServiceFields.URL; import static org.elasticsearch.xpack.inference.services.ServiceUtils.convertToUri; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createOptionalUri; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveInteger; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalString; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredString; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractSimilarity; @@ -99,8 +100,13 @@ private static CommonFields fromMap( String url = extractOptionalString(map, URL, ModelConfigurations.SERVICE_SETTINGS, validationException); String organizationId = 
extractOptionalString(map, ORGANIZATION, ModelConfigurations.SERVICE_SETTINGS, validationException); SimilarityMeasure similarity = extractSimilarity(map, ModelConfigurations.SERVICE_SETTINGS, validationException); - Integer maxInputTokens = removeAsType(map, MAX_INPUT_TOKENS, Integer.class); - Integer dims = removeAsType(map, DIMENSIONS, Integer.class); + Integer maxInputTokens = extractOptionalPositiveInteger( + map, + MAX_INPUT_TOKENS, + ModelConfigurations.SERVICE_SETTINGS, + validationException + ); + Integer dims = extractOptionalPositiveInteger(map, DIMENSIONS, ModelConfigurations.SERVICE_SETTINGS, validationException); URI uri = convertToUri(url, URL, ModelConfigurations.SERVICE_SETTINGS, validationException); String modelId = extractRequiredString(map, MODEL_ID, ModelConfigurations.SERVICE_SETTINGS, validationException); RateLimitSettings rateLimitSettings = RateLimitSettings.of( diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/settings/InternalServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/settings/InternalServiceSettings.java index ee7db662b4997..dbec3907427ba 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/settings/InternalServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/settings/InternalServiceSettings.java @@ -7,13 +7,12 @@ package org.elasticsearch.xpack.inference.services.settings; -import org.elasticsearch.common.ValidationException; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ServiceSettings; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.inference.services.ServiceUtils; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings; import java.io.IOException; import java.util.Objects; @@ -23,38 +22,26 @@ public abstract class InternalServiceSettings implements ServiceSettings { public static final String NUM_ALLOCATIONS = "num_allocations"; public static final String NUM_THREADS = "num_threads"; public static final String MODEL_ID = "model_id"; + public static final String ADAPTIVE_ALLOCATIONS = "adaptive_allocations"; - private final int numAllocations; + private final Integer numAllocations; private final int numThreads; private final String modelId; - - public InternalServiceSettings(int numAllocations, int numThreads, String modelId) { + private final AdaptiveAllocationsSettings adaptiveAllocationsSettings; + + public InternalServiceSettings( + Integer numAllocations, + int numThreads, + String modelId, + AdaptiveAllocationsSettings adaptiveAllocationsSettings + ) { this.numAllocations = numAllocations; this.numThreads = numThreads; this.modelId = modelId; + this.adaptiveAllocationsSettings = adaptiveAllocationsSettings; } - protected static void validateParameters(Integer numAllocations, ValidationException validationException, Integer numThreads) { - if (numAllocations == null) { - validationException.addValidationError( - ServiceUtils.missingSettingErrorMsg(NUM_ALLOCATIONS, ModelConfigurations.SERVICE_SETTINGS) - ); - } else if (numAllocations < 1) { - validationException.addValidationError( - ServiceUtils.mustBeAPositiveIntegerErrorMessage(NUM_ALLOCATIONS, ModelConfigurations.SERVICE_SETTINGS, 
numAllocations) - ); - } - - if (numThreads == null) { - validationException.addValidationError(ServiceUtils.missingSettingErrorMsg(NUM_THREADS, ModelConfigurations.SERVICE_SETTINGS)); - } else if (numThreads < 1) { - validationException.addValidationError( - ServiceUtils.mustBeAPositiveIntegerErrorMessage(NUM_THREADS, ModelConfigurations.SERVICE_SETTINGS, numThreads) - ); - } - } - - public int getNumAllocations() { + public Integer getNumAllocations() { return numAllocations; } @@ -66,28 +53,44 @@ public String getModelId() { return modelId; } + public AdaptiveAllocationsSettings getAdaptiveAllocationsSettings() { + return adaptiveAllocationsSettings; + } + public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; InternalServiceSettings that = (InternalServiceSettings) o; - return numAllocations == that.numAllocations && numThreads == that.numThreads && Objects.equals(modelId, that.modelId); + return Objects.equals(numAllocations, that.numAllocations) + && numThreads == that.numThreads + && Objects.equals(modelId, that.modelId) + && Objects.equals(adaptiveAllocationsSettings, that.adaptiveAllocationsSettings); } @Override public int hashCode() { - return Objects.hash(numAllocations, numThreads, modelId); + return Objects.hash(numAllocations, numThreads, modelId, adaptiveAllocationsSettings); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(NUM_ALLOCATIONS, getNumAllocations()); - builder.field(NUM_THREADS, getNumThreads()); - builder.field(MODEL_ID, getModelId()); + addXContentFragment(builder, params); builder.endObject(); return builder; } + public void addXContentFragment(XContentBuilder builder, Params params) throws IOException { + if (numAllocations != null) { + builder.field(NUM_ALLOCATIONS, getNumAllocations()); + } + builder.field(NUM_THREADS, getNumThreads()); + builder.field(MODEL_ID, getModelId()); + if (adaptiveAllocationsSettings != null) { + builder.field(ADAPTIVE_ALLOCATIONS, getAdaptiveAllocationsSettings()); + } + } + @Override public ToXContentObject getFilteredXContentObject() { return this; @@ -100,19 +103,27 @@ public boolean isFragment() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(getNumAllocations()); + if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) { + out.writeOptionalVInt(getNumAllocations()); + } else { + out.writeVInt(getNumAllocations()); + } out.writeVInt(getNumThreads()); out.writeString(getModelId()); + if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) { + out.writeOptionalWriteable(getAdaptiveAllocationsSettings()); + } } public abstract static class Builder { - private int numAllocations; + private Integer numAllocations; private int numThreads; private String modelId; + private AdaptiveAllocationsSettings adaptiveAllocationsSettings; public abstract InternalServiceSettings build(); - public void setNumAllocations(int numAllocations) { + public void setNumAllocations(Integer numAllocations) { this.numAllocations = numAllocations; } @@ -124,16 +135,24 @@ public void setModelId(String modelId) { this.modelId = modelId; } + public void setAdaptiveAllocationsSettings(AdaptiveAllocationsSettings adaptiveAllocationsSettings) { + this.adaptiveAllocationsSettings = adaptiveAllocationsSettings; + } + public String getModelId() { return modelId; } - public int getNumAllocations() 
{ + public Integer getNumAllocations() { return numAllocations; } public int getNumThreads() { return numThreads; } + + public AdaptiveAllocationsSettings getAdaptiveAllocationsSettings() { + return adaptiveAllocationsSettings; + } } } diff --git a/x-pack/plugin/inference/src/main/plugin-metadata/plugin-security.policy b/x-pack/plugin/inference/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 0000000000000..a39fcf53be7f3 --- /dev/null +++ b/x-pack/plugin/inference/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +grant { + // required by: com.google.api.client.json.JsonParser#parseValue + // also required by AWS SDK for client configuration + permission java.lang.RuntimePermission "accessDeclaredMembers"; + permission java.lang.RuntimePermission "getClassLoader"; + + // required by: com.google.api.client.json.GenericJson# + // also by AWS SDK for Jackson's ObjectMapper + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; + + // required to add google certs to the gcs client truststore + permission java.lang.RuntimePermission "setFactory"; + + // gcs client opens socket connections to access the repository + // also, AWS Bedrock client opens socket connections and needs resolve to access resources + permission java.net.SocketPermission "*", "connect,resolve"; +}; diff --git a/x-pack/plugin/inference/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/x-pack/plugin/inference/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification new file mode 100644 index 0000000000000..f3e40336744d3 --- /dev/null +++ b/x-pack/plugin/inference/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification @@ -0,0 +1,8 @@ +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License +# 2.0; you may not use this file except in compliance with the Elastic License +# 2.0.
+# + +org.elasticsearch.xpack.inference.InferenceFeatures diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java index a352116278e7a..fe33a3d092667 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java @@ -13,11 +13,14 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import org.elasticsearch.inference.InferenceServiceExtension; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.threadpool.ScalingExecutorBuilder; +import org.elasticsearch.xpack.core.inference.results.ChatCompletionResults; import org.elasticsearch.xpack.inference.common.Truncator; import org.elasticsearch.xpack.inference.external.http.HttpClientManager; import org.elasticsearch.xpack.inference.external.http.HttpSettings; @@ -27,8 +30,10 @@ import org.elasticsearch.xpack.inference.mock.TestDenseInferenceServiceExtension; import org.elasticsearch.xpack.inference.mock.TestSparseInferenceServiceExtension; import org.elasticsearch.xpack.inference.registry.ModelRegistry; +import org.hamcrest.Matchers; import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -42,6 +47,7 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.nullValue; +import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -92,10 +98,15 @@ public static void storeSparseModel(Client client) throws Exception { storeModel(client, model); } - public static void storeDenseModel(Client client, int dimensions, SimilarityMeasure similarityMeasure) throws Exception { + public static void storeDenseModel( + Client client, + int dimensions, + SimilarityMeasure similarityMeasure, + DenseVectorFieldMapper.ElementType elementType + ) throws Exception { Model model = new TestDenseInferenceServiceExtension.TestDenseModel( TestDenseInferenceServiceExtension.TestInferenceService.NAME, - new TestDenseInferenceServiceExtension.TestServiceSettings("dense_model", dimensions, similarityMeasure) + new TestDenseInferenceServiceExtension.TestServiceSettings("dense_model", dimensions, similarityMeasure, elementType) ); storeModel(client, model); @@ -161,4 +172,56 @@ public static SimilarityMeasure randomSimilarityMeasure() { } public record PersistedConfig(Map config, Map secrets) {} + + public static PersistedConfig getPersistedConfigMap( + Map serviceSettings, + Map taskSettings, + Map secretSettings + ) { + var secrets = secretSettings == null ? 
null : new HashMap(Map.of(ModelSecrets.SECRET_SETTINGS, secretSettings)); + + return new PersistedConfig( + new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), + secrets + ); + } + + public static PersistedConfig getPersistedConfigMap(Map serviceSettings) { + return Utils.getPersistedConfigMap(serviceSettings, new HashMap<>(), null); + } + + public static PersistedConfig getPersistedConfigMap(Map serviceSettings, Map taskSettings) { + return new PersistedConfig( + new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), + null + ); + } + + public static Map getRequestConfigMap( + Map serviceSettings, + Map taskSettings, + Map secretSettings + ) { + var builtServiceSettings = new HashMap<>(); + builtServiceSettings.putAll(serviceSettings); + builtServiceSettings.putAll(secretSettings); + + return new HashMap<>( + Map.of(ModelConfigurations.SERVICE_SETTINGS, builtServiceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings) + ); + } + + public static Map buildExpectationCompletions(List completions) { + return Map.of( + ChatCompletionResults.COMPLETION, + completions.stream().map(completion -> Map.of(ChatCompletionResults.Result.RESULT, completion)).collect(Collectors.toList()) + ); + } + + public static ActionListener getModelListenerForException(Class exceptionClass, String expectedMessage) { + return ActionListener.wrap((model) -> fail("Model parsing should have failed"), e -> { + assertThat(e, Matchers.instanceOf(exceptionClass)); + assertThat(e.getMessage(), is(expectedMessage)); + }); + } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java index d501c9a65d80e..f63a6369b21a6 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java @@ -205,13 +205,13 @@ public void testItemFailures() throws Exception { @SuppressWarnings({ "unchecked", "rawtypes" }) public void testManyRandomDocs() throws Exception { Map inferenceModelMap = new HashMap<>(); - int numModels = randomIntBetween(1, 5); + int numModels = randomIntBetween(1, 3); for (int i = 0; i < numModels; i++) { StaticModel model = StaticModel.createRandomInstance(); inferenceModelMap.put(model.getInferenceEntityId(), model); } - int numInferenceFields = randomIntBetween(1, 5); + int numInferenceFields = randomIntBetween(1, 3); Map inferenceFieldMap = new HashMap<>(); for (int i = 0; i < numInferenceFields; i++) { String field = randomAlphaOfLengthBetween(5, 10); @@ -219,7 +219,7 @@ public void testManyRandomDocs() throws Exception { inferenceFieldMap.put(field, new InferenceFieldMetadata(field, inferenceId, new String[] { field })); } - int numRequests = randomIntBetween(100, 1000); + int numRequests = atLeast(100); BulkItemRequest[] originalRequests = new BulkItemRequest[numRequests]; BulkItemRequest[] modifiedRequests = new BulkItemRequest[numRequests]; for (int id = 0; id < numRequests; id++) { @@ -331,7 +331,7 @@ private static BulkItemRequest[] randomBulkItemRequest( for (var entry : fieldInferenceMap.values()) { String field = entry.getName(); var model = 
modelMap.get(entry.getInferenceId()); - String text = randomAlphaOfLengthBetween(10, 100); + String text = randomAlphaOfLengthBetween(10, 20); docMap.put(field, text); expectedDocMap.put(field, text); if (model == null) { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/EmbeddingRequestChunkerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunkerTests.java similarity index 99% rename from x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/EmbeddingRequestChunkerTests.java rename to x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunkerTests.java index facd8dfd9f3b1..cb89846b197fc 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/EmbeddingRequestChunkerTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunkerTests.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.inference.common; +package org.elasticsearch.xpack.inference.chunking; import org.elasticsearch.action.ActionListener; import org.elasticsearch.inference.ChunkedInferenceServiceResults; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkerTests.java new file mode 100644 index 0000000000000..5bf282a07067a --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkerTests.java @@ -0,0 +1,173 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.chunking; + +import com.ibm.icu.text.BreakIterator; + +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; + +import java.util.Arrays; +import java.util.Locale; + +import static org.elasticsearch.xpack.inference.chunking.WordBoundaryChunkerTests.TEST_TEXT; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class SentenceBoundaryChunkerTests extends ESTestCase { + + public void testChunkSplitLargeChunkSizes() { + for (int maxWordsPerChunk : new int[] { 100, 200 }) { + var chunker = new SentenceBoundaryChunker(); + var chunks = chunker.chunk(TEST_TEXT, maxWordsPerChunk); + + int numChunks = expectedNumberOfChunks(sentenceSizes(TEST_TEXT), maxWordsPerChunk); + assertThat("words per chunk " + maxWordsPerChunk, chunks, hasSize(numChunks)); + + for (var chunk : chunks) { + assertTrue(Character.isUpperCase(chunk.charAt(0))); + var trailingWhiteSpaceRemoved = chunk.strip(); + var lastChar = trailingWhiteSpaceRemoved.charAt(trailingWhiteSpaceRemoved.length() - 1); + assertThat(lastChar, Matchers.is('.')); + } + } + } + + public void testChunk_ChunkSizeLargerThanText() { + int maxWordsPerChunk = 500; + var chunker = new SentenceBoundaryChunker(); + var chunks = chunker.chunk(TEST_TEXT, maxWordsPerChunk); + + assertEquals(chunks.get(0), TEST_TEXT); + } + + public void testChunkSplit_SentencesLongerThanChunkSize() { + var chunkSizes = new int[] { 10, 30, 50 }; + var expectedNumberOFChunks = new int[] { 21, 7, 4 }; + + for (int i = 0; i < chunkSizes.length; i++) { + int maxWordsPerChunk = chunkSizes[i]; + var chunker = new SentenceBoundaryChunker(); + var chunks = chunker.chunk(TEST_TEXT, maxWordsPerChunk); + + assertThat("words per chunk " + maxWordsPerChunk, chunks, hasSize(expectedNumberOFChunks[i])); + for (var chunk : chunks) { + // count whitespaced words + // strip out the '=' signs as they are not counted as words by ICU + var trimmed = chunk.trim().replace("=", ""); + // split by hyphen or whitespace to match the way + // the ICU break iterator counts words + var split = trimmed.split("[\\s\\-]+"); + int numWhiteSpacedWords = (int) Arrays.stream(split).filter(s -> s.isEmpty() == false).count(); + if (chunk.trim().endsWith(".")) { + // End of sentence, may be less than maxWordsPerChunk + assertThat(Arrays.toString(split), numWhiteSpacedWords, lessThanOrEqualTo(maxWordsPerChunk)); + } else { + // splitting inside a sentence so should have max words + assertEquals(Arrays.toString(split), maxWordsPerChunk, numWhiteSpacedWords); + } + } + } + } + + public void testCountWords() { + // Test word count matches the whitespace separated word count. 
+ var splitByWhiteSpaceSentenceSizes = sentenceSizes(TEST_TEXT); + + var sentenceIterator = BreakIterator.getSentenceInstance(Locale.ROOT); + sentenceIterator.setText(TEST_TEXT); + + var wordIterator = BreakIterator.getWordInstance(Locale.ROOT); + wordIterator.setText(TEST_TEXT); + + int start = 0; + int end = sentenceIterator.next(); + assertEquals(splitByWhiteSpaceSentenceSizes[0], SentenceBoundaryChunker.countWords(start, end, wordIterator)); + start = end; + end = sentenceIterator.next(); + assertEquals(splitByWhiteSpaceSentenceSizes[1], SentenceBoundaryChunker.countWords(start, end, wordIterator)); + start = end; + end = sentenceIterator.next(); + assertEquals(splitByWhiteSpaceSentenceSizes[2], SentenceBoundaryChunker.countWords(start, end, wordIterator)); + start = end; + end = sentenceIterator.next(); + assertEquals(splitByWhiteSpaceSentenceSizes[3], SentenceBoundaryChunker.countWords(start, end, wordIterator)); + + assertEquals(BreakIterator.DONE, sentenceIterator.next()); + } + + public void testCountWords_short() { + // Test word count matches the whitespace separated word count. + var text = "This is a short sentence. Followed by another."; + + var sentenceIterator = BreakIterator.getSentenceInstance(Locale.ROOT); + sentenceIterator.setText(text); + + var wordIterator = BreakIterator.getWordInstance(Locale.ROOT); + wordIterator.setText(text); + + int start = 0; + int end = sentenceIterator.next(); + assertEquals(5, SentenceBoundaryChunker.countWords(0, end, wordIterator)); + start = end; + end = sentenceIterator.next(); + assertEquals(3, SentenceBoundaryChunker.countWords(start, end, wordIterator)); + assertEquals(BreakIterator.DONE, sentenceIterator.next()); + } + + public void testCountWords_WithSymbols() { + { + var text = "foo != bar"; + var wordIterator = BreakIterator.getWordInstance(Locale.ROOT); + wordIterator.setText(text); + // "foo", "bar" - "!=" is not counted + assertEquals(2, SentenceBoundaryChunker.countWords(0, text.length(), wordIterator)); + } + { + var text = "foo & bar"; + var wordIterator = BreakIterator.getWordInstance(Locale.ROOT); + wordIterator.setText(text); + // "foo", "bar" - the & is not counted + assertEquals(2, SentenceBoundaryChunker.countWords(0, text.length(), wordIterator)); + } + { + var text = "m&s"; + var wordIterator = BreakIterator.getWordInstance(Locale.ROOT); + wordIterator.setText(text); + // "m", "s" - the & is not counted + assertEquals(2, SentenceBoundaryChunker.countWords(0, text.length(), wordIterator)); + } + } + + private int[] sentenceSizes(String text) { + var sentences = text.split("\\.\\s+"); + var lengths = new int[sentences.length]; + for (int i = 0; i < sentences.length; i++) { + // strip out the '=' signs as they are not counted as words by ICU + sentences[i] = sentences[i].replace("=", ""); + // split by hyphen or whitespace to match the way + // the ICU break iterator counts words + lengths[i] = sentences[i].split("[ \\-]+").length; + } + return lengths; + } + + private int expectedNumberOfChunks(int[] sentenceLengths, int maxWordsPerChunk) { + int numChunks = 1; + int runningWordCount = 0; + for (int i = 0; i < sentenceLengths.length; i++) { + if (runningWordCount + sentenceLengths[i] > maxWordsPerChunk) { + numChunks++; + runningWordCount = sentenceLengths[i]; + } else { + runningWordCount += sentenceLengths[i]; + } + } + return numChunks; + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/WordBoundaryChunkerTests.java 
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkerTests.java similarity index 87% rename from x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/WordBoundaryChunkerTests.java rename to x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkerTests.java index 14cb63673e174..864d01507ca35 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/WordBoundaryChunkerTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkerTests.java @@ -5,11 +5,14 @@ * 2.0. */ -package org.elasticsearch.xpack.inference.common; +package org.elasticsearch.xpack.inference.chunking; + +import com.ibm.icu.text.BreakIterator; import org.elasticsearch.test.ESTestCase; import java.util.List; +import java.util.Locale; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; @@ -17,15 +20,17 @@ public class WordBoundaryChunkerTests extends ESTestCase { - private final String TEST_TEXT = "Word segmentation is the problem of dividing a string of written language into its component words.\n" - + "In English and many other languages using some form of the Latin alphabet, the space is a good approximation of a word divider " - + "(word delimiter), although this concept has limits because of the variability with which languages emically regard collocations " - + "and compounds. Many English compound nouns are variably written (for example, ice box = ice-box = icebox; pig sty = pig-sty = " - + "pigsty) with a corresponding variation in whether speakers think of them as noun phrases or single nouns; there are trends in " - + "how norms are set, such as that open compounds often tend eventually to solidify by widespread convention, but variation remains" - + " systemic. In contrast, German compound nouns show less orthographic variation, with solidification being a stronger norm."; - - private final String[] MULTI_LINGUAL = new String[] { + @SuppressWarnings("checkstyle:linelength") + public static final String TEST_TEXT = + "Word segmentation is the problem of dividing a string of written language into its component words.\n" + + "In English and many other languages using some form of the Latin alphabet, the space is a good approximation of a word divider " + + "(word delimiter), although this concept has limits because of the variability with which languages emically regard collocations " + + "and compounds. Many English compound nouns are variably written (for example, ice box = ice-box = icebox; pig sty = pig-sty = " + + "pigsty) with a corresponding variation in whether speakers think of them as noun phrases or single nouns; there are trends in " + + "how norms are set, such as that open compounds often tend eventually to solidify by widespread convention, but variation remains" + + " systemic. In contrast, German compound nouns show less orthographic variation, with solidification being a stronger norm."; + + public static final String[] MULTI_LINGUAL = new String[] { "Građevne strukture Mesa Verde dokaz su akumuliranog znanja i vještina koje su se stoljećima prenosile generacijama civilizacije" + " Anasazi. Vrhunce svojih dosega ostvarili su u 12. i 13. stoljeću, kada su sagrađene danas najpoznatije građevine na " + "liticama. Zidali su obrađenim pješčenjakom, tvrđim kamenom oblikovanim do veličine štruce kruha. 
Kao žbuku između ciglā " @@ -48,6 +53,17 @@ public class WordBoundaryChunkerTests extends ESTestCase { + " خليفہ المومنين يا خليفہ المسلمين يا صحابی يا رضي الله عنه چئي۔ (ب) آنحضور ﷺ جي گھروارين کان علاوه ڪنھن کي ام المومنين " + "چئي۔ (ج) آنحضور ﷺ جي خاندان جي اھل بيت کان علاوہڍه ڪنھن کي اھل بيت چئي۔ (د) پنھنجي عبادت گاھ کي مسجد چئي۔" }; + public static int NUM_WORDS_IN_TEST_TEXT; + static { + var wordIterator = BreakIterator.getWordInstance(Locale.ROOT); + wordIterator.setText(TEST_TEXT); + int wordCount = 0; + while (wordIterator.next() != BreakIterator.DONE) { + wordCount++; + } + NUM_WORDS_IN_TEST_TEXT = wordCount; + } + public void testSingleSplit() { var chunker = new WordBoundaryChunker(); var chunks = chunker.chunk(TEST_TEXT, 10_000, 0); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionCreatorTests.java new file mode 100644 index 0000000000000..87d3a82b4aae6 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/amazonbedrock/AmazonBedrockActionCreatorTests.java @@ -0,0 +1,175 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.action.amazonbedrock; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.inference.action.InferenceAction; +import org.elasticsearch.xpack.core.inference.results.ChatCompletionResults; +import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; +import org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockMockRequestSender; +import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; +import org.elasticsearch.xpack.inference.services.ServiceComponentsTests; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionModelTests; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModelTests; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; +import static org.elasticsearch.xpack.inference.results.ChatCompletionResultsTests.buildExpectationCompletion; +import static org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests.buildExpectationFloat; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockActionCreatorTests extends ESTestCase { + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + private ThreadPool threadPool; + + @Before + public void init() throws Exception { + threadPool = createThreadPool(inferenceUtilityPool()); + } + + @After + public 
void shutdown() throws IOException { + terminate(threadPool); + } + + public void testEmbeddingsRequestAction() throws IOException { + var serviceComponents = ServiceComponentsTests.createWithEmptySettings(threadPool); + var mockedFloatResults = List.of(new InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding(new float[] { 0.0123F, -0.0123F })); + var mockedResult = new InferenceTextEmbeddingFloatResults(mockedFloatResults); + try (var sender = new AmazonBedrockMockRequestSender()) { + sender.enqueue(mockedResult); + var creator = new AmazonBedrockActionCreator(sender, serviceComponents, TIMEOUT); + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "test_id", + "test_region", + "test_model", + AmazonBedrockProvider.AMAZONTITAN, + null, + false, + null, + null, + null, + "accesskey", + "secretkey" + ); + var action = creator.create(model, Map.of()); + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), is(buildExpectationFloat(List.of(new float[] { 0.0123F, -0.0123F })))); + + assertThat(sender.sendCount(), is(1)); + var sentInputs = sender.getInputs(); + assertThat(sentInputs.size(), is(1)); + assertThat(sentInputs.get(0), is("abc")); + } + } + + public void testEmbeddingsRequestAction_HandlesException() throws IOException { + var serviceComponents = ServiceComponentsTests.createWithEmptySettings(threadPool); + var mockedResult = new ElasticsearchException("mock exception"); + try (var sender = new AmazonBedrockMockRequestSender()) { + sender.enqueue(mockedResult); + var creator = new AmazonBedrockActionCreator(sender, serviceComponents, TIMEOUT); + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "test_id", + "test_region", + "test_model", + AmazonBedrockProvider.AMAZONTITAN, + "accesskey", + "secretkey" + ); + var action = creator.create(model, Map.of()); + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(sender.sendCount(), is(1)); + assertThat(sender.getInputs().size(), is(1)); + assertThat(thrownException.getMessage(), is("mock exception")); + } + } + + public void testCompletionRequestAction() throws IOException { + var serviceComponents = ServiceComponentsTests.createWithEmptySettings(threadPool); + var mockedChatCompletionResults = List.of(new ChatCompletionResults.Result("test input string")); + var mockedResult = new ChatCompletionResults(mockedChatCompletionResults); + try (var sender = new AmazonBedrockMockRequestSender()) { + sender.enqueue(mockedResult); + var creator = new AmazonBedrockActionCreator(sender, serviceComponents, TIMEOUT); + var model = AmazonBedrockChatCompletionModelTests.createModel( + "test_id", + "test_region", + "test_model", + AmazonBedrockProvider.AMAZONTITAN, + null, + null, + null, + null, + null, + "accesskey", + "secretkey" + ); + var action = creator.create(model, Map.of()); + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), is(buildExpectationCompletion(List.of("test input string")))); + + 
assertThat(sender.sendCount(), is(1)); + var sentInputs = sender.getInputs(); + assertThat(sentInputs.size(), is(1)); + assertThat(sentInputs.get(0), is("abc")); + } + } + + public void testChatCompletionRequestAction_HandlesException() throws IOException { + var serviceComponents = ServiceComponentsTests.createWithEmptySettings(threadPool); + var mockedResult = new ElasticsearchException("mock exception"); + try (var sender = new AmazonBedrockMockRequestSender()) { + sender.enqueue(mockedResult); + var creator = new AmazonBedrockActionCreator(sender, serviceComponents, TIMEOUT); + var model = AmazonBedrockChatCompletionModelTests.createModel( + "test_id", + "test_region", + "test_model", + AmazonBedrockProvider.AMAZONTITAN, + null, + null, + null, + null, + null, + "accesskey", + "secretkey" + ); + var action = creator.create(model, Map.of()); + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(sender.sendCount(), is(1)); + assertThat(sender.getInputs().size(), is(1)); + assertThat(thrownException.getMessage(), is("mock exception")); + } + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/anthropic/AnthropicActionCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/anthropic/AnthropicActionCreatorTests.java new file mode 100644 index 0000000000000..a3114300c5ddc --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/anthropic/AnthropicActionCreatorTests.java @@ -0,0 +1,199 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.action.anthropic; + +import org.apache.http.HttpHeaders; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.inference.action.InferenceAction; +import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; +import org.elasticsearch.xpack.inference.external.request.anthropic.AnthropicRequestUtils; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionModelTests; +import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionTaskSettingsTests; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; +import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; +import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; +import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; +import static org.elasticsearch.xpack.inference.external.http.retry.RetrySettingsTests.buildSettingsWithRetryFields; +import static org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests.createSender; +import static org.elasticsearch.xpack.inference.results.ChatCompletionResultsTests.buildExpectationCompletion; +import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; + +public class AnthropicActionCreatorTests extends ESTestCase { + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + private final MockWebServer webServer = new MockWebServer(); + private ThreadPool threadPool; + private HttpClientManager clientManager; + + @Before + public void init() throws Exception { + webServer.start(); + threadPool = createThreadPool(inferenceUtilityPool()); + clientManager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty(), mock(ThrottlerManager.class)); + } + + @After + public void shutdown() throws IOException { + clientManager.close(); + terminate(threadPool); + webServer.close(); + } + + public void testCreate_ChatCompletionModel() throws IOException { + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); + + try (var sender = createSender(senderFactory)) { + sender.start(); + + String responseJson = """ + { + "id": "msg_01XzZQmG41BMGe5NZ5p2vEWb", + "type": "message", + "role": "assistant", + "model": "claude-3-opus-20240229", + 
"content": [ + { + "type": "text", + "text": "San Francisco has a cool-summer Mediterranean climate." + } + ], + "stop_reason": "end_turn", + "stop_sequence": null, + "usage": { + "input_tokens": 16, + "output_tokens": 326 + } + } + """; + + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var model = AnthropicChatCompletionModelTests.createChatCompletionModel(getUrl(webServer), "secret", "model", 0); + var actionCreator = new AnthropicActionCreator(sender, createWithEmptySettings(threadPool)); + var overriddenTaskSettings = AnthropicChatCompletionTaskSettingsTests.getChatCompletionTaskSettingsMap(1, 2.0, -3.0, 3); + var action = actionCreator.create(model, overriddenTaskSettings); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), is(buildExpectationCompletion(List.of("San Francisco has a cool-summer Mediterranean climate.")))); + assertThat(webServer.requests(), hasSize(1)); + + var request = webServer.requests().get(0); + + assertNull(request.getUri().getQuery()); + assertThat(request.getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(request.getHeader(AnthropicRequestUtils.X_API_KEY), equalTo("secret")); + assertThat(request.getHeader(AnthropicRequestUtils.VERSION), equalTo(AnthropicRequestUtils.ANTHROPIC_VERSION_2023_06_01)); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(6)); + assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abc")))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("max_tokens"), is(1)); + assertThat(requestMap.get("temperature"), is(2.0)); + assertThat(requestMap.get("top_p"), is(-3.0)); + assertThat(requestMap.get("top_k"), is(3)); + } + } + + public void testCreate_ChatCompletionModel_FailsFromInvalidResponseFormat() throws IOException { + // timeout as zero for no retries + var settings = buildSettingsWithRetryFields( + TimeValue.timeValueMillis(1), + TimeValue.timeValueMinutes(1), + TimeValue.timeValueSeconds(0) + ); + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager, settings); + + try (var sender = createSender(senderFactory)) { + sender.start(); + + String responseJson = """ + { + "id": "msg_01XzZQmG41BMGe5NZ5p2vEWb", + "type": "message", + "role": "assistant", + "model": "claude-3-opus-20240229", + "content_does_not_exist": [ + { + "type": "text", + "text": "San Francisco has a cool-summer Mediterranean climate." 
+ } + ], + "stop_reason": "end_turn", + "stop_sequence": null, + "usage": { + "input_tokens": 16, + "output_tokens": 326 + } + } + """; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var model = AnthropicChatCompletionModelTests.createChatCompletionModel(getUrl(webServer), "secret", "model", 0); + var actionCreator = new AnthropicActionCreator(sender, createWithEmptySettings(threadPool)); + var overriddenTaskSettings = AnthropicChatCompletionTaskSettingsTests.getChatCompletionTaskSettingsMap(1, null, null, null); + var action = actionCreator.create(model, overriddenTaskSettings); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + + var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); + assertThat( + thrownException.getMessage(), + is(format("Failed to send Anthropic chat completions request to [%s]", getUrl(webServer))) + ); + assertThat( + thrownException.getCause().getMessage(), + is("Failed to find required field [content] in Anthropic chat completions response") + ); + + assertThat(webServer.requests(), hasSize(1)); + assertNull(webServer.requests().get(0).getUri().getQuery()); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getHeader(AnthropicRequestUtils.X_API_KEY), equalTo("secret")); + assertThat( + webServer.requests().get(0).getHeader(AnthropicRequestUtils.VERSION), + equalTo(AnthropicRequestUtils.ANTHROPIC_VERSION_2023_06_01) + ); + + var requestMap = entityAsMap(webServer.requests().get(0).getBody()); + assertThat(requestMap.size(), is(3)); + assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abc")))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("max_tokens"), is(1)); + } + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/anthropic/AnthropicChatCompletionActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/anthropic/AnthropicChatCompletionActionTests.java new file mode 100644 index 0000000000000..ffa0ac307490e --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/anthropic/AnthropicChatCompletionActionTests.java @@ -0,0 +1,242 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.action.anthropic; + +import org.apache.http.HttpHeaders; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockRequest; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.inference.action.InferenceAction; +import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.external.request.anthropic.AnthropicRequestUtils; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionModelTests; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; +import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; +import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; +import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; +import static org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests.createSender; +import static org.elasticsearch.xpack.inference.results.ChatCompletionResultsTests.buildExpectationCompletion; +import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; + +public class AnthropicChatCompletionActionTests extends ESTestCase { + + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + private final MockWebServer webServer = new MockWebServer(); + private ThreadPool threadPool; + private HttpClientManager clientManager; + + @Before + public void init() throws Exception { + webServer.start(); + threadPool = createThreadPool(inferenceUtilityPool()); + clientManager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty(), mock(ThrottlerManager.class)); + } + + @After + public void shutdown() throws IOException { + clientManager.close(); + terminate(threadPool); + webServer.close(); + } + + public void testExecute_ReturnsSuccessfulResponse() throws IOException { + 
var senderFactory = new HttpRequestSender.Factory(createWithEmptySettings(threadPool), clientManager, mockClusterServiceEmpty()); + + try (var sender = createSender(senderFactory)) { + sender.start(); + + String responseJson = """ + { + "id": "msg_01XzZQmG41BMGe5NZ5p2vEWb", + "type": "message", + "role": "assistant", + "model": "claude-3-opus-20240229", + "content": [ + { + "type": "text", + "text": "San Francisco has a cool-summer Mediterranean climate." + } + ], + "stop_reason": "end_turn", + "stop_sequence": null, + "usage": { + "input_tokens": 16, + "output_tokens": 326 + } + } + """; + + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var action = createAction(getUrl(webServer), "secret", "model", 1, sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), is(buildExpectationCompletion(List.of("San Francisco has a cool-summer Mediterranean climate.")))); + assertThat(webServer.requests(), hasSize(1)); + + MockRequest request = webServer.requests().get(0); + + assertNull(request.getUri().getQuery()); + assertThat(request.getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + assertThat(request.getHeader(AnthropicRequestUtils.X_API_KEY), equalTo("secret")); + assertThat(request.getHeader(AnthropicRequestUtils.VERSION), equalTo(AnthropicRequestUtils.ANTHROPIC_VERSION_2023_06_01)); + + var requestMap = entityAsMap(request.getBody()); + assertThat(requestMap.size(), is(3)); + assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abc")))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("max_tokens"), is(1)); + } + } + + public void testExecute_ThrowsURISyntaxException_ForInvalidUrl() throws IOException { + try (var sender = mock(Sender.class)) { + var thrownException = expectThrows(IllegalArgumentException.class, () -> createAction("^^", "secret", "model", 1, sender)); + assertThat(thrownException.getMessage(), containsString("unable to parse url [^^]")); + } + } + + public void testExecute_ThrowsElasticsearchException() { + var sender = mock(Sender.class); + doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any(), any(), any()); + + var action = createAction(getUrl(webServer), "secret", "model", 1, sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(thrownException.getMessage(), is("failed")); + } + + public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled() { + var sender = mock(Sender.class); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[2]; + listener.onFailure(new IllegalStateException("failed")); + + return Void.TYPE; + }).when(sender).send(any(), any(), any(), any()); + + var action = createAction(getUrl(webServer), "secret", "model", 1, sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> 
listener.actionGet(TIMEOUT)); + + assertThat( + thrownException.getMessage(), + is(format("Failed to send Anthropic chat completions request to [%s]", getUrl(webServer))) + ); + } + + public void testExecute_ThrowsException() { + var sender = mock(Sender.class); + doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any(), any(), any()); + + var action = createAction(getUrl(webServer), "secret", "model", 1, sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat( + thrownException.getMessage(), + is(format("Failed to send Anthropic chat completions request to [%s]", getUrl(webServer))) + ); + } + + public void testExecute_ThrowsException_WhenInputIsGreaterThanOne() throws IOException { + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); + + try (var sender = createSender(senderFactory)) { + sender.start(); + + String responseJson = """ + { + "id": "msg_01XzZQmG41BMGe5NZ5p2vEWb", + "type": "message", + "role": "assistant", + "model": "claude-3-opus-20240229", + "content": [ + { + "type": "text", + "text": "San Francisco has a cool-summer Mediterranean climate." + } + ], + "stop_reason": "end_turn", + "stop_sequence": null, + "usage": { + "input_tokens": 16, + "output_tokens": 326 + } + } + """; + + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var action = createAction(getUrl(webServer), "secret", "model", 1, sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(new DocumentsOnlyInput(List.of("abc", "def")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + + var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(thrownException.getMessage(), is("Anthropic completions only accepts 1 input")); + assertThat(thrownException.status(), is(RestStatus.BAD_REQUEST)); + } + } + + private AnthropicChatCompletionAction createAction(String url, String apiKey, String modelName, int maxTokens, Sender sender) { + var model = AnthropicChatCompletionModelTests.createChatCompletionModel(url, apiKey, modelName, maxTokens); + + return new AnthropicChatCompletionAction(sender, model, createWithEmptySettings(threadPool)); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiEmbeddingsActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiEmbeddingsActionTests.java new file mode 100644 index 0000000000000..17a2c29e195f1 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiEmbeddingsActionTests.java @@ -0,0 +1,127 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.action.googlevertexai; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.inference.action.InferenceAction; +import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; +import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; +import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; +import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; +import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsModelTests.createModel; +import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; + +public class GoogleVertexAiEmbeddingsActionTests extends ESTestCase { + + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + private final MockWebServer webServer = new MockWebServer(); + private ThreadPool threadPool; + private HttpClientManager clientManager; + + @Before + public void init() throws Exception { + webServer.start(); + threadPool = createThreadPool(inferenceUtilityPool()); + clientManager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty(), mock(ThrottlerManager.class)); + } + + @After + public void shutdown() throws IOException { + clientManager.close(); + terminate(threadPool); + webServer.close(); + } + + // Successful case tested via end-to-end notebook tests in AppEx repo + + public void testExecute_ThrowsElasticsearchException() { + var sender = mock(Sender.class); + doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any(), any(), any()); + + var action = createAction(getUrl(webServer), "location", "projectId", "model", sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(thrownException.getMessage(), is("failed")); + } + + public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled() { + var sender = mock(Sender.class); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[2]; + listener.onFailure(new IllegalStateException("failed")); + + return Void.TYPE; + 
}).when(sender).send(any(), any(), any(), any()); + + var action = createAction(getUrl(webServer), "location", "projectId", "model", sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat( + thrownException.getMessage(), + is(format("Failed to send Google Vertex AI embeddings request to [%s]", getUrl(webServer))) + ); + } + + public void testExecute_ThrowsException() { + var sender = mock(Sender.class); + doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any(), any(), any()); + + var action = createAction(getUrl(webServer), "location", "projectId", "model", sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat( + thrownException.getMessage(), + is(format("Failed to send Google Vertex AI embeddings request to [%s]", getUrl(webServer))) + ); + } + + private GoogleVertexAiEmbeddingsAction createAction(String url, String location, String projectId, String modelName, Sender sender) { + var model = createModel(location, projectId, modelName, url, "{}"); + + return new GoogleVertexAiEmbeddingsAction(sender, model, createWithEmptySettings(threadPool)); + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiRerankActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiRerankActionTests.java new file mode 100644 index 0000000000000..b84a6328e9882 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiRerankActionTests.java @@ -0,0 +1,118 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.action.googlevertexai; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.inference.action.InferenceAction; +import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankModelTests; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; +import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; +import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; +import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; + +public class GoogleVertexAiRerankActionTests extends ESTestCase { + + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + private final MockWebServer webServer = new MockWebServer(); + private ThreadPool threadPool; + private HttpClientManager clientManager; + + @Before + public void init() throws Exception { + webServer.start(); + threadPool = createThreadPool(inferenceUtilityPool()); + clientManager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty(), mock(ThrottlerManager.class)); + } + + @After + public void shutdown() throws IOException { + clientManager.close(); + terminate(threadPool); + webServer.close(); + } + + // Successful case tested via end-to-end notebook tests in AppEx repo + public void testExecute_ThrowsElasticsearchException() { + var sender = mock(Sender.class); + doThrow(new ElasticsearchException("failed")).when(sender).send(any(), any(), any(), any()); + + var action = createAction(getUrl(webServer), "projectId", sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(thrownException.getMessage(), is("failed")); + } + + public void testExecute_ThrowsElasticsearchException_WhenSenderOnFailureIsCalled() { + var sender = mock(Sender.class); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[2]; + listener.onFailure(new IllegalStateException("failed")); + + return Void.TYPE; + }).when(sender).send(any(), any(), any(), any()); + + var action = createAction(getUrl(webServer), "projectId", sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + 
action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(thrownException.getMessage(), is(format("Failed to send Google Vertex AI rerank request to [%s]", getUrl(webServer)))); + } + + public void testExecute_ThrowsException() { + var sender = mock(Sender.class); + doThrow(new IllegalArgumentException("failed")).when(sender).send(any(), any(), any(), any()); + + var action = createAction(getUrl(webServer), "projectId", sender); + + PlainActionFuture listener = new PlainActionFuture<>(); + action.execute(new DocumentsOnlyInput(List.of("abc")), InferenceAction.Request.DEFAULT_TIMEOUT, listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + + assertThat(thrownException.getMessage(), is(format("Failed to send Google Vertex AI rerank request to [%s]", getUrl(webServer)))); + } + + private GoogleVertexAiRerankAction createAction(String url, String projectId, Sender sender) { + var model = GoogleVertexAiRerankModelTests.createModel(url, projectId, null); + + return new GoogleVertexAiRerankAction(sender, model, threadPool); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecutorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecutorTests.java new file mode 100644 index 0000000000000..9326d39cb657c --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockExecutorTests.java @@ -0,0 +1,172 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import com.amazonaws.services.bedrockruntime.model.ContentBlock; +import com.amazonaws.services.bedrockruntime.model.ConverseOutput; +import com.amazonaws.services.bedrockruntime.model.ConverseResult; +import com.amazonaws.services.bedrockruntime.model.InvokeModelResult; +import com.amazonaws.services.bedrockruntime.model.Message; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockChatCompletionRequest; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockTitanCompletionRequestEntity; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings.AmazonBedrockEmbeddingsRequest; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings.AmazonBedrockTitanEmbeddingsRequestEntity; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.completion.AmazonBedrockChatCompletionResponseHandler; +import org.elasticsearch.xpack.inference.external.response.amazonbedrock.embeddings.AmazonBedrockEmbeddingsResponseHandler; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionModelTests; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModelTests; + +import java.nio.CharBuffer; +import java.nio.charset.CharacterCodingException; +import java.nio.charset.Charset; +import java.util.List; + +import static org.elasticsearch.xpack.inference.common.TruncatorTests.createTruncator; +import static org.elasticsearch.xpack.inference.results.ChatCompletionResultsTests.buildExpectationCompletion; +import static org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests.buildExpectationFloat; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockExecutorTests extends ESTestCase { + public void testExecute_EmbeddingsRequest_ForAmazonTitan() throws CharacterCodingException { + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + "accesskey", + "secretkey" + ); + var truncator = createTruncator(); + var truncatedInput = truncator.truncate(List.of("abc")); + var requestEntity = new AmazonBedrockTitanEmbeddingsRequestEntity("abc"); + var request = new AmazonBedrockEmbeddingsRequest(truncator, truncatedInput, model, requestEntity, null); + var responseHandler = new AmazonBedrockEmbeddingsResponseHandler(); + + var clientCache = new AmazonBedrockMockClientCache(null, getTestInvokeResult(TEST_AMAZON_TITAN_EMBEDDINGS_RESULT), null); + var listener = new PlainActionFuture(); + + var executor = new AmazonBedrockEmbeddingsExecutor(request, responseHandler, logger, () -> false, listener, clientCache); + executor.run(); + var result = listener.actionGet(new TimeValue(30000)); + assertNotNull(result); + assertThat(result.asMap(), is(buildExpectationFloat(List.of(new float[] { 0.123F, 0.456F, 0.678F, 0.789F })))); + } + + public void testExecute_EmbeddingsRequest_ForCohere() throws CharacterCodingException { + var model 
= AmazonBedrockEmbeddingsModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.COHERE, + "accesskey", + "secretkey" + ); + var requestEntity = new AmazonBedrockTitanEmbeddingsRequestEntity("abc"); + var truncator = createTruncator(); + var truncatedInput = truncator.truncate(List.of("abc")); + var request = new AmazonBedrockEmbeddingsRequest(truncator, truncatedInput, model, requestEntity, null); + var responseHandler = new AmazonBedrockEmbeddingsResponseHandler(); + + var clientCache = new AmazonBedrockMockClientCache(null, getTestInvokeResult(TEST_COHERE_EMBEDDINGS_RESULT), null); + var listener = new PlainActionFuture(); + + var executor = new AmazonBedrockEmbeddingsExecutor(request, responseHandler, logger, () -> false, listener, clientCache); + executor.run(); + var result = listener.actionGet(new TimeValue(30000)); + assertNotNull(result); + assertThat(result.asMap(), is(buildExpectationFloat(List.of(new float[] { 0.123F, 0.456F, 0.678F, 0.789F })))); + } + + public void testExecute_ChatCompletionRequest() throws CharacterCodingException { + var model = AmazonBedrockChatCompletionModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + "accesskey", + "secretkey" + ); + + var requestEntity = new AmazonBedrockTitanCompletionRequestEntity(List.of("abc"), null, null, 512); + var request = new AmazonBedrockChatCompletionRequest(model, requestEntity, null); + var responseHandler = new AmazonBedrockChatCompletionResponseHandler(); + + var clientCache = new AmazonBedrockMockClientCache(getTestConverseResult("converse result"), null, null); + var listener = new PlainActionFuture(); + + var executor = new AmazonBedrockChatCompletionExecutor(request, responseHandler, logger, () -> false, listener, clientCache); + executor.run(); + var result = listener.actionGet(new TimeValue(30000)); + assertNotNull(result); + assertThat(result.asMap(), is(buildExpectationCompletion(List.of("converse result")))); + } + + public void testExecute_FailsProperly_WithElasticsearchException() { + var model = AmazonBedrockChatCompletionModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + "accesskey", + "secretkey" + ); + + var requestEntity = new AmazonBedrockTitanCompletionRequestEntity(List.of("abc"), null, null, 512); + var request = new AmazonBedrockChatCompletionRequest(model, requestEntity, null); + var responseHandler = new AmazonBedrockChatCompletionResponseHandler(); + + var clientCache = new AmazonBedrockMockClientCache(null, null, new ElasticsearchException("test exception")); + var listener = new PlainActionFuture(); + + var executor = new AmazonBedrockChatCompletionExecutor(request, responseHandler, logger, () -> false, listener, clientCache); + executor.run(); + + var exceptionThrown = assertThrows(ElasticsearchException.class, () -> listener.actionGet(new TimeValue(30000))); + assertThat(exceptionThrown.getMessage(), containsString("Failed to send request from inference entity id [id]")); + assertThat(exceptionThrown.getCause().getMessage(), containsString("test exception")); + } + + public static ConverseResult getTestConverseResult(String resultText) { + var message = new Message().withContent(new ContentBlock().withText(resultText)); + var converseOutput = new ConverseOutput().withMessage(message); + return new ConverseResult().withOutput(converseOutput); + } + + public static InvokeModelResult getTestInvokeResult(String resultJson) throws CharacterCodingException { + var result = new 
InvokeModelResult(); + result.setContentType("application/json"); + var encoder = Charset.forName("UTF-8").newEncoder(); + result.setBody(encoder.encode(CharBuffer.wrap(resultJson))); + return result; + } + + public static final String TEST_AMAZON_TITAN_EMBEDDINGS_RESULT = """ + { + "embedding": [0.123, 0.456, 0.678, 0.789], + "inputTextTokenCount": int + }"""; + + public static final String TEST_COHERE_EMBEDDINGS_RESULT = """ + { + "embeddings": [ + [0.123, 0.456, 0.678, 0.789] + ], + "id": string, + "response_type" : "embeddings_floats", + "texts": [string] + } + """; +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClientCacheTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClientCacheTests.java new file mode 100644 index 0000000000000..873b2e22497c6 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClientCacheTests.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModelTests; + +import java.io.IOException; +import java.time.Clock; +import java.time.Duration; +import java.time.Instant; +import java.time.ZoneId; + +import static org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockInferenceClient.CLIENT_CACHE_EXPIRY_MINUTES; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.sameInstance; + +public class AmazonBedrockInferenceClientCacheTests extends ESTestCase { + public void testCache_ReturnsSameObject() throws IOException { + AmazonBedrockInferenceClientCache cacheInstance; + try (var cache = new AmazonBedrockInferenceClientCache(AmazonBedrockMockInferenceClient::create, null)) { + cacheInstance = cache; + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "inferenceId", + "testregion", + "model", + AmazonBedrockProvider.AMAZONTITAN, + "access_key", + "secret_key" + ); + + var client = cache.getOrCreateClient(model, null); + + var secondModel = AmazonBedrockEmbeddingsModelTests.createModel( + "inferenceId_two", + "testregion", + "a_different_model", + AmazonBedrockProvider.COHERE, + "access_key", + "secret_key" + ); + + var secondClient = cache.getOrCreateClient(secondModel, null); + assertThat(client, sameInstance(secondClient)); + + assertThat(cache.clientCount(), is(1)); + + var thirdClient = cache.getOrCreateClient(model, null); + assertThat(client, sameInstance(thirdClient)); + + assertThat(cache.clientCount(), is(1)); + } + assertThat(cacheInstance.clientCount(), is(0)); + } + + public void testCache_ItEvictsExpiredClients() throws IOException { + var clock = Clock.fixed(Instant.now(), ZoneId.systemDefault()); + AmazonBedrockInferenceClientCache cacheInstance; + try (var cache = new AmazonBedrockInferenceClientCache(AmazonBedrockMockInferenceClient::create, clock)) { + cacheInstance = cache; + + var model = 
AmazonBedrockEmbeddingsModelTests.createModel( + "inferenceId", + "testregion", + "model", + AmazonBedrockProvider.AMAZONTITAN, + "access_key", + "secret_key" + ); + + var client = cache.getOrCreateClient(model, null); + + var secondModel = AmazonBedrockEmbeddingsModelTests.createModel( + "inferenceId_two", + "some_other_region", + "a_different_model", + AmazonBedrockProvider.COHERE, + "other_access_key", + "other_secret_key" + ); + + assertThat(cache.clientCount(), is(1)); + + var secondClient = cache.getOrCreateClient(secondModel, null); + assertThat(client, not(sameInstance(secondClient))); + + assertThat(cache.clientCount(), is(2)); + + // set clock to after expiry + cache.setClock(Clock.fixed(clock.instant().plus(Duration.ofMinutes(CLIENT_CACHE_EXPIRY_MINUTES + 1)), ZoneId.systemDefault())); + + // get another client, this will ensure flushExpiredClients is called + var regetSecondClient = cache.getOrCreateClient(secondModel, null); + assertThat(secondClient, sameInstance(regetSecondClient)); + + var regetFirstClient = cache.getOrCreateClient(model, null); + assertThat(client, not(sameInstance(regetFirstClient))); + } + assertThat(cacheInstance.clientCount(), is(0)); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockClientCache.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockClientCache.java new file mode 100644 index 0000000000000..912967a9012d7 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockClientCache.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.amazonbedrock; + +import com.amazonaws.services.bedrockruntime.model.ConverseResult; +import com.amazonaws.services.bedrockruntime.model.InvokeModelResult; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; + +import java.io.IOException; + +public class AmazonBedrockMockClientCache implements AmazonBedrockClientCache { + private ConverseResult converseResult = null; + private InvokeModelResult invokeModelResult = null; + private ElasticsearchException exceptionToThrow = null; + + public AmazonBedrockMockClientCache() {} + + public AmazonBedrockMockClientCache( + @Nullable ConverseResult converseResult, + @Nullable InvokeModelResult invokeModelResult, + @Nullable ElasticsearchException exceptionToThrow + ) { + this.converseResult = converseResult; + this.invokeModelResult = invokeModelResult; + this.exceptionToThrow = exceptionToThrow; + } + + @Override + public AmazonBedrockBaseClient getOrCreateClient(AmazonBedrockModel model, TimeValue timeout) { + var client = (AmazonBedrockMockInferenceClient) AmazonBedrockMockInferenceClient.create(model, timeout); + client.setConverseResult(converseResult); + client.setInvokeModelResult(invokeModelResult); + client.setExceptionToThrow(exceptionToThrow); + return client; + } + + @Override + public void close() throws IOException { + // nothing to do + } + + public void setConverseResult(ConverseResult converseResult) { + this.converseResult = converseResult; + } + + public void setInvokeModelResult(InvokeModelResult invokeModelResult) { + this.invokeModelResult = invokeModelResult; + } + + public void setExceptionToThrow(ElasticsearchException exceptionToThrow) { + this.exceptionToThrow = exceptionToThrow; + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockExecuteRequestSender.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockExecuteRequestSender.java new file mode 100644 index 0000000000000..b0df8a40e2551 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockExecuteRequestSender.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + + package org.elasticsearch.xpack.inference.external.amazonbedrock; + + import com.amazonaws.services.bedrockruntime.model.ConverseResult; + import com.amazonaws.services.bedrockruntime.model.InvokeModelResult; + + import org.apache.logging.log4j.Logger; + import org.elasticsearch.ElasticsearchException; + import org.elasticsearch.action.ActionListener; + import org.elasticsearch.inference.InferenceServiceResults; + import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockRequest; + import org.elasticsearch.xpack.inference.external.response.amazonbedrock.AmazonBedrockResponseHandler; + import org.elasticsearch.xpack.inference.logging.ThrottlerManager; + + import java.util.List; + import java.util.Queue; + import java.util.concurrent.ConcurrentLinkedQueue; + import java.util.function.Supplier; + + public class AmazonBedrockMockExecuteRequestSender extends AmazonBedrockExecuteOnlyRequestSender { + + private Queue<Object> results = new ConcurrentLinkedQueue<>(); + private Queue<List<String>> inputs = new ConcurrentLinkedQueue<>(); + private int sendCounter = 0; + + public AmazonBedrockMockExecuteRequestSender(AmazonBedrockClientCache clientCache, ThrottlerManager throttlerManager) { + super(clientCache, throttlerManager); + } + + public void enqueue(Object result) { + results.add(result); + } + + public int sendCount() { + return sendCounter; + } + + public List<String> getInputs() { + return inputs.remove(); + } + + @Override + protected AmazonBedrockExecutor createExecutor( + AmazonBedrockRequest awsRequest, + AmazonBedrockResponseHandler awsResponse, + Logger logger, + Supplier<Boolean> hasRequestTimedOutFunction, + ActionListener<InferenceServiceResults> listener + ) { + setCacheResult(); + return super.createExecutor(awsRequest, awsResponse, logger, hasRequestTimedOutFunction, listener); + } + + private void setCacheResult() { + var mockCache = (AmazonBedrockMockClientCache) this.clientCache; + var result = results.remove(); + if (result instanceof ConverseResult converseResult) { + mockCache.setConverseResult(converseResult); + return; + } + + if (result instanceof InvokeModelResult invokeModelResult) { + mockCache.setInvokeModelResult(invokeModelResult); + return; + } + + if (result instanceof ElasticsearchException exception) { + mockCache.setExceptionToThrow(exception); + return; + } + + throw new RuntimeException("Unknown result type: " + result.getClass()); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockInferenceClient.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockInferenceClient.java new file mode 100644 index 0000000000000..dcbf8dfcbff01 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockInferenceClient.java @@ -0,0 +1,133 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + + package org.elasticsearch.xpack.inference.external.amazonbedrock; + + import com.amazonaws.services.bedrockruntime.AmazonBedrockRuntimeAsync; + import com.amazonaws.services.bedrockruntime.model.ConverseResult; + import com.amazonaws.services.bedrockruntime.model.InvokeModelResult; + + import org.elasticsearch.ElasticsearchException; + import org.elasticsearch.core.Nullable; + import org.elasticsearch.core.TimeValue; + import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; + + import java.util.concurrent.ExecutionException; + import java.util.concurrent.Future; + import java.util.concurrent.TimeUnit; + import java.util.concurrent.TimeoutException; + + import static org.mockito.ArgumentMatchers.any; + import static org.mockito.Mockito.doAnswer; + import static org.mockito.Mockito.mock; + + public class AmazonBedrockMockInferenceClient extends AmazonBedrockInferenceClient { + private ConverseResult converseResult = null; + private InvokeModelResult invokeModelResult = null; + private ElasticsearchException exceptionToThrow = null; + + private Future<ConverseResult> converseResultFuture = new MockConverseResultFuture(); + private Future<InvokeModelResult> invokeModelResultFuture = new MockInvokeResultFuture(); + + public static AmazonBedrockBaseClient create(AmazonBedrockModel model, @Nullable TimeValue timeout) { + return new AmazonBedrockMockInferenceClient(model, timeout); + } + + protected AmazonBedrockMockInferenceClient(AmazonBedrockModel model, @Nullable TimeValue timeout) { + super(model, timeout); + } + + public void setExceptionToThrow(ElasticsearchException exceptionToThrow) { + this.exceptionToThrow = exceptionToThrow; + } + + public void setConverseResult(ConverseResult result) { + this.converseResult = result; + } + + public void setInvokeModelResult(InvokeModelResult result) { + this.invokeModelResult = result; + } + + @Override + protected AmazonBedrockRuntimeAsync createAmazonBedrockClient(AmazonBedrockModel model, @Nullable TimeValue timeout) { + var runtimeClient = mock(AmazonBedrockRuntimeAsync.class); + doAnswer(invocation -> invokeModelResultFuture).when(runtimeClient).invokeModelAsync(any()); + doAnswer(invocation -> converseResultFuture).when(runtimeClient).converseAsync(any()); + + return runtimeClient; + } + + @Override + void close() {} + + private class MockConverseResultFuture implements Future<ConverseResult> { + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return false; + } + + @Override + public boolean isCancelled() { + return false; + } + + @Override + public boolean isDone() { + return false; + } + + @Override + public ConverseResult get() throws InterruptedException, ExecutionException { + if (exceptionToThrow != null) { + throw exceptionToThrow; + } + return converseResult; + } + + @Override + public ConverseResult get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { + if (exceptionToThrow != null) { + throw exceptionToThrow; + } + return converseResult; + } + } + + private class MockInvokeResultFuture implements Future<InvokeModelResult> { + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return false; + } + + @Override + public boolean isCancelled() { + return false; + } + + @Override + public boolean isDone() { + return false; + } + + @Override + public InvokeModelResult get() throws InterruptedException, ExecutionException { + if (exceptionToThrow != null) { + throw exceptionToThrow; + } + return invokeModelResult; + } + + @Override + public InvokeModelResult get(long timeout, TimeUnit unit) throws
InterruptedException, ExecutionException, TimeoutException { + if (exceptionToThrow != null) { + throw exceptionToThrow; + } + return invokeModelResult; + } + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockRequestSender.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockRequestSender.java new file mode 100644 index 0000000000000..e68beaf4c1eb5 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockMockRequestSender.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + + package org.elasticsearch.xpack.inference.external.amazonbedrock; + + import org.elasticsearch.ElasticsearchException; + import org.elasticsearch.action.ActionListener; + import org.elasticsearch.cluster.service.ClusterService; + import org.elasticsearch.core.TimeValue; + import org.elasticsearch.inference.InferenceServiceResults; + import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; + import org.elasticsearch.xpack.inference.external.http.sender.InferenceInputs; + import org.elasticsearch.xpack.inference.external.http.sender.RequestManager; + import org.elasticsearch.xpack.inference.external.http.sender.Sender; + import org.elasticsearch.xpack.inference.services.ServiceComponents; + + import java.io.IOException; + import java.util.List; + import java.util.Queue; + import java.util.concurrent.ConcurrentLinkedQueue; + + public class AmazonBedrockMockRequestSender implements Sender { + + public static class Factory extends AmazonBedrockRequestSender.Factory { + private final Sender sender; + + public Factory(ServiceComponents serviceComponents, ClusterService clusterService) { + super(serviceComponents, clusterService); + this.sender = new AmazonBedrockMockRequestSender(); + } + + public Sender createSender() { + return sender; + } + } + + private Queue<Object> results = new ConcurrentLinkedQueue<>(); + private Queue<List<String>> inputs = new ConcurrentLinkedQueue<>(); + private int sendCounter = 0; + + public void enqueue(Object result) { + results.add(result); + } + + public int sendCount() { + return sendCounter; + } + + public List<String> getInputs() { + return inputs.remove(); + } + + @Override + public void start() { + // do nothing + } + + @Override + public void send( + RequestManager requestCreator, + InferenceInputs inferenceInputs, + TimeValue timeout, + ActionListener<InferenceServiceResults> listener + ) { + sendCounter++; + var docsInput = (DocumentsOnlyInput) inferenceInputs; + inputs.add(docsInput.getInputs()); + + if (results.isEmpty()) { + listener.onFailure(new ElasticsearchException("No results found")); + } else { + var resultObject = results.remove(); + if (resultObject instanceof InferenceServiceResults inferenceResult) { + listener.onResponse(inferenceResult); + } else if (resultObject instanceof Exception e) { + listener.onFailure(e); + } else { + throw new RuntimeException("Unknown result type: " + resultObject.getClass()); + } + } + } + + @Override + public void close() throws IOException { + // do nothing + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockRequestSenderTests.java
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockRequestSenderTests.java new file mode 100644 index 0000000000000..7fa8a09d5bf12 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockRequestSenderTests.java @@ -0,0 +1,127 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + + package org.elasticsearch.xpack.inference.external.amazonbedrock; + + import org.elasticsearch.action.support.PlainActionFuture; + import org.elasticsearch.common.settings.Settings; + import org.elasticsearch.core.TimeValue; + import org.elasticsearch.inference.InferenceServiceResults; + import org.elasticsearch.test.ESTestCase; + import org.elasticsearch.threadpool.ThreadPool; + import org.elasticsearch.xpack.inference.external.http.sender.AmazonBedrockChatCompletionRequestManager; + import org.elasticsearch.xpack.inference.external.http.sender.AmazonBedrockEmbeddingsRequestManager; + import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; + import org.elasticsearch.xpack.inference.external.http.sender.Sender; + import org.elasticsearch.xpack.inference.logging.ThrottlerManager; + import org.elasticsearch.xpack.inference.services.ServiceComponentsTests; + import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; + import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionModelTests; + import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModelTests; + import org.junit.After; + import org.junit.Before; + + import java.io.IOException; + import java.util.List; + import java.util.concurrent.TimeUnit; + import java.util.concurrent.atomic.AtomicReference; + + import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; + import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; + import static org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockExecutorTests.TEST_AMAZON_TITAN_EMBEDDINGS_RESULT; + import static org.elasticsearch.xpack.inference.results.ChatCompletionResultsTests.buildExpectationCompletion; + import static org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests.buildExpectationFloat; + import static org.hamcrest.Matchers.is; + import static org.mockito.Mockito.mock; + + public class AmazonBedrockRequestSenderTests extends ESTestCase { + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + private ThreadPool threadPool; + private final AtomicReference<Thread> threadRef = new AtomicReference<>(); + + @Before + public void init() throws Exception { + threadPool = createThreadPool(inferenceUtilityPool()); + threadRef.set(null); + } + + @After + public void shutdown() throws IOException, InterruptedException { + if (threadRef.get() != null) { + threadRef.get().join(TIMEOUT.millis()); + } + + terminate(threadPool); + } + + public void testCreateSender_SendsEmbeddingsRequestAndReceivesResponse() throws Exception { + var senderFactory = createSenderFactory(threadPool, Settings.EMPTY); + var requestSender = new AmazonBedrockMockExecuteRequestSender(new AmazonBedrockMockClientCache(), mock(ThrottlerManager.class)); +
requestSender.enqueue(AmazonBedrockExecutorTests.getTestInvokeResult(TEST_AMAZON_TITAN_EMBEDDINGS_RESULT)); + try (var sender = createSender(senderFactory, requestSender)) { + sender.start(); + + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "test_id", + "test_region", + "test_model", + AmazonBedrockProvider.AMAZONTITAN, + "accesskey", + "secretkey" + ); + + PlainActionFuture<InferenceServiceResults> listener = new PlainActionFuture<>(); + var serviceComponents = ServiceComponentsTests.createWithEmptySettings(threadPool); + var requestManager = new AmazonBedrockEmbeddingsRequestManager( + model, + serviceComponents.truncator(), + threadPool, + new TimeValue(30, TimeUnit.SECONDS) + ); + sender.send(requestManager, new DocumentsOnlyInput(List.of("abc")), null, listener); + + var result = listener.actionGet(TIMEOUT); + assertThat(result.asMap(), is(buildExpectationFloat(List.of(new float[] { 0.123F, 0.456F, 0.678F, 0.789F })))); + } + } + + public void testCreateSender_SendsCompletionRequestAndReceivesResponse() throws Exception { + var senderFactory = createSenderFactory(threadPool, Settings.EMPTY); + var requestSender = new AmazonBedrockMockExecuteRequestSender(new AmazonBedrockMockClientCache(), mock(ThrottlerManager.class)); + requestSender.enqueue(AmazonBedrockExecutorTests.getTestConverseResult("test response text")); + try (var sender = createSender(senderFactory, requestSender)) { + sender.start(); + + var model = AmazonBedrockChatCompletionModelTests.createModel( + "test_id", + "test_region", + "test_model", + AmazonBedrockProvider.AMAZONTITAN, + "accesskey", + "secretkey" + ); + + PlainActionFuture<InferenceServiceResults> listener = new PlainActionFuture<>(); + var requestManager = new AmazonBedrockChatCompletionRequestManager(model, threadPool, new TimeValue(30, TimeUnit.SECONDS)); + sender.send(requestManager, new DocumentsOnlyInput(List.of("abc")), null, listener); + + var result = listener.actionGet(TIMEOUT); + assertThat(result.asMap(), is(buildExpectationCompletion(List.of("test response text")))); + } + } + + public static AmazonBedrockRequestSender.Factory createSenderFactory(ThreadPool threadPool, Settings settings) { + return new AmazonBedrockRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, settings), + mockClusterServiceEmpty() + ); + } + + public static Sender createSender(AmazonBedrockRequestSender.Factory factory, AmazonBedrockExecuteOnlyRequestSender requestSender) { + return factory.createSender(requestSender); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/anthropic/AnthropicResponseHandlerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/anthropic/AnthropicResponseHandlerTests.java new file mode 100644 index 0000000000000..0b9390f293ff9 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/anthropic/AnthropicResponseHandlerTests.java @@ -0,0 +1,168 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.inference.external.anthropic; + +import org.apache.http.Header; +import org.apache.http.HeaderElement; +import org.apache.http.HttpResponse; +import org.apache.http.StatusLine; +import org.apache.http.message.BasicHeader; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.external.http.retry.RetryException; +import org.elasticsearch.xpack.inference.external.request.Request; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class AnthropicResponseHandlerTests extends ESTestCase { + + public void testCheckForFailureStatusCode_DoesNotThrowFor200() { + callCheckForFailureStatusCode(200, "id"); + } + + public void testCheckForFailureStatusCode_ThrowsFor500_ShouldRetry() { + var exception = expectThrows(RetryException.class, () -> callCheckForFailureStatusCode(500, "id")); + assertTrue(exception.shouldRetry()); + assertThat( + exception.getCause().getMessage(), + containsString("Received a server error status code for request from inference entity id [id] status [500]") + ); + assertThat(((ElasticsearchStatusException) exception.getCause()).status(), is(RestStatus.BAD_REQUEST)); + } + + public void testCheckForFailureStatusCode_ThrowsFor529_ShouldRetry() { + var exception = expectThrows(RetryException.class, () -> callCheckForFailureStatusCode(529, "id")); + assertTrue(exception.shouldRetry()); + assertThat( + exception.getCause().getMessage(), + containsString( + "Received an Anthropic server is temporarily overloaded status code for request from inference entity id [id] status [529]" + ) + ); + assertThat(((ElasticsearchStatusException) exception.getCause()).status(), is(RestStatus.BAD_REQUEST)); + } + + public void testCheckForFailureStatusCode_ThrowsFor505_ShouldNotRetry() { + var exception = expectThrows(RetryException.class, () -> callCheckForFailureStatusCode(505, "id")); + assertFalse(exception.shouldRetry()); + assertThat( + exception.getCause().getMessage(), + containsString("Received a server error status code for request from inference entity id [id] status [505]") + ); + assertThat(((ElasticsearchStatusException) exception.getCause()).status(), is(RestStatus.BAD_REQUEST)); + } + + public void testCheckForFailureStatusCode_ThrowsFor429_ShouldRetry() { + var exception = expectThrows(RetryException.class, () -> callCheckForFailureStatusCode(429, "id")); + assertTrue(exception.shouldRetry()); + assertThat( + exception.getCause().getMessage(), + containsString( + "Received a rate limit status code. Token limit [unknown], remaining tokens [unknown], tokens reset [unknown]. " + + "Request limit [unknown], remaining requests [unknown], request reset [unknown]. 
" + + "Retry after [unknown] for request from inference entity id [id] status [429]" + ) + ); + assertThat(((ElasticsearchStatusException) exception.getCause()).status(), is(RestStatus.TOO_MANY_REQUESTS)); + } + + public void testCheckForFailureStatusCode_ThrowsFor429_ShouldRetry_RetrievesFieldsFromHeaders() { + int statusCode = 429; + var statusLine = mock(StatusLine.class); + when(statusLine.getStatusCode()).thenReturn(statusCode); + var response = mock(HttpResponse.class); + when(response.getStatusLine()).thenReturn(statusLine); + var httpResult = new HttpResult(response, new byte[] {}); + + when(response.getFirstHeader(AnthropicResponseHandler.REQUESTS_LIMIT)).thenReturn( + new BasicHeader(AnthropicResponseHandler.REQUESTS_LIMIT, "3000") + ); + when(response.getFirstHeader(AnthropicResponseHandler.REMAINING_REQUESTS)).thenReturn( + new BasicHeader(AnthropicResponseHandler.REMAINING_REQUESTS, "2999") + ); + when(response.getFirstHeader(AnthropicResponseHandler.TOKENS_LIMIT)).thenReturn( + new BasicHeader(AnthropicResponseHandler.TOKENS_LIMIT, "10000") + ); + when(response.getFirstHeader(AnthropicResponseHandler.REMAINING_TOKENS)).thenReturn( + new BasicHeader(AnthropicResponseHandler.REMAINING_TOKENS, "99800") + ); + when(response.getFirstHeader(AnthropicResponseHandler.REQUEST_RESET)).thenReturn( + new BasicHeader(AnthropicResponseHandler.REQUEST_RESET, "123") + ); + when(response.getFirstHeader(AnthropicResponseHandler.TOKENS_RESET)).thenReturn( + new BasicHeader(AnthropicResponseHandler.TOKENS_RESET, "456") + ); + when(response.getFirstHeader(AnthropicResponseHandler.RETRY_AFTER)).thenReturn( + new BasicHeader(AnthropicResponseHandler.RETRY_AFTER, "2") + ); + + var error = AnthropicResponseHandler.buildRateLimitErrorMessage(httpResult); + assertThat( + error, + containsString( + "Received a rate limit status code. Token limit [10000], remaining tokens [99800], tokens reset [456]. " + + "Request limit [3000], remaining requests [2999], request reset [123]. 
Retry after [2]" + ) + ); + } + + public void testCheckForFailureStatusCode_ThrowsFor403_ShouldNotRetry() { + var exception = expectThrows(RetryException.class, () -> callCheckForFailureStatusCode(403, "id")); + assertFalse(exception.shouldRetry()); + assertThat( + exception.getCause().getMessage(), + containsString("Received a permission denied error status code for request from inference entity id [id] status [403]") + ); + assertThat(((ElasticsearchStatusException) exception.getCause()).status(), is(RestStatus.FORBIDDEN)); + } + + public void testCheckForFailureStatusCode_ThrowsFor300_ShouldNotRetry() { + var exception = expectThrows(RetryException.class, () -> callCheckForFailureStatusCode(300, "id")); + assertFalse(exception.shouldRetry()); + assertThat( + exception.getCause().getMessage(), + containsString("Unhandled redirection for request from inference entity id [id] status [300]") + ); + assertThat(((ElasticsearchStatusException) exception.getCause()).status(), is(RestStatus.MULTIPLE_CHOICES)); + } + + public void testCheckForFailureStatusCode_ThrowsFor425_ShouldNotRetry() { + var exception = expectThrows(RetryException.class, () -> callCheckForFailureStatusCode(425, "id")); + assertFalse(exception.shouldRetry()); + assertThat( + exception.getCause().getMessage(), + containsString("Received an unsuccessful status code for request from inference entity id [id] status [425]") + ); + assertThat(((ElasticsearchStatusException) exception.getCause()).status(), is(RestStatus.BAD_REQUEST)); + } + + private static void callCheckForFailureStatusCode(int statusCode, String inferenceEntityId) { + var statusLine = mock(StatusLine.class); + when(statusLine.getStatusCode()).thenReturn(statusCode); + + var httpResponse = mock(HttpResponse.class); + when(httpResponse.getStatusLine()).thenReturn(statusLine); + var header = mock(Header.class); + when(header.getElements()).thenReturn(new HeaderElement[] {}); + when(httpResponse.getFirstHeader(anyString())).thenReturn(header); + + var mockRequest = mock(Request.class); + when(mockRequest.getInferenceEntityId()).thenReturn(inferenceEntityId); + var httpResult = new HttpResult(httpResponse, new byte[] {}); + var handler = new AnthropicResponseHandler("", (request, result) -> null); + + handler.checkForFailureStatusCode(mockRequest, httpResult); + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/googlevertexai/GoogleVertexAiResponseHandlerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/googlevertexai/GoogleVertexAiResponseHandlerTests.java new file mode 100644 index 0000000000000..f2de009edec44 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/googlevertexai/GoogleVertexAiResponseHandlerTests.java @@ -0,0 +1,132 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.googlevertexai; + +import org.apache.http.Header; +import org.apache.http.HeaderElement; +import org.apache.http.HttpResponse; +import org.apache.http.StatusLine; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.external.http.retry.RetryException; +import org.elasticsearch.xpack.inference.external.request.Request; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class GoogleVertexAiResponseHandlerTests extends ESTestCase { + + public void testCheckForFailureStatusCode_DoesNotThrowFor200() { + callCheckForFailureStatusCode(200, "id"); + } + + public void testCheckForFailureStatusCode_ThrowsFor500_ShouldRetry() { + var exception = expectThrows(RetryException.class, () -> callCheckForFailureStatusCode(500, "id")); + assertTrue(exception.shouldRetry()); + assertThat( + exception.getCause().getMessage(), + containsString("Received a server error status code for request from inference entity id [id] status [500]") + ); + assertThat(((ElasticsearchStatusException) exception.getCause()).status(), is(RestStatus.BAD_REQUEST)); + } + + public void testCheckForFailureStatusCode_ThrowsFor503_ShouldRetry() { + var exception = expectThrows(RetryException.class, () -> callCheckForFailureStatusCode(503, "id")); + assertTrue(exception.shouldRetry()); + assertThat( + exception.getCause().getMessage(), + containsString( + "The Google Vertex AI service may be temporarily overloaded or down for request from inference entity id [id] status [503]" + ) + ); + assertThat(((ElasticsearchStatusException) exception.getCause()).status(), is(RestStatus.BAD_REQUEST)); + } + + public void testCheckForFailureStatusCode_ThrowsFor505_ShouldNotRetry() { + var exception = expectThrows(RetryException.class, () -> callCheckForFailureStatusCode(505, "id")); + assertFalse(exception.shouldRetry()); + assertThat( + exception.getCause().getMessage(), + containsString("Received a server error status code for request from inference entity id [id] status [505]") + ); + assertThat(((ElasticsearchStatusException) exception.getCause()).status(), is(RestStatus.BAD_REQUEST)); + } + + public void testCheckForFailureStatusCode_ThrowsFor429_ShouldRetry() { + var exception = expectThrows(RetryException.class, () -> callCheckForFailureStatusCode(429, "id")); + assertTrue(exception.shouldRetry()); + assertThat( + exception.getCause().getMessage(), + containsString("Received a rate limit status code for request from inference entity id [id] status [429]") + ); + assertThat(((ElasticsearchStatusException) exception.getCause()).status(), is(RestStatus.TOO_MANY_REQUESTS)); + } + + public void testCheckForFailureStatusCode_ThrowsFor404_ShouldNotRetry() { + var exception = expectThrows(RetryException.class, () -> callCheckForFailureStatusCode(404, "id")); + assertFalse(exception.shouldRetry()); + assertThat( + exception.getCause().getMessage(), + containsString("Resource not found at [null] for request from inference entity id [id] status [404]") + ); + assertThat(((ElasticsearchStatusException) exception.getCause()).status(), is(RestStatus.NOT_FOUND)); + } + + public void 
testCheckForFailureStatusCode_ThrowsFor403_ShouldNotRetry() { + var exception = expectThrows(RetryException.class, () -> callCheckForFailureStatusCode(403, "id")); + assertFalse(exception.shouldRetry()); + assertThat( + exception.getCause().getMessage(), + containsString("Received a permission denied error status code for request from inference entity id [id] status [403]") + ); + assertThat(((ElasticsearchStatusException) exception.getCause()).status(), is(RestStatus.FORBIDDEN)); + } + + public void testCheckForFailureStatusCode_ThrowsFor300_ShouldNotRetry() { + var exception = expectThrows(RetryException.class, () -> callCheckForFailureStatusCode(300, "id")); + assertFalse(exception.shouldRetry()); + assertThat( + exception.getCause().getMessage(), + containsString("Unhandled redirection for request from inference entity id [id] status [300]") + ); + assertThat(((ElasticsearchStatusException) exception.getCause()).status(), is(RestStatus.MULTIPLE_CHOICES)); + } + + public void testCheckForFailureStatusCode_ThrowsFor425_ShouldNotRetry() { + var exception = expectThrows(RetryException.class, () -> callCheckForFailureStatusCode(425, "id")); + assertFalse(exception.shouldRetry()); + assertThat( + exception.getCause().getMessage(), + containsString("Received an unsuccessful status code for request from inference entity id [id] status [425]") + ); + assertThat(((ElasticsearchStatusException) exception.getCause()).status(), is(RestStatus.BAD_REQUEST)); + } + + private static void callCheckForFailureStatusCode(int statusCode, String modelId) { + var statusLine = mock(StatusLine.class); + when(statusLine.getStatusCode()).thenReturn(statusCode); + + var httpResponse = mock(HttpResponse.class); + when(httpResponse.getStatusLine()).thenReturn(statusLine); + var header = mock(Header.class); + when(header.getElements()).thenReturn(new HeaderElement[] {}); + when(httpResponse.getFirstHeader(anyString())).thenReturn(header); + + var mockRequest = mock(Request.class); + when(mockRequest.getInferenceEntityId()).thenReturn(modelId); + var httpResult = new HttpResult(httpResponse, new byte[] {}); + var handler = new GoogleVertexAiResponseHandler("", (request, result) -> null); + + handler.checkForFailureStatusCode(mockRequest, httpResult); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSenderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSenderTests.java index 30bd40bdcc111..c2842a1278a49 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSenderTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/retry/RetryingHttpSenderTests.java @@ -18,9 +18,11 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.UncategorizedExecutionException; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.xpack.inference.external.http.HttpClient; import org.elasticsearch.xpack.inference.external.http.HttpResult; import org.elasticsearch.xpack.inference.external.request.HttpRequestTests; @@ -33,6 +35,7 @@ import 
java.net.UnknownHostException; import static org.elasticsearch.xpack.inference.external.http.retry.RetrySettingsTests.createDefaultRetrySettings; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; @@ -455,6 +458,86 @@ public void testSend_ReturnsFailure_WhenHttpResultsListenerCallsOnFailure_WithNo verifyNoMoreInteractions(httpClient); } + public void testSend_DoesNotRetryIndefinitely() throws IOException { + var threadPool = new TestThreadPool(getTestName()); + try { + + var httpClient = mock(HttpClient.class); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[2]; + // respond with a retryable exception + listener.onFailure(new ConnectionClosedException("failed")); + + return Void.TYPE; + }).when(httpClient).send(any(), any(), any()); + + var handler = mock(ResponseHandler.class); + + var retrier = new RetryingHttpSender( + httpClient, + mock(ThrottlerManager.class), + createDefaultRetrySettings(), + threadPool, + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); + + var listener = new PlainActionFuture(); + retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener); + + // Assert that the retrying sender stopped after max retires even though the exception is retryable + var thrownException = expectThrows(UncategorizedExecutionException.class, () -> listener.actionGet(TIMEOUT)); + assertThat(thrownException.getCause(), instanceOf(ConnectionClosedException.class)); + assertThat(thrownException.getMessage(), is("Failed execution")); + assertThat(thrownException.getSuppressed().length, is(0)); + verify(httpClient, times(RetryingHttpSender.MAX_RETIES)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); + } finally { + terminate(threadPool); + } + } + + public void testSend_DoesNotRetryIndefinitely_WithAlwaysRetryingResponseHandler() throws IOException { + var threadPool = new TestThreadPool(getTestName()); + try { + + var httpClient = mock(HttpClient.class); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[2]; + listener.onFailure(new ConnectionClosedException("failed")); + + return Void.TYPE; + }).when(httpClient).send(any(), any(), any()); + + // This handler will always tell the sender to retry + var handler = createRetryingResponseHandler(); + + var retrier = new RetryingHttpSender( + httpClient, + mock(ThrottlerManager.class), + createDefaultRetrySettings(), + threadPool, + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); + + var listener = new PlainActionFuture(); + retrier.send(mock(Logger.class), mockRequest(), HttpClientContext.create(), () -> false, handler, listener); + + // Assert that the retrying sender stopped after max retires + var thrownException = expectThrows(UncategorizedExecutionException.class, () -> listener.actionGet(TIMEOUT)); + assertThat(thrownException.getCause(), instanceOf(ConnectionClosedException.class)); + assertThat(thrownException.getMessage(), is("Failed execution")); + assertThat(thrownException.getSuppressed().length, is(0)); + verify(httpClient, times(RetryingHttpSender.MAX_RETIES)).send(any(), any(), any()); + verifyNoMoreInteractions(httpClient); + } finally { + terminate(threadPool); + } + } + private static HttpResponse mockHttpResponse() { var statusLine = mock(StatusLine.class); 
when(statusLine.getStatusCode()).thenReturn(200); @@ -499,4 +582,27 @@ private RetryingHttpSender createRetrier(HttpClient httpClient) { EsExecutors.DIRECT_EXECUTOR_SERVICE ); } + + private ResponseHandler createRetryingResponseHandler() { + // Returns a response handler that wants to retry. + // Does not need to handle parsing as it should only be used + // testing failed requests + return new ResponseHandler() { + @Override + public void validateResponse(ThrottlerManager throttlerManager, Logger logger, Request request, HttpResult result) + throws RetryException { + throw new RetryException(true, new IOException("response handler validate failed as designed")); + } + + @Override + public InferenceServiceResults parseResult(Request request, HttpResult result) throws RetryException { + throw new RetryException(true, new IOException("response handler parse failed as designed")); + } + + @Override + public String getRequestType() { + return "foo"; + } + }; + } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/BaseRequestManagerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/BaseRequestManagerTests.java index 03838896b879d..bf120be621ad3 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/BaseRequestManagerTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/BaseRequestManagerTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.xpack.inference.external.http.retry.RequestSender; import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; -import java.util.List; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; @@ -30,8 +29,7 @@ public void testRateLimitGrouping_DifferentObjectReferences_HaveSameGroup() { var manager1 = new BaseRequestManager(mock(ThreadPool.class), "id", val1, new RateLimitSettings(1)) { @Override public void execute( - String query, - List input, + InferenceInputs inferenceInputs, RequestSender requestSender, Supplier hasRequestCompletedFunction, ActionListener listener @@ -43,8 +41,7 @@ public void execute( var manager2 = new BaseRequestManager(mock(ThreadPool.class), "id", val2, new RateLimitSettings(1)) { @Override public void execute( - String query, - List input, + InferenceInputs inferenceInputs, RequestSender requestSender, Supplier hasRequestCompletedFunction, ActionListener listener @@ -62,8 +59,7 @@ public void testRateLimitGrouping_DifferentSettings_HaveDifferentGroup() { var manager1 = new BaseRequestManager(mock(ThreadPool.class), "id", val1, new RateLimitSettings(1)) { @Override public void execute( - String query, - List input, + InferenceInputs inferenceInputs, RequestSender requestSender, Supplier hasRequestCompletedFunction, ActionListener listener @@ -75,8 +71,7 @@ public void execute( var manager2 = new BaseRequestManager(mock(ThreadPool.class), "id", val1, new RateLimitSettings(2)) { @Override public void execute( - String query, - List input, + InferenceInputs inferenceInputs, RequestSender requestSender, Supplier hasRequestCompletedFunction, ActionListener listener @@ -94,8 +89,7 @@ public void testRateLimitGrouping_DifferentSettingsTimeUnit_HaveDifferentGroup() var manager1 = new BaseRequestManager(mock(ThreadPool.class), "id", val1, new RateLimitSettings(1, TimeUnit.MILLISECONDS)) { @Override public void execute( - String query, - List input, + InferenceInputs inferenceInputs, 
RequestSender requestSender, Supplier hasRequestCompletedFunction, ActionListener listener @@ -107,8 +101,7 @@ public void execute( var manager2 = new BaseRequestManager(mock(ThreadPool.class), "id", val1, new RateLimitSettings(1, TimeUnit.DAYS)) { @Override public void execute( - String query, - List input, + InferenceInputs inferenceInputs, RequestSender requestSender, Supplier hasRequestCompletedFunction, ActionListener listener diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderTests.java index 2b8b5f178b3de..79f6aa8164b75 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderTests.java @@ -106,7 +106,7 @@ public void testCreateSender_SendsRequestAndReceivesResponse() throws Exception PlainActionFuture listener = new PlainActionFuture<>(); sender.send( - OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator(getUrl(webServer), null, "key", "model", null, threadPool), + OpenAiEmbeddingsRequestManagerTests.makeCreator(getUrl(webServer), null, "key", "model", null, threadPool), new DocumentsOnlyInput(List.of("abc")), null, listener diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsExecutableRequestCreatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsRequestManagerTests.java similarity index 95% rename from x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsExecutableRequestCreatorTests.java rename to x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsRequestManagerTests.java index 37fce8d3f3a7b..eb7f7c4a0035d 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsExecutableRequestCreatorTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/OpenAiEmbeddingsRequestManagerTests.java @@ -13,7 +13,7 @@ import static org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsModelTests.createModel; -public class OpenAiEmbeddingsExecutableRequestCreatorTests { +public class OpenAiEmbeddingsRequestManagerTests { public static OpenAiEmbeddingsRequestManager makeCreator( String url, @Nullable String org, diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java index 9a45e10007643..762a3a74184a4 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorServiceTests.java @@ -131,7 +131,7 @@ public void testIsTerminated_AfterStopFromSeparateThread() { PlainActionFuture listener = new PlainActionFuture<>(); service.execute( - OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator("url", null, "key", "id", null, 
threadPool), + OpenAiEmbeddingsRequestManagerTests.makeCreator("url", null, "key", "id", null, threadPool), new DocumentsOnlyInput(List.of()), null, listener @@ -208,7 +208,7 @@ public void testTaskThrowsError_CallsOnFailure() { PlainActionFuture listener = new PlainActionFuture<>(); service.execute( - OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator("url", null, "key", "id", null, threadPool), + OpenAiEmbeddingsRequestManagerTests.makeCreator("url", null, "key", "id", null, threadPool), new DocumentsOnlyInput(List.of()), null, listener diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestManagerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestManagerTests.java index 291de740aca34..8b7c01ae133cf 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestManagerTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestManagerTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -44,7 +43,7 @@ public static RequestManager createMock(RequestSender requestSender, String infe doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[4]; + ActionListener listener = (ActionListener) invocation.getArguments()[3]; requestSender.send( mock(Logger.class), RequestTests.mockRequest(inferenceEntityId), @@ -55,7 +54,7 @@ public static RequestManager createMock(RequestSender requestSender, String infe ); return Void.TYPE; - }).when(mockManager).execute(any(), anyList(), any(), any(), any()); + }).when(mockManager).execute(any(), any(), any(), any()); // just return something consistent so the hashing works when(mockManager.rateLimitGrouping()).thenReturn(inferenceEntityId); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTaskTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTaskTests.java index 13c395180cd16..c839c266e9320 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTaskTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTaskTests.java @@ -59,7 +59,7 @@ public void testExecuting_DoesNotCallOnFailureForTimeout_AfterIllegalArgumentExc ActionListener listener = mock(ActionListener.class); var requestTask = new RequestTask( - OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator("url", null, "key", "model", null, "id", threadPool), + OpenAiEmbeddingsRequestManagerTests.makeCreator("url", null, "key", "model", null, "id", threadPool), new DocumentsOnlyInput(List.of("abc")), TimeValue.timeValueMillis(1), mockThreadPool, @@ -79,7 +79,7 @@ public void testRequest_ReturnsTimeoutException() { PlainActionFuture listener = new PlainActionFuture<>(); var requestTask = new RequestTask( - OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator("url", null, "key", "model", null, "id", threadPool), + OpenAiEmbeddingsRequestManagerTests.makeCreator("url", null, "key", 
"model", null, "id", threadPool), new DocumentsOnlyInput(List.of("abc")), TimeValue.timeValueMillis(1), threadPool, @@ -105,7 +105,7 @@ public void testRequest_DoesNotCallOnFailureTwiceWhenTimingOut() throws Exceptio }).when(listener).onFailure(any()); var requestTask = new RequestTask( - OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator("url", null, "key", "model", null, "id", threadPool), + OpenAiEmbeddingsRequestManagerTests.makeCreator("url", null, "key", "model", null, "id", threadPool), new DocumentsOnlyInput(List.of("abc")), TimeValue.timeValueMillis(1), threadPool, @@ -137,7 +137,7 @@ public void testRequest_DoesNotCallOnResponseAfterTimingOut() throws Exception { }).when(listener).onFailure(any()); var requestTask = new RequestTask( - OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator("url", null, "key", "model", null, "id", threadPool), + OpenAiEmbeddingsRequestManagerTests.makeCreator("url", null, "key", "model", null, "id", threadPool), new DocumentsOnlyInput(List.of("abc")), TimeValue.timeValueMillis(1), threadPool, @@ -167,7 +167,7 @@ public void testRequest_DoesNotCallOnFailureForTimeout_AfterAlreadyCallingOnResp ActionListener listener = mock(ActionListener.class); var requestTask = new RequestTask( - OpenAiEmbeddingsExecutableRequestCreatorTests.makeCreator("url", null, "key", "model", null, "id", threadPool), + OpenAiEmbeddingsRequestManagerTests.makeCreator("url", null, "key", "model", null, "id", threadPool), new DocumentsOnlyInput(List.of("abc")), TimeValue.timeValueMillis(1), mockThreadPool, diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAI21LabsCompletionRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAI21LabsCompletionRequestEntityTests.java new file mode 100644 index 0000000000000..b91aab5410048 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAI21LabsCompletionRequestEntityTests.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import org.elasticsearch.test.ESTestCase; + +import java.util.List; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHasMessage; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopKInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.getConverseRequest; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockAI21LabsCompletionRequestEntityTests extends ESTestCase { + public void testRequestEntity_CreatesProperRequest() { + var request = new AmazonBedrockAI21LabsCompletionRequestEntity(List.of("test message"), null, null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTemperature() { + var request = new AmazonBedrockAI21LabsCompletionRequestEntity(List.of("test message"), 1.0, null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertTrue(doesConverseRequestHaveTemperatureInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTopP() { + var request = new AmazonBedrockAI21LabsCompletionRequestEntity(List.of("test message"), null, 1.0, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + 
assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertTrue(doesConverseRequestHaveTopPInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithMaxTokens() { + var request = new AmazonBedrockAI21LabsCompletionRequestEntity(List.of("test message"), null, null, 128); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertTrue(doesConverseRequestHaveMaxTokensInput(builtRequest, 128)); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAnthropicCompletionRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAnthropicCompletionRequestEntityTests.java new file mode 100644 index 0000000000000..89d5fec7efba6 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockAnthropicCompletionRequestEntityTests.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import org.elasticsearch.test.ESTestCase; + +import java.util.List; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHasMessage; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopKInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTopKInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.getConverseRequest; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockAnthropicCompletionRequestEntityTests extends ESTestCase { + public void testRequestEntity_CreatesProperRequest() { + var request = new AmazonBedrockAnthropicCompletionRequestEntity(List.of("test message"), null, null, null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTemperature() { + var request = new AmazonBedrockAnthropicCompletionRequestEntity(List.of("test message"), 1.0, null, null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertTrue(doesConverseRequestHaveTemperatureInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTopP() { + var request = new AmazonBedrockAnthropicCompletionRequestEntity(List.of("test message"), null, 1.0, null, null); + var builtRequest = getConverseRequest("testmodel", request); + 
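+        // Only the top_p value should be carried into the Converse request built from the Anthropic entity.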
assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertTrue(doesConverseRequestHaveTopPInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithMaxTokens() { + var request = new AmazonBedrockAnthropicCompletionRequestEntity(List.of("test message"), null, null, null, 128); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertTrue(doesConverseRequestHaveMaxTokensInput(builtRequest, 128)); + } + + public void testRequestEntity_CreatesProperRequest_WithTopK() { + var request = new AmazonBedrockAnthropicCompletionRequestEntity(List.of("test message"), null, null, 1.0, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertTrue(doesConverseRequestHaveTopKInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockCohereCompletionRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockCohereCompletionRequestEntityTests.java new file mode 100644 index 0000000000000..8df5c7f32e529 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockCohereCompletionRequestEntityTests.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import org.elasticsearch.test.ESTestCase; + +import java.util.List; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHasMessage; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopKInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTopKInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.getConverseRequest; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockCohereCompletionRequestEntityTests extends ESTestCase { + public void testRequestEntity_CreatesProperRequest() { + var request = new AmazonBedrockCohereCompletionRequestEntity(List.of("test message"), null, null, null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTemperature() { + var request = new AmazonBedrockCohereCompletionRequestEntity(List.of("test message"), 1.0, null, null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertTrue(doesConverseRequestHaveTemperatureInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTopP() { + var request = new AmazonBedrockCohereCompletionRequestEntity(List.of("test message"), null, 1.0, null, null); + var builtRequest = getConverseRequest("testmodel", request); + 
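+        // Only the top_p value should appear in the Converse request built from the Cohere entity.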
assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertTrue(doesConverseRequestHaveTopPInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithMaxTokens() { + var request = new AmazonBedrockCohereCompletionRequestEntity(List.of("test message"), null, null, null, 128); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertTrue(doesConverseRequestHaveMaxTokensInput(builtRequest, 128)); + } + + public void testRequestEntity_CreatesProperRequest_WithTopK() { + var request = new AmazonBedrockCohereCompletionRequestEntity(List.of("test message"), null, null, 1.0, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertTrue(doesConverseRequestHaveTopKInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockConverseRequestUtils.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockConverseRequestUtils.java new file mode 100644 index 0000000000000..cbbe3c5554967 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockConverseRequestUtils.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import com.amazonaws.services.bedrockruntime.model.ContentBlock; +import com.amazonaws.services.bedrockruntime.model.ConverseRequest; +import com.amazonaws.services.bedrockruntime.model.Message; + +import org.elasticsearch.core.Strings; + +public final class AmazonBedrockConverseRequestUtils { + public static ConverseRequest getConverseRequest(String modelId, AmazonBedrockConverseRequestEntity requestEntity) { + var converseRequest = new ConverseRequest().withModelId(modelId); + converseRequest = requestEntity.addMessages(converseRequest); + converseRequest = requestEntity.addInferenceConfig(converseRequest); + converseRequest = requestEntity.addAdditionalModelFields(converseRequest); + return converseRequest; + } + + public static boolean doesConverseRequestHasMessage(ConverseRequest converseRequest, String expectedMessage) { + for (Message message : converseRequest.getMessages()) { + var content = message.getContent(); + for (ContentBlock contentBlock : content) { + if (contentBlock.getText().equals(expectedMessage)) { + return true; + } + } + } + return false; + } + + public static boolean doesConverseRequestHaveAnyTemperatureInput(ConverseRequest converseRequest) { + return converseRequest.getInferenceConfig() != null + && converseRequest.getInferenceConfig().getTemperature() != null + && (converseRequest.getInferenceConfig().getTemperature().isNaN() == false); + } + + public static boolean doesConverseRequestHaveAnyTopPInput(ConverseRequest converseRequest) { + return converseRequest.getInferenceConfig() != null + && converseRequest.getInferenceConfig().getTopP() != null + && (converseRequest.getInferenceConfig().getTopP().isNaN() == false); + } + + public static boolean doesConverseRequestHaveAnyMaxTokensInput(ConverseRequest converseRequest) { + return converseRequest.getInferenceConfig() != null && converseRequest.getInferenceConfig().getMaxTokens() != null; + } + + public static boolean doesConverseRequestHaveTemperatureInput(ConverseRequest converseRequest, Double temperature) { + return doesConverseRequestHaveAnyTemperatureInput(converseRequest) + && converseRequest.getInferenceConfig().getTemperature().equals(temperature.floatValue()); + } + + public static boolean doesConverseRequestHaveTopPInput(ConverseRequest converseRequest, Double topP) { + return doesConverseRequestHaveAnyTopPInput(converseRequest) + && converseRequest.getInferenceConfig().getTopP().equals(topP.floatValue()); + } + + public static boolean doesConverseRequestHaveMaxTokensInput(ConverseRequest converseRequest, Integer maxTokens) { + return doesConverseRequestHaveAnyMaxTokensInput(converseRequest) + && converseRequest.getInferenceConfig().getMaxTokens().equals(maxTokens); + } + + public static boolean doesConverseRequestHaveAnyTopKInput(ConverseRequest converseRequest) { + if (converseRequest.getAdditionalModelResponseFieldPaths() == null) { + return false; + } + + for (String fieldPath : converseRequest.getAdditionalModelResponseFieldPaths()) { + if (fieldPath.contains("{\"top_k\":")) { + return true; + } + } + return false; + } + + public static boolean doesConverseRequestHaveTopKInput(ConverseRequest converseRequest, Double topK) { + if (doesConverseRequestHaveAnyTopKInput(converseRequest) == false) { + return false; + } + + var checkString = Strings.format("{\"top_k\":%f}", topK.floatValue()); + for (String fieldPath : converseRequest.getAdditionalModelResponseFieldPaths()) { + if 
(fieldPath.contains(checkString)) { + return true; + } + } + return false; + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMetaCompletionRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMetaCompletionRequestEntityTests.java new file mode 100644 index 0000000000000..fa482669a0bb2 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMetaCompletionRequestEntityTests.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import org.elasticsearch.test.ESTestCase; + +import java.util.List; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHasMessage; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopKInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.getConverseRequest; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockMetaCompletionRequestEntityTests extends ESTestCase { + public void testRequestEntity_CreatesProperRequest() { + var request = new AmazonBedrockMetaCompletionRequestEntity(List.of("test message"), null, null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTemperature() { + var request = new AmazonBedrockMetaCompletionRequestEntity(List.of("test message"), 1.0, 
null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertTrue(doesConverseRequestHaveTemperatureInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTopP() { + var request = new AmazonBedrockMetaCompletionRequestEntity(List.of("test message"), null, 1.0, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertTrue(doesConverseRequestHaveTopPInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithMaxTokens() { + var request = new AmazonBedrockMetaCompletionRequestEntity(List.of("test message"), null, null, 128); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertTrue(doesConverseRequestHaveMaxTokensInput(builtRequest, 128)); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMistralCompletionRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMistralCompletionRequestEntityTests.java new file mode 100644 index 0000000000000..788625d3702b8 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockMistralCompletionRequestEntityTests.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import org.elasticsearch.test.ESTestCase; + +import java.util.List; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHasMessage; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopKInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTopKInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.getConverseRequest; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockMistralCompletionRequestEntityTests extends ESTestCase { + public void testRequestEntity_CreatesProperRequest() { + var request = new AmazonBedrockMistralCompletionRequestEntity(List.of("test message"), null, null, null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTemperature() { + var request = new AmazonBedrockMistralCompletionRequestEntity(List.of("test message"), 1.0, null, null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertTrue(doesConverseRequestHaveTemperatureInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTopP() { + var request = new AmazonBedrockMistralCompletionRequestEntity(List.of("test message"), null, 1.0, null, null); + var builtRequest = getConverseRequest("testmodel", request); + 
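+        // Only the top_p value should appear in the Converse request built from the Mistral entity.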
assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertTrue(doesConverseRequestHaveTopPInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithMaxTokens() { + var request = new AmazonBedrockMistralCompletionRequestEntity(List.of("test message"), null, null, null, 128); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertTrue(doesConverseRequestHaveMaxTokensInput(builtRequest, 128)); + } + + public void testRequestEntity_CreatesProperRequest_WithTopK() { + var request = new AmazonBedrockMistralCompletionRequestEntity(List.of("test message"), null, null, 1.0, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertTrue(doesConverseRequestHaveTopKInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockTitanCompletionRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockTitanCompletionRequestEntityTests.java new file mode 100644 index 0000000000000..79fa387876c8b --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/completion/AmazonBedrockTitanCompletionRequestEntityTests.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion; + +import org.elasticsearch.test.ESTestCase; + +import java.util.List; + +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHasMessage; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopKInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveAnyTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveMaxTokensInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTemperatureInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.doesConverseRequestHaveTopPInput; +import static org.elasticsearch.xpack.inference.external.request.amazonbedrock.completion.AmazonBedrockConverseRequestUtils.getConverseRequest; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockTitanCompletionRequestEntityTests extends ESTestCase { + public void testRequestEntity_CreatesProperRequest() { + var request = new AmazonBedrockTitanCompletionRequestEntity(List.of("test message"), null, null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTemperature() { + var request = new AmazonBedrockTitanCompletionRequestEntity(List.of("test message"), 1.0, null, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertTrue(doesConverseRequestHaveTemperatureInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithTopP() { + var request = new AmazonBedrockTitanCompletionRequestEntity(List.of("test message"), null, 1.0, null); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + 
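+        // With only top_p supplied, the Titan entity should leave temperature, top_k, and max tokens unset.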
assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertTrue(doesConverseRequestHaveTopPInput(builtRequest, 1.0)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyMaxTokensInput(builtRequest)); + } + + public void testRequestEntity_CreatesProperRequest_WithMaxTokens() { + var request = new AmazonBedrockTitanCompletionRequestEntity(List.of("test message"), null, null, 128); + var builtRequest = getConverseRequest("testmodel", request); + assertThat(builtRequest.getModelId(), is("testmodel")); + assertThat(doesConverseRequestHasMessage(builtRequest, "test message"), is(true)); + assertFalse(doesConverseRequestHaveAnyTemperatureInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopPInput(builtRequest)); + assertFalse(doesConverseRequestHaveAnyTopKInput(builtRequest)); + assertTrue(doesConverseRequestHaveMaxTokensInput(builtRequest, 128)); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockCohereEmbeddingsRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockCohereEmbeddingsRequestEntityTests.java new file mode 100644 index 0000000000000..fd8114f889d6a --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockCohereEmbeddingsRequestEntityTests.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockJsonBuilder; + +import java.io.IOException; +import java.util.List; + +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockCohereEmbeddingsRequestEntityTests extends ESTestCase { + public void testRequestEntity_GeneratesExpectedJsonBody() throws IOException { + var entity = new AmazonBedrockCohereEmbeddingsRequestEntity(List.of("test input")); + var builder = new AmazonBedrockJsonBuilder(entity); + var result = builder.getStringContent(); + assertThat(result, is("{\"texts\":[\"test input\"],\"input_type\":\"search_document\"}")); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockTitanEmbeddingsRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockTitanEmbeddingsRequestEntityTests.java new file mode 100644 index 0000000000000..da98fa251fdc8 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/amazonbedrock/embeddings/AmazonBedrockTitanEmbeddingsRequestEntityTests.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.amazonbedrock.embeddings; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.external.request.amazonbedrock.AmazonBedrockJsonBuilder; + +import java.io.IOException; + +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockTitanEmbeddingsRequestEntityTests extends ESTestCase { + public void testRequestEntity_GeneratesExpectedJsonBody() throws IOException { + var entity = new AmazonBedrockTitanEmbeddingsRequestEntity("test input"); + var builder = new AmazonBedrockJsonBuilder(entity); + var result = builder.getStringContent(); + assertThat(result, is("{\"inputText\":\"test input\"}")); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/anthropic/AnthropicChatCompletionRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/anthropic/AnthropicChatCompletionRequestEntityTests.java new file mode 100644 index 0000000000000..f293a59e47d11 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/anthropic/AnthropicChatCompletionRequestEntityTests.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.anthropic; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionServiceSettings; +import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionTaskSettings; + +import java.io.IOException; +import java.util.List; + +import static org.hamcrest.CoreMatchers.is; + +public class AnthropicChatCompletionRequestEntityTests extends ESTestCase { + + public void testXContent() throws IOException { + var entity = new AnthropicChatCompletionRequestEntity( + List.of("abc"), + new AnthropicChatCompletionServiceSettings("model", null), + new AnthropicChatCompletionTaskSettings(1, -1.0, 1.2, 3) + ); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, is(""" + {"messages":[{"role":"user","content":"abc"}],"model":"model","max_tokens":1,"temperature":-1.0,"top_p":1.2,"top_k":3}""")); + + } + + public void testXContent_WithoutTemperature() throws IOException { + var entity = new AnthropicChatCompletionRequestEntity( + List.of("abc"), + new AnthropicChatCompletionServiceSettings("model", null), + new AnthropicChatCompletionTaskSettings(1, null, 1.2, 3) + ); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, is(""" + {"messages":[{"role":"user","content":"abc"}],"model":"model","max_tokens":1,"top_p":1.2,"top_k":3}""")); + + } +} diff --git 
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/anthropic/AnthropicChatCompletionRequestTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/anthropic/AnthropicChatCompletionRequestTests.java new file mode 100644 index 0000000000000..0a606c522c13e --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/anthropic/AnthropicChatCompletionRequestTests.java @@ -0,0 +1,114 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.anthropic; + +import org.apache.http.HttpHeaders; +import org.apache.http.client.methods.HttpPost; +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionModelTests; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; +import static org.hamcrest.Matchers.aMapWithSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +public class AnthropicChatCompletionRequestTests extends ESTestCase { + + public void testCreateRequest() throws IOException { + var request = createRequest("secret", "abc", "model", 2); + var httpRequest = request.createHttpRequest(); + + assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); + var httpPost = (HttpPost) httpRequest.httpRequestBase(); + + assertThat(httpPost.getURI().toString(), is(buildAnthropicUri())); + assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType())); + assertThat(httpPost.getLastHeader(AnthropicRequestUtils.X_API_KEY).getValue(), is("secret")); + assertThat( + httpPost.getLastHeader(AnthropicRequestUtils.VERSION).getValue(), + is(AnthropicRequestUtils.ANTHROPIC_VERSION_2023_06_01) + ); + + var requestMap = entityAsMap(httpPost.getEntity().getContent()); + assertThat(requestMap, aMapWithSize(3)); + assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abc")))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("max_tokens"), is(2)); + } + + public void testCreateRequest_TestUrl() throws IOException { + var request = createRequest("fake_url", "secret", "abc", "model", 2); + var httpRequest = request.createHttpRequest(); + + assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); + var httpPost = (HttpPost) httpRequest.httpRequestBase(); + + assertThat(httpPost.getURI().toString(), is("fake_url")); + assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType())); + assertThat(httpPost.getLastHeader(AnthropicRequestUtils.X_API_KEY).getValue(), is("secret")); + assertThat( + httpPost.getLastHeader(AnthropicRequestUtils.VERSION).getValue(), + is(AnthropicRequestUtils.ANTHROPIC_VERSION_2023_06_01) + ); + + var requestMap = entityAsMap(httpPost.getEntity().getContent()); + assertThat(requestMap, aMapWithSize(3)); + assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abc")))); + 
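+        // The request body should echo the model and max_tokens values passed to createRequest.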
assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("max_tokens"), is(2)); + } + + public void testTruncate_DoesNotReduceInputTextSize() throws IOException { + var request = createRequest("secret", "abc", "model", 2); + + var truncatedRequest = request.truncate(); + assertThat(request.getURI().toString(), is(buildAnthropicUri())); + + var httpRequest = truncatedRequest.createHttpRequest(); + assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); + + var httpPost = (HttpPost) httpRequest.httpRequestBase(); + var requestMap = entityAsMap(httpPost.getEntity().getContent()); + assertThat(requestMap, aMapWithSize(3)); + + // We do not truncate for Anthropic chat completions + assertThat(requestMap.get("messages"), is(List.of(Map.of("role", "user", "content", "abc")))); + assertThat(requestMap.get("model"), is("model")); + assertThat(requestMap.get("max_tokens"), is(2)); + } + + public void testTruncationInfo_ReturnsNull() { + var request = createRequest("secret", "abc", "model", 2); + assertNull(request.getTruncationInfo()); + } + + public static AnthropicChatCompletionRequest createRequest(String apiKey, String input, String model, int maxTokens) { + var chatCompletionModel = AnthropicChatCompletionModelTests.createChatCompletionModel(apiKey, model, maxTokens); + return new AnthropicChatCompletionRequest(List.of(input), chatCompletionModel); + } + + public static AnthropicChatCompletionRequest createRequest(String url, String apiKey, String input, String model, int maxTokens) { + var chatCompletionModel = AnthropicChatCompletionModelTests.createChatCompletionModel(url, apiKey, model, maxTokens); + return new AnthropicChatCompletionRequest(List.of(input), chatCompletionModel); + } + + private static String buildAnthropicUri() { + return Strings.format( + "https://%s/%s/%s", + AnthropicRequestUtils.HOST, + AnthropicRequestUtils.API_VERSION_1, + AnthropicRequestUtils.MESSAGES_PATH + ); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntityTests.java new file mode 100644 index 0000000000000..f4912e0862e60 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntityTests.java @@ -0,0 +1,106 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.googlevertexai; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.xpack.inference.MatchersUtils.equalToIgnoringWhitespaceInJsonString; + +public class GoogleVertexAiEmbeddingsRequestEntityTests extends ESTestCase { + + public void testToXContent_SingleEmbeddingRequest_WritesAutoTruncationIfDefined() throws IOException { + var entity = new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc"), true); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, equalToIgnoringWhitespaceInJsonString(""" + { + "instances": [ + { + "content": "abc" + } + ], + "parameters": { + "autoTruncate": true + } + } + """)); + } + + public void testToXContent_SingleEmbeddingRequest_DoesNotWriteAutoTruncationIfNotDefined() throws IOException { + var entity = new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc"), null); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, equalToIgnoringWhitespaceInJsonString(""" + { + "instances": [ + { + "content": "abc" + } + ] + } + """)); + } + + public void testToXContent_MultipleEmbeddingsRequest_WritesAutoTruncationIfDefined() throws IOException { + var entity = new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc", "def"), true); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, equalToIgnoringWhitespaceInJsonString(""" + { + "instances": [ + { + "content": "abc" + }, + { + "content": "def" + } + ], + "parameters": { + "autoTruncate": true + } + } + """)); + } + + public void testToXContent_MultipleEmbeddingsRequest_DoesNotWriteAutoTruncationIfNotDefined() throws IOException { + var entity = new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc", "def"), null); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, equalToIgnoringWhitespaceInJsonString(""" + { + "instances": [ + { + "content": "abc" + }, + { + "content": "def" + } + ] + } + """)); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestTests.java new file mode 100644 index 0000000000000..b28fd8d3a0cf9 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestTests.java @@ -0,0 +1,129 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.googlevertexai; + +import org.apache.http.HttpHeaders; +import org.apache.http.client.methods.HttpPost; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.common.Truncator; +import org.elasticsearch.xpack.inference.common.TruncatorTests; +import org.elasticsearch.xpack.inference.external.request.Request; +import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsModel; +import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsModelTests; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; +import static org.hamcrest.Matchers.aMapWithSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +public class GoogleVertexAiEmbeddingsRequestTests extends ESTestCase { + + private static final String AUTH_HEADER_VALUE = "foo"; + + public void testCreateRequest_WithoutDimensionsSet_And_WithoutAutoTruncateSet() throws IOException { + var model = "model"; + var input = "input"; + + var request = createRequest(model, input, null); + var httpRequest = request.createHttpRequest(); + + assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); + var httpPost = (HttpPost) httpRequest.httpRequestBase(); + + assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType())); + assertThat(httpPost.getLastHeader(HttpHeaders.AUTHORIZATION).getValue(), is(AUTH_HEADER_VALUE)); + + var requestMap = entityAsMap(httpPost.getEntity().getContent()); + assertThat(requestMap, aMapWithSize(1)); + assertThat(requestMap, is(Map.of("instances", List.of(Map.of("content", "input"))))); + } + + public void testCreateRequest_WithAutoTruncateSet() throws IOException { + var model = "model"; + var input = "input"; + var autoTruncate = true; + + var request = createRequest(model, input, autoTruncate); + var httpRequest = request.createHttpRequest(); + + assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); + var httpPost = (HttpPost) httpRequest.httpRequestBase(); + + assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType())); + assertThat(httpPost.getLastHeader(HttpHeaders.AUTHORIZATION).getValue(), is(AUTH_HEADER_VALUE)); + + var requestMap = entityAsMap(httpPost.getEntity().getContent()); + assertThat(requestMap, aMapWithSize(2)); + assertThat(requestMap, is(Map.of("instances", List.of(Map.of("content", "input")), "parameters", Map.of("autoTruncate", true)))); + } + + public void testTruncate_ReducesInputTextSizeByHalf() throws IOException { + var model = "model"; + var input = "abcd"; + + var request = createRequest(model, input, null); + var truncatedRequest = request.truncate(); + var httpRequest = truncatedRequest.createHttpRequest(); + + assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); + var httpPost = (HttpPost) httpRequest.httpRequestBase(); + + assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType())); + assertThat(httpPost.getLastHeader(HttpHeaders.AUTHORIZATION).getValue(), is(AUTH_HEADER_VALUE)); + + var requestMap = entityAsMap(httpPost.getEntity().getContent()); + assertThat(requestMap, aMapWithSize(1)); + 
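+        // Truncation halves the original "abcd" input down to "ab".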
assertThat(requestMap, is(Map.of("instances", List.of(Map.of("content", "ab"))))); + } + + private static GoogleVertexAiEmbeddingsRequest createRequest(String modelId, String input, @Nullable Boolean autoTruncate) { + var embeddingsModel = GoogleVertexAiEmbeddingsModelTests.createModel(modelId, autoTruncate); + + return new GoogleVertexAiEmbeddingsWithoutAuthRequest( + TruncatorTests.createTruncator(), + new Truncator.TruncationResult(List.of(input), new boolean[] { false }), + embeddingsModel + ); + } + + /** + * We use this class to fake the auth implementation to avoid static mocking of {@link GoogleVertexAiRequest} + */ + private static class GoogleVertexAiEmbeddingsWithoutAuthRequest extends GoogleVertexAiEmbeddingsRequest { + + GoogleVertexAiEmbeddingsWithoutAuthRequest( + Truncator truncator, + Truncator.TruncationResult input, + GoogleVertexAiEmbeddingsModel model + ) { + super(truncator, input, model); + } + + @Override + public void decorateWithAuth(HttpPost httpPost) { + httpPost.setHeader(HttpHeaders.AUTHORIZATION, AUTH_HEADER_VALUE); + } + + @Override + public Request truncate() { + GoogleVertexAiEmbeddingsRequest embeddingsRequest = (GoogleVertexAiEmbeddingsRequest) super.truncate(); + return new GoogleVertexAiEmbeddingsWithoutAuthRequest( + embeddingsRequest.truncator(), + embeddingsRequest.truncationResult(), + embeddingsRequest.model() + ); + } + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiRerankRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiRerankRequestEntityTests.java new file mode 100644 index 0000000000000..fd18d2573efcc --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiRerankRequestEntityTests.java @@ -0,0 +1,115 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.googlevertexai; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.xpack.inference.MatchersUtils.equalToIgnoringWhitespaceInJsonString; +import static org.hamcrest.MatcherAssert.assertThat; + +public class GoogleVertexAiRerankRequestEntityTests extends ESTestCase { + public void testXContent_SingleRequest_WritesModelAndTopNIfDefined() throws IOException { + var entity = new GoogleVertexAiRerankRequestEntity("query", List.of("abc"), "model", 8); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, equalToIgnoringWhitespaceInJsonString(""" + { + "model": "model", + "query": "query", + "records": [ + { + "id": "0", + "content": "abc" + } + ], + "topN": 8 + } + """)); + } + + public void testXContent_SingleRequest_DoesNotWriteModelAndTopNIfNull() throws IOException { + var entity = new GoogleVertexAiRerankRequestEntity("query", List.of("abc"), null, null); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, equalToIgnoringWhitespaceInJsonString(""" + { + "query": "query", + "records": [ + { + "id": "0", + "content": "abc" + } + ] + } + """)); + } + + public void testXContent_MultipleRequests_WritesModelAndTopNIfDefined() throws IOException { + var entity = new GoogleVertexAiRerankRequestEntity("query", List.of("abc", "def"), "model", 8); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, equalToIgnoringWhitespaceInJsonString(""" + { + "model": "model", + "query": "query", + "records": [ + { + "id": "0", + "content": "abc" + }, + { + "id": "1", + "content": "def" + } + ], + "topN": 8 + } + """)); + } + + public void testXContent_MultipleRequests_DoesNotWriteModelAndTopNIfNull() throws IOException { + var entity = new GoogleVertexAiRerankRequestEntity("query", List.of("abc", "def"), null, null); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, equalToIgnoringWhitespaceInJsonString(""" + { + "query": "query", + "records": [ + { + "id": "0", + "content": "abc" + }, + { + "id": "1", + "content": "def" + } + ] + } + """)); + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiRerankRequestTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiRerankRequestTests.java new file mode 100644 index 0000000000000..811adb6612a4e --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiRerankRequestTests.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.request.googlevertexai; + +import org.apache.http.HttpHeaders; +import org.apache.http.client.methods.HttpPost; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankModel; +import org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankModelTests; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; +import static org.hamcrest.Matchers.aMapWithSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.sameInstance; + +public class GoogleVertexAiRerankRequestTests extends ESTestCase { + + private static final String AUTH_HEADER_VALUE = "foo"; + + public void testCreateRequest_WithoutModelSet_And_WithoutTopNSet() throws IOException { + var input = "input"; + var query = "query"; + + var request = createRequest(query, input, null, null); + var httpRequest = request.createHttpRequest(); + + assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); + var httpPost = (HttpPost) httpRequest.httpRequestBase(); + + assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType())); + assertThat(httpPost.getLastHeader(HttpHeaders.AUTHORIZATION).getValue(), is(AUTH_HEADER_VALUE)); + + var requestMap = entityAsMap(httpPost.getEntity().getContent()); + + assertThat(requestMap, aMapWithSize(2)); + assertThat(requestMap.get("records"), is(List.of(Map.of("id", "0", "content", input)))); + assertThat(requestMap.get("query"), is(query)); + } + + public void testCreateRequest_WithTopNSet() throws IOException { + var input = "input"; + var query = "query"; + var topN = 1; + + var request = createRequest(query, input, null, topN); + var httpRequest = request.createHttpRequest(); + + assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); + var httpPost = (HttpPost) httpRequest.httpRequestBase(); + + assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType())); + assertThat(httpPost.getLastHeader(HttpHeaders.AUTHORIZATION).getValue(), is(AUTH_HEADER_VALUE)); + + var requestMap = entityAsMap(httpPost.getEntity().getContent()); + + assertThat(requestMap, aMapWithSize(3)); + assertThat(requestMap.get("records"), is(List.of(Map.of("id", "0", "content", input)))); + assertThat(requestMap.get("query"), is(query)); + assertThat(requestMap.get("topN"), is(topN)); + } + + public void testCreateRequest_WithModelSet() throws IOException { + var input = "input"; + var query = "query"; + var modelId = "model"; + + var request = createRequest(query, input, modelId, null); + var httpRequest = request.createHttpRequest(); + + assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); + var httpPost = (HttpPost) httpRequest.httpRequestBase(); + + assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType())); + assertThat(httpPost.getLastHeader(HttpHeaders.AUTHORIZATION).getValue(), is(AUTH_HEADER_VALUE)); + + var requestMap = 
entityAsMap(httpPost.getEntity().getContent()); + + assertThat(requestMap, aMapWithSize(3)); + assertThat(requestMap.get("records"), is(List.of(Map.of("id", "0", "content", input)))); + assertThat(requestMap.get("query"), is(query)); + assertThat(requestMap.get("model"), is(modelId)); + } + + public void testTruncate_DoesNotTruncate() { + var request = createRequest("query", "input", null, null); + var truncatedRequest = request.truncate(); + + assertThat(truncatedRequest, sameInstance(request)); + } + + private static GoogleVertexAiRerankRequest createRequest(String query, String input, @Nullable String modelId, @Nullable Integer topN) { + var rerankModel = GoogleVertexAiRerankModelTests.createModel(modelId, topN); + + return new GoogleVertexAiRerankWithoutAuthRequest(query, List.of(input), rerankModel); + } + + /** + * We use this class to fake the auth implementation to avoid static mocking of {@link GoogleVertexAiRequest} + */ + private static class GoogleVertexAiRerankWithoutAuthRequest extends GoogleVertexAiRerankRequest { + GoogleVertexAiRerankWithoutAuthRequest(String query, List<String> input, GoogleVertexAiRerankModel model) { + super(query, input, model); + } + + @Override + public void decorateWithAuth(HttpPost httpPost) { + httpPost.setHeader(HttpHeaders.AUTHORIZATION, AUTH_HEADER_VALUE); + } + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/AzureAndOpenAiExternalResponseHandlerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/AzureAndOpenAiExternalResponseHandlerTests.java index 9ef9ab4daa0ae..53bb38943d35b 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/AzureAndOpenAiExternalResponseHandlerTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/AzureAndOpenAiExternalResponseHandlerTests.java @@ -45,7 +45,7 @@ public void testCheckForFailureStatusCode() { var handler = new AzureMistralOpenAiExternalResponseHandler( "", (request, result) -> null, - AzureMistralOpenAiErrorResponseEntity::fromResponse + ErrorMessageResponseEntity::fromResponse ); // 200 ok diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/AzureAndOpenAiErrorResponseEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/ErrorMessageResponseEntityTests.java similarity index 64% rename from x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/AzureAndOpenAiErrorResponseEntityTests.java rename to x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/ErrorMessageResponseEntityTests.java index 48a560341f392..d57d1537f6c30 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/AzureAndOpenAiErrorResponseEntityTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/ErrorMessageResponseEntityTests.java @@ -12,10 +12,12 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.inference.external.http.HttpResult; +import java.nio.charset.StandardCharsets; + import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.mock; -public class AzureAndOpenAiErrorResponseEntityTests extends ESTestCase { +public class ErrorMessageResponseEntityTests extends ESTestCase { private static HttpResult getMockResult(String
jsonString) { var response = mock(HttpResponse.class); @@ -26,23 +28,38 @@ public void testErrorResponse_ExtractsError() { var result = getMockResult(""" {"error":{"message":"test_error_message"}}"""); - var error = AzureMistralOpenAiErrorResponseEntity.fromResponse(result); + var error = ErrorMessageResponseEntity.fromResponse(result); assertNotNull(error); assertThat(error.getErrorMessage(), is("test_error_message")); } + public void testFromResponse_noMessage() { + String responseJson = """ + { + "error": { + "type": "not_found_error", + } + } + """; + + var errorMessage = ErrorMessageResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ); + assertNull(errorMessage); + } + public void testErrorResponse_ReturnsNullIfNoError() { var result = getMockResult(""" {"noerror":true}"""); - var error = AzureMistralOpenAiErrorResponseEntity.fromResponse(result); + var error = ErrorMessageResponseEntity.fromResponse(result); assertNull(error); } public void testErrorResponse_ReturnsNullIfNotJson() { var result = getMockResult("not a json string"); - var error = AzureMistralOpenAiErrorResponseEntity.fromResponse(result); + var error = ErrorMessageResponseEntity.fromResponse(result); assertNull(error); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/XContentUtilsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/XContentUtilsTests.java index e1d786819a536..4ae860f394022 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/XContentUtilsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/XContentUtilsTests.java @@ -126,6 +126,63 @@ public void testPositionParserAtTokenAfterField_ConsumesUntilEnd() throws IOExce } } + public void testPositionParserAtTokenAfterFieldCurrentObj() throws IOException { + var json = """ + { + "key": "value" + } + """; + + try (XContentParser parser = createParser(XContentType.JSON.xContent(), json)) { + parser.nextToken(); + XContentUtils.positionParserAtTokenAfterFieldCurrentFlatObj(parser, "key", "some error"); + + assertEquals("value", parser.text()); + } + } + + public void testPositionParserAtTokenAfterFieldCurrentObj_ThrowsIfFieldIsMissing() throws IOException { + var json = """ + { + "key": "value" + } + """; + var errorFormat = "Error: %s"; + var missingField = "missing field"; + + try (XContentParser parser = createParser(XContentType.JSON.xContent(), json)) { + parser.nextToken(); + var exception = expectThrows( + IllegalStateException.class, + () -> XContentUtils.positionParserAtTokenAfterFieldCurrentFlatObj(parser, missingField, errorFormat) + ); + + assertEquals(String.format(Locale.ROOT, errorFormat, missingField), exception.getMessage()); + } + } + + public void testPositionParserAtTokenAfterFieldCurrentObj_DoesNotFindNested() throws IOException { + var json = """ + { + "nested": { + "key": "value" + } + } + """; + var errorFormat = "Error: %s"; + var missingField = "missing field"; + + try (XContentParser parser = createParser(XContentType.JSON.xContent(), json)) { + parser.nextToken(); + var exception = expectThrows( + IllegalStateException.class, + () -> XContentUtils.positionParserAtTokenAfterFieldCurrentFlatObj(parser, missingField, errorFormat) + ); + + assertEquals(String.format(Locale.ROOT, errorFormat, missingField), exception.getMessage()); + } + } + public void 
testConsumeUntilObjectEnd() throws IOException { var json = """ { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/anthropic/AnthropicChatCompletionResponseEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/anthropic/AnthropicChatCompletionResponseEntityTests.java new file mode 100644 index 0000000000000..e5490d9f8d3ca --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/anthropic/AnthropicChatCompletionResponseEntityTests.java @@ -0,0 +1,265 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.response.anthropic; + +import org.apache.http.HttpResponse; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.inference.results.ChatCompletionResults; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.external.request.Request; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; + +public class AnthropicChatCompletionResponseEntityTests extends ESTestCase { + + public void testFromResponse_CreatesResultsForASingleItem() throws IOException { + String responseJson = """ + { + "id": "msg_01XzZQmG41BMGe5NZ5p2vEWb", + "type": "message", + "role": "assistant", + "model": "claude-3-opus-20240229", + "content": [ + { + "type": "text", + "text": "result" + } + ], + "stop_reason": "end_turn", + "stop_sequence": null, + "usage": { + "input_tokens": 16, + "output_tokens": 326 + } + } + """; + + ChatCompletionResults chatCompletionResults = AnthropicChatCompletionResponseEntity.fromResponse( + mock(Request.class), + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ); + + assertThat(chatCompletionResults.getResults().size(), is(1)); + assertThat(chatCompletionResults.getResults().get(0).content(), is("result")); + } + + public void testFromResponse_CreatesResultsForMultipleItems() throws IOException { + String responseJson = """ + { + "id": "msg_01XzZQmG41BMGe5NZ5p2vEWb", + "type": "message", + "role": "assistant", + "model": "claude-3-opus-20240229", + "content": [ + { + "type": "text", + "text": "result" + }, + { + "type": "text", + "text": "result2" + } + ], + "stop_reason": "end_turn", + "stop_sequence": null, + "usage": { + "input_tokens": 16, + "output_tokens": 326 + } + } + """; + + ChatCompletionResults chatCompletionResults = AnthropicChatCompletionResponseEntity.fromResponse( + mock(Request.class), + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ); + + assertThat(chatCompletionResults.getResults().size(), is(2)); + assertThat(chatCompletionResults.getResults().get(0).content(), is("result")); + assertThat(chatCompletionResults.getResults().get(1).content(), is("result2")); + } + + public void testFromResponse_CreatesResultsForMultipleItems_IgnoresTools() throws IOException { + String responseJson = """ + { + "id": "msg_01XzZQmG41BMGe5NZ5p2vEWb", + "type": "message", + "role": "assistant", + "model": "claude-3-opus-20240229", + "content": [ + { + "type": 
"text", + "text": "result" + }, + { + "type": "tool_use", + "id": "toolu_01Dc8BGR8aEuToS2B9uz6HMX", + "name": "get_weather", + "input": { + "location": "San Francisco, CA" + } + }, + { + "type": "text", + "text": "result2" + } + ], + "stop_reason": "end_turn", + "stop_sequence": null, + "usage": { + "input_tokens": 16, + "output_tokens": 326 + } + } + """; + + ChatCompletionResults chatCompletionResults = AnthropicChatCompletionResponseEntity.fromResponse( + mock(Request.class), + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ); + + assertThat(chatCompletionResults.getResults().size(), is(2)); + assertThat(chatCompletionResults.getResults().get(0).content(), is("result")); + assertThat(chatCompletionResults.getResults().get(1).content(), is("result2")); + } + + public void testFromResponse_FailsWhenContentIsNotPresent() { + String responseJson = """ + { + "id": "msg_01XzZQmG41BMGe5NZ5p2vEWb", + "type": "message", + "role": "assistant", + "model": "claude-3-opus-20240229", + "not_content": [ + { + "type": "text", + "text": "result" + } + ], + "stop_reason": "end_turn", + "stop_sequence": null, + "usage": { + "input_tokens": 16, + "output_tokens": 326 + } + } + """; + + var thrownException = expectThrows( + IllegalStateException.class, + () -> AnthropicChatCompletionResponseEntity.fromResponse( + mock(Request.class), + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat(thrownException.getMessage(), is("Failed to find required field [content] in Anthropic chat completions response")); + } + + public void testFromResponse_FailsWhenContentFieldNotAnArray() { + String responseJson = """ + { + "id": "msg_01XzZQmG41BMGe5NZ5p2vEWb", + "type": "message", + "role": "assistant", + "model": "claude-3-opus-20240229", + "content": { + "type": "text", + "text": "result" + }, + "stop_reason": "end_turn", + "stop_sequence": null, + "usage": { + "input_tokens": 16, + "output_tokens": 326 + } + } + """; + + var thrownException = expectThrows( + ParsingException.class, + () -> AnthropicChatCompletionResponseEntity.fromResponse( + mock(Request.class), + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat( + thrownException.getMessage(), + is("Failed to parse object: expecting token of type [START_ARRAY] but found [START_OBJECT]") + ); + } + + public void testFromResponse_FailsWhenTypeDoesNotExist() { + String responseJson = """ + { + "id": "msg_01XzZQmG41BMGe5NZ5p2vEWb", + "type": "message", + "role": "assistant", + "model": "claude-3-opus-20240229", + "not_content": [ + { + "text": "result" + } + ], + "stop_reason": "end_turn", + "stop_sequence": null, + "usage": { + "input_tokens": 16, + "output_tokens": 326 + } + } + """; + + var thrownException = expectThrows( + IllegalStateException.class, + () -> AnthropicChatCompletionResponseEntity.fromResponse( + mock(Request.class), + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat(thrownException.getMessage(), is("Failed to find required field [content] in Anthropic chat completions response")); + } + + public void testFromResponse_FailsWhenContentValueIsAString() { + String responseJson = """ + { + "id": "msg_01XzZQmG41BMGe5NZ5p2vEWb", + "type": "message", + "role": "assistant", + "model": "claude-3-opus-20240229", + "content": "hello", + "stop_reason": "end_turn", + "stop_sequence": null, + "usage": { + "input_tokens": 16, + 
"output_tokens": 326 + } + } + """; + + var thrownException = expectThrows( + ParsingException.class, + () -> AnthropicChatCompletionResponseEntity.fromResponse( + mock(Request.class), + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat( + thrownException.getMessage(), + is("Failed to parse object: expecting token of type [START_ARRAY] but found [VALUE_STRING]") + ); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiEmbeddingsResponseEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiEmbeddingsResponseEntityTests.java new file mode 100644 index 0000000000000..39bf08a21a76b --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiEmbeddingsResponseEntityTests.java @@ -0,0 +1,208 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.response.googlevertexai; + +import org.apache.http.HttpResponse; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; +import org.elasticsearch.xpack.inference.external.http.HttpResult; +import org.elasticsearch.xpack.inference.external.request.Request; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.List; + +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; + +public class GoogleVertexAiEmbeddingsResponseEntityTests extends ESTestCase { + + public void testFromResponse_CreatesResultsForASingleItem() throws IOException { + String responseJson = """ + { + "predictions": [ + { + "embeddings": { + "statistics": { + "truncated": false, + "token_count": 6 + }, + "values": [ + -0.123, + 0.123 + ] + } + } + ] + } + """; + + InferenceTextEmbeddingFloatResults parsedResults = GoogleVertexAiEmbeddingsResponseEntity.fromResponse( + mock(Request.class), + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ); + + assertThat( + parsedResults.embeddings(), + is(List.of(InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding.of(List.of(-0.123F, 0.123F)))) + ); + } + + public void testFromResponse_CreatesResultsForMultipleItems() throws IOException { + String responseJson = """ + { + "predictions": [ + { + "embeddings": { + "statistics": { + "truncated": false, + "token_count": 6 + }, + "values": [ + -0.123, + 0.123 + ] + } + }, + { + "embeddings": { + "statistics": { + "truncated": false, + "token_count": 6 + }, + "values": [ + -0.456, + 0.456 + ] + } + } + ] + } + """; + + InferenceTextEmbeddingFloatResults parsedResults = GoogleVertexAiEmbeddingsResponseEntity.fromResponse( + mock(Request.class), + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ); + + assertThat( + parsedResults.embeddings(), + is( + List.of( + InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding.of(List.of(-0.123F, 0.123F)), + InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding.of(List.of(-0.456F, 0.456F)) + ) + ) + ); + } + + public void 
testFromResponse_FailsWhenPredictionsFieldIsNotPresent() { + String responseJson = """ + { + "not_predictions": [ + { + "embeddings": { + "statistics": { + "truncated": false, + "token_count": 6 + }, + "values": [ + -0.123, + 0.123 + ] + } + }, + { + "embeddings": { + "statistics": { + "truncated": false, + "token_count": 6 + }, + "values": [ + -0.456, + 0.456 + ] + } + } + ] + } + """; + + var thrownException = expectThrows( + IllegalStateException.class, + () -> GoogleVertexAiEmbeddingsResponseEntity.fromResponse( + mock(Request.class), + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat(thrownException.getMessage(), is("Failed to find required field [predictions] in Google Vertex AI embeddings response")); + } + + public void testFromResponse_FailsWhenEmbeddingsFieldIsNotPresent() { + String responseJson = """ + { + "predictions": [ + { + "not_embeddings": { + "statistics": { + "truncated": false, + "token_count": 6 + }, + "values": [ + -0.123, + 0.123 + ] + } + } + ] + } + """; + + var thrownException = expectThrows( + IllegalStateException.class, + () -> GoogleVertexAiEmbeddingsResponseEntity.fromResponse( + mock(Request.class), + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat(thrownException.getMessage(), is("Failed to find required field [embeddings] in Google Vertex AI embeddings response")); + } + + public void testFromResponse_FailsWhenValuesFieldIsNotPresent() { + String responseJson = """ + { + "predictions": [ + { + "embeddings": { + "statistics": { + "truncated": false, + "token_count": 6 + }, + "not_values": [ + -0.123, + 0.123 + ] + } + } + ] + } + """; + + var thrownException = expectThrows( + IllegalStateException.class, + () -> GoogleVertexAiEmbeddingsResponseEntity.fromResponse( + mock(Request.class), + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat(thrownException.getMessage(), is("Failed to find required field [values] in Google Vertex AI embeddings response")); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiErrorResponseEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiErrorResponseEntityTests.java new file mode 100644 index 0000000000000..e2c9ebed2c164 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiErrorResponseEntityTests.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.response.googlevertexai; + +import org.apache.http.HttpResponse; +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.external.http.HttpResult; + +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; + +public class GoogleVertexAiErrorResponseEntityTests extends ESTestCase { + + private static HttpResult getMockResult(String jsonString) { + var response = mock(HttpResponse.class); + return new HttpResult(response, Strings.toUTF8Bytes(jsonString)); + } + + public void testErrorResponse_ExtractsError() { + var result = getMockResult(""" + { + "error": { + "code": 400, + "message": "error message", + "status": "INVALID_ARGUMENT", + "details": [ + { + "@type": "type.googleapis.com/google.rpc.BadRequest", + "fieldViolations": [ + { + "description": "Invalid JSON payload received. Unknown name \\"abc\\": Cannot find field." + } + ] + } + ] + } + } + """); + + var error = GoogleVertexAiErrorResponseEntity.fromResponse(result); + assertNotNull(error); + assertThat(error.getErrorMessage(), is("error message")); + } + + public void testErrorResponse_ReturnsNullIfNoError() { + var result = getMockResult(""" + { + "foo": "bar" + } + """); + + var error = GoogleVertexAiErrorResponseEntity.fromResponse(result); + assertNull(error); + } + + public void testErrorResponse_ReturnsNullIfNotJson() { + var result = getMockResult("error message"); + + var error = GoogleVertexAiErrorResponseEntity.fromResponse(result); + assertNull(error); + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiRerankResponseEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiRerankResponseEntityTests.java new file mode 100644 index 0000000000000..32450e3facfd0 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiRerankResponseEntityTests.java @@ -0,0 +1,164 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.response.googlevertexai; + +import org.apache.http.HttpResponse; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.inference.results.RankedDocsResults; +import org.elasticsearch.xpack.inference.external.http.HttpResult; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.List; + +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; + +public class GoogleVertexAiRerankResponseEntityTests extends ESTestCase { + + public void testFromResponse_CreatesResultsForASingleItem() throws IOException { + String responseJson = """ + { + "records": [ + { + "id": "2", + "title": "title 2", + "content": "content 2", + "score": 0.97 + } + ] + } + """; + + RankedDocsResults parsedResults = GoogleVertexAiRerankResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ); + + assertThat(parsedResults.getRankedDocs(), is(List.of(new RankedDocsResults.RankedDoc(0, 0.97F, "content 2")))); + } + + public void testFromResponse_CreatesResultsForMultipleItems() throws IOException { + String responseJson = """ + { + "records": [ + { + "id": "2", + "title": "title 2", + "content": "content 2", + "score": 0.97 + }, + { + "id": "1", + "title": "title 1", + "content": "content 1", + "score": 0.90 + } + ] + } + """; + + RankedDocsResults parsedResults = GoogleVertexAiRerankResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ); + + assertThat( + parsedResults.getRankedDocs(), + is(List.of(new RankedDocsResults.RankedDoc(0, 0.97F, "content 2"), new RankedDocsResults.RankedDoc(1, 0.90F, "content 1"))) + ); + } + + public void testFromResponse_FailsWhenRecordsFieldIsNotPresent() { + String responseJson = """ + { + "not_records": [ + { + "id": "2", + "title": "title 2", + "content": "content 2", + "score": 0.97 + }, + { + "id": "1", + "title": "title 1", + "content": "content 1", + "score": 0.90 + } + ] + } + """; + + var thrownException = expectThrows( + IllegalStateException.class, + () -> GoogleVertexAiRerankResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat(thrownException.getMessage(), is("Failed to find required field [records] in Google Vertex AI rerank response")); + } + + public void testFromResponse_FailsWhenContentFieldIsNotPresent() { + String responseJson = """ + { + "records": [ + { + "id": "2", + "title": "title 2", + "content": "content 2", + "score": 0.97 + }, + { + "id": "1", + "title": "title 1", + "not_content": "content 1", + "score": 0.97 + } + ] + } + """; + + var thrownException = expectThrows( + IllegalStateException.class, + () -> GoogleVertexAiRerankResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat(thrownException.getMessage(), is("Failed to find required field [content] in Google Vertex AI rerank response")); + } + + public void testFromResponse_FailsWhenScoreFieldIsNotPresent() { + String responseJson = """ + { + "records": [ + { + "id": "2", + "title": "title 2", + "content": "content 2", + "not_score": 0.97 + }, + { + "id": "1", + "title": "title 1", + "content": "content 1", + "score": 0.96 + } + ] + } + """; + + var thrownException = expectThrows( + IllegalStateException.class, + () -> 
GoogleVertexAiRerankResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat(thrownException.getMessage(), is("Failed to find required field [score] in Google Vertex AI rerank response")); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java index aacd72d8f1703..1cae8d981313f 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java @@ -13,6 +13,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; @@ -42,8 +43,10 @@ import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import org.elasticsearch.index.mapper.vectors.SparseVectorFieldMapper; +import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.search.ESToParentBlockJoinQuery; import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.LeafNestedDocuments; @@ -61,6 +64,7 @@ import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.function.BiConsumer; import static java.util.Collections.singletonList; import static org.elasticsearch.xpack.inference.mapper.SemanticTextField.CHUNKED_EMBEDDINGS_FIELD; @@ -178,36 +182,10 @@ public void testDynamicUpdate() throws IOException { final String fieldName = "semantic"; final String inferenceId = "test_service"; - MapperService mapperService = createMapperService(mapping(b -> {})); - mapperService.merge( - "_doc", - new CompressedXContent( - Strings.toString(PutMappingRequest.simpleMapping(fieldName, "type=semantic_text,inference_id=" + inferenceId)) - ), - MapperService.MergeReason.MAPPING_UPDATE - ); - - SemanticTextField semanticTextField = new SemanticTextField( + MapperService mapperService = mapperServiceForFieldWithModelSettings( fieldName, - List.of(), - new SemanticTextField.InferenceResult( - inferenceId, - new SemanticTextField.ModelSettings(TaskType.SPARSE_EMBEDDING, null, null), - List.of() - ), - XContentType.JSON - ); - XContentBuilder builder = JsonXContent.contentBuilder().startObject(); - builder.field(semanticTextField.fieldName()); - builder.value(semanticTextField); - builder.endObject(); - - SourceToParse sourceToParse = new SourceToParse("test", BytesReference.bytes(builder), XContentType.JSON); - ParsedDocument parsedDocument = mapperService.documentMapper().parse(sourceToParse); - mapperService.merge( - "_doc", - parsedDocument.dynamicMappingsUpdate().toCompressedXContent(), - MapperService.MergeReason.MAPPING_UPDATE + inferenceId, + new SemanticTextField.ModelSettings(TaskType.SPARSE_EMBEDDING, null, null, null) ); assertSemanticTextField(mapperService, fieldName, true); } @@ -280,6 +258,7 @@ public void testUpdateModelSettings() throws IOException { .field("task_type", 
"text_embedding") .field("dimensions", 10) .field("similarity", "cosine") + .field("element_type", "float") .endObject() .endObject() ) @@ -290,7 +269,7 @@ public void testUpdateModelSettings() throws IOException { containsString( "Cannot update parameter [model_settings] " + "from [task_type=sparse_embedding] " - + "to [task_type=text_embedding, dimensions=10, similarity=cosine]" + + "to [task_type=text_embedding, dimensions=10, similarity=cosine, element_type=float]" ) ); } @@ -314,7 +293,7 @@ static void assertSemanticTextField(MapperService mapperService, String fieldNam .getNestedMappers() .get(getChunksFieldName(fieldName)); assertThat(chunksMapper, equalTo(semanticFieldMapper.fieldType().getChunksField())); - assertThat(chunksMapper.name(), equalTo(getChunksFieldName(fieldName))); + assertThat(chunksMapper.fullPath(), equalTo(getChunksFieldName(fieldName))); Mapper textMapper = chunksMapper.getMapper(CHUNKED_TEXT_FIELD); assertNotNull(textMapper); assertThat(textMapper, instanceOf(KeywordFieldMapper.class)); @@ -328,7 +307,7 @@ static void assertSemanticTextField(MapperService mapperService, String fieldNam assertThat(embeddingsMapper, instanceOf(FieldMapper.class)); FieldMapper embeddingsFieldMapper = (FieldMapper) embeddingsMapper; assertTrue(embeddingsFieldMapper.fieldType() == mapperService.mappingLookup().getFieldType(getEmbeddingsFieldName(fieldName))); - assertThat(embeddingsMapper.name(), equalTo(getEmbeddingsFieldName(fieldName))); + assertThat(embeddingsMapper.fullPath(), equalTo(getEmbeddingsFieldName(fieldName))); switch (semanticFieldMapper.fieldType().getModelSettings().taskType()) { case SPARSE_EMBEDDING -> assertThat(embeddingsMapper, instanceOf(SparseVectorFieldMapper.class)); case TEXT_EMBEDDING -> assertThat(embeddingsMapper, instanceOf(DenseVectorFieldMapper.class)); @@ -449,7 +428,7 @@ public void testMissingInferenceId() throws IOException { source( b -> b.startObject("field") .startObject(INFERENCE_FIELD) - .field(MODEL_SETTINGS_FIELD, new SemanticTextField.ModelSettings(TaskType.SPARSE_EMBEDDING, null, null)) + .field(MODEL_SETTINGS_FIELD, new SemanticTextField.ModelSettings(TaskType.SPARSE_EMBEDDING, null, null, null)) .field(CHUNKS_FIELD, List.of()) .endObject() .endObject() @@ -491,6 +470,122 @@ public void testMissingTaskType() throws IOException { assertThat(ex.getCause().getMessage(), containsString("failed to parse field [model_settings]")); } + public void testDenseVectorElementType() throws IOException { + final String fieldName = "field"; + final String inferenceId = "test_service"; + + BiConsumer assertMapperService = (m, e) -> { + Mapper mapper = m.mappingLookup().getMapper(fieldName); + assertThat(mapper, instanceOf(SemanticTextFieldMapper.class)); + SemanticTextFieldMapper semanticTextFieldMapper = (SemanticTextFieldMapper) mapper; + assertThat(semanticTextFieldMapper.fieldType().getModelSettings().elementType(), equalTo(e)); + }; + + MapperService floatMapperService = mapperServiceForFieldWithModelSettings( + fieldName, + inferenceId, + new SemanticTextField.ModelSettings( + TaskType.TEXT_EMBEDDING, + 1024, + SimilarityMeasure.COSINE, + DenseVectorFieldMapper.ElementType.FLOAT + ) + ); + assertMapperService.accept(floatMapperService, DenseVectorFieldMapper.ElementType.FLOAT); + + MapperService byteMapperService = mapperServiceForFieldWithModelSettings( + fieldName, + inferenceId, + new SemanticTextField.ModelSettings( + TaskType.TEXT_EMBEDDING, + 1024, + SimilarityMeasure.COSINE, + DenseVectorFieldMapper.ElementType.BYTE + ) + ); + 
assertMapperService.accept(byteMapperService, DenseVectorFieldMapper.ElementType.BYTE); + } + + private MapperService mapperServiceForFieldWithModelSettings( + String fieldName, + String inferenceId, + SemanticTextField.ModelSettings modelSettings + ) throws IOException { + MapperService mapperService = createMapperService(mapping(b -> {})); + mapperService.merge( + "_doc", + new CompressedXContent( + Strings.toString(PutMappingRequest.simpleMapping(fieldName, "type=semantic_text,inference_id=" + inferenceId)) + ), + MapperService.MergeReason.MAPPING_UPDATE + ); + + SemanticTextField semanticTextField = new SemanticTextField( + fieldName, + List.of(), + new SemanticTextField.InferenceResult(inferenceId, modelSettings, List.of()), + XContentType.JSON + ); + XContentBuilder builder = JsonXContent.contentBuilder().startObject(); + builder.field(semanticTextField.fieldName()); + builder.value(semanticTextField); + builder.endObject(); + + SourceToParse sourceToParse = new SourceToParse("test", BytesReference.bytes(builder), XContentType.JSON); + ParsedDocument parsedDocument = mapperService.documentMapper().parse(sourceToParse); + mapperService.merge( + "_doc", + parsedDocument.dynamicMappingsUpdate().toCompressedXContent(), + MapperService.MergeReason.MAPPING_UPDATE + ); + return mapperService; + } + + public void testExistsQuerySparseVector() throws IOException { + final String fieldName = "semantic"; + final String inferenceId = "test_service"; + + MapperService mapperService = mapperServiceForFieldWithModelSettings( + fieldName, + inferenceId, + new SemanticTextField.ModelSettings(TaskType.SPARSE_EMBEDDING, null, null, null) + ); + + Mapper mapper = mapperService.mappingLookup().getMapper(fieldName); + assertNotNull(mapper); + SearchExecutionContext searchExecutionContext = createSearchExecutionContext(mapperService); + Query existsQuery = ((SemanticTextFieldMapper) mapper).fieldType().existsQuery(searchExecutionContext); + assertThat(existsQuery, instanceOf(ESToParentBlockJoinQuery.class)); + } + + public void testExistsQueryDenseVector() throws IOException { + final String fieldName = "semantic"; + final String inferenceId = "test_service"; + + MapperService mapperService = mapperServiceForFieldWithModelSettings( + fieldName, + inferenceId, + new SemanticTextField.ModelSettings( + TaskType.TEXT_EMBEDDING, + 1024, + SimilarityMeasure.COSINE, + DenseVectorFieldMapper.ElementType.FLOAT + ) + ); + + Mapper mapper = mapperService.mappingLookup().getMapper(fieldName); + assertNotNull(mapper); + SearchExecutionContext searchExecutionContext = createSearchExecutionContext(mapperService); + Query existsQuery = ((SemanticTextFieldMapper) mapper).fieldType().existsQuery(searchExecutionContext); + assertThat(existsQuery, instanceOf(ESToParentBlockJoinQuery.class)); + } + + @Override + protected void assertExistsQuery(MappedFieldType fieldType, Query query, LuceneDocument fields) { + // Until a doc is indexed, the query is rewritten as match no docs + assertThat(query, instanceOf(MatchNoDocsQuery.class)); + } + private static void addSemanticTextMapping(XContentBuilder mappingBuilder, String fieldName, String modelId) throws IOException { mappingBuilder.startObject(fieldName); mappingBuilder.field("type", SemanticTextFieldMapper.CONTENT_TYPE); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldTests.java index 
6d8b3ab4fa28e..2a64f77e28756 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.SimilarityMeasure; @@ -106,36 +107,55 @@ protected boolean supportsUnknownFields() { public void testModelSettingsValidation() { NullPointerException npe = expectThrows(NullPointerException.class, () -> { - new SemanticTextField.ModelSettings(null, 10, SimilarityMeasure.COSINE); + new SemanticTextField.ModelSettings(null, 10, SimilarityMeasure.COSINE, DenseVectorFieldMapper.ElementType.FLOAT); }); assertThat(npe.getMessage(), equalTo("task type must not be null")); IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> { - new SemanticTextField.ModelSettings(TaskType.COMPLETION, 10, SimilarityMeasure.COSINE); + new SemanticTextField.ModelSettings( + TaskType.COMPLETION, + 10, + SimilarityMeasure.COSINE, + DenseVectorFieldMapper.ElementType.FLOAT + ); }); assertThat(ex.getMessage(), containsString("Wrong [task_type]")); ex = expectThrows( IllegalArgumentException.class, - () -> { new SemanticTextField.ModelSettings(TaskType.SPARSE_EMBEDDING, 10, null); } + () -> { new SemanticTextField.ModelSettings(TaskType.SPARSE_EMBEDDING, 10, null, null); } ); assertThat(ex.getMessage(), containsString("[dimensions] is not allowed")); ex = expectThrows(IllegalArgumentException.class, () -> { - new SemanticTextField.ModelSettings(TaskType.SPARSE_EMBEDDING, null, SimilarityMeasure.COSINE); + new SemanticTextField.ModelSettings(TaskType.SPARSE_EMBEDDING, null, SimilarityMeasure.COSINE, null); }); assertThat(ex.getMessage(), containsString("[similarity] is not allowed")); ex = expectThrows(IllegalArgumentException.class, () -> { - new SemanticTextField.ModelSettings(TaskType.TEXT_EMBEDDING, null, SimilarityMeasure.COSINE); + new SemanticTextField.ModelSettings(TaskType.SPARSE_EMBEDDING, null, null, DenseVectorFieldMapper.ElementType.FLOAT); + }); + assertThat(ex.getMessage(), containsString("[element_type] is not allowed")); + + ex = expectThrows(IllegalArgumentException.class, () -> { + new SemanticTextField.ModelSettings( + TaskType.TEXT_EMBEDDING, + null, + SimilarityMeasure.COSINE, + DenseVectorFieldMapper.ElementType.FLOAT + ); }); assertThat(ex.getMessage(), containsString("required [dimensions] field is missing")); - ex = expectThrows( - IllegalArgumentException.class, - () -> { new SemanticTextField.ModelSettings(TaskType.TEXT_EMBEDDING, 10, null); } - ); + ex = expectThrows(IllegalArgumentException.class, () -> { + new SemanticTextField.ModelSettings(TaskType.TEXT_EMBEDDING, 10, null, DenseVectorFieldMapper.ElementType.FLOAT); + }); assertThat(ex.getMessage(), containsString("required [similarity] field is missing")); + + ex = expectThrows(IllegalArgumentException.class, () -> { + new SemanticTextField.ModelSettings(TaskType.TEXT_EMBEDDING, 10, SimilarityMeasure.COSINE, null); + }); + assertThat(ex.getMessage(), containsString("required [element_type] field is missing")); } public static InferenceChunkedTextEmbeddingFloatResults 
randomInferenceChunkedTextEmbeddingFloatResults( diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/model/TestModel.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/model/TestModel.java index ced6e3ff43e2c..094952b8716b7 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/model/TestModel.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/model/TestModel.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; @@ -37,13 +38,14 @@ public static TestModel createRandomInstance() { } public static TestModel createRandomInstance(TaskType taskType) { - var dimensions = taskType == TaskType.TEXT_EMBEDDING ? randomInt(1024) : null; + var dimensions = taskType == TaskType.TEXT_EMBEDDING ? randomInt(64) : null; var similarity = taskType == TaskType.TEXT_EMBEDDING ? randomFrom(SimilarityMeasure.values()) : null; + var elementType = taskType == TaskType.TEXT_EMBEDDING ? randomFrom(DenseVectorFieldMapper.ElementType.values()) : null; return new TestModel( randomAlphaOfLength(4), taskType, randomAlphaOfLength(10), - new TestModel.TestServiceSettings(randomAlphaOfLength(4), dimensions, similarity), + new TestModel.TestServiceSettings(randomAlphaOfLength(4), dimensions, similarity, elementType), new TestModel.TestTaskSettings(randomInt(3)), new TestModel.TestSecretSettings(randomAlphaOfLength(4)) ); @@ -78,7 +80,12 @@ public TestSecretSettings getSecretSettings() { return (TestSecretSettings) super.getSecretSettings(); } - public record TestServiceSettings(String model, Integer dimensions, SimilarityMeasure similarity) implements ServiceSettings { + public record TestServiceSettings( + String model, + Integer dimensions, + SimilarityMeasure similarity, + DenseVectorFieldMapper.ElementType elementType + ) implements ServiceSettings { private static final String NAME = "test_service_settings"; @@ -95,11 +102,16 @@ public static TestServiceSettings fromMap(Map map) { throw validationException; } - return new TestServiceSettings(model, null, null); + return new TestServiceSettings(model, null, null, null); } public TestServiceSettings(StreamInput in) throws IOException { - this(in.readString(), in.readOptionalVInt(), in.readOptionalEnum(SimilarityMeasure.class)); + this( + in.readString(), + in.readOptionalVInt(), + in.readOptionalEnum(SimilarityMeasure.class), + in.readOptionalEnum(DenseVectorFieldMapper.ElementType.class) + ); } @Override @@ -112,6 +124,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (similarity != null) { builder.field("similarity", similarity); } + if (elementType != null) { + builder.field("element_type", elementType); + } builder.endObject(); return builder; } @@ -131,6 +146,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(model); out.writeOptionalVInt(dimensions); out.writeOptionalEnum(similarity); + out.writeOptionalEnum(elementType); } @Override @@ -147,6 +163,11 @@ public SimilarityMeasure similarity() { public Integer dimensions() { return dimensions; } + + @Override + public DenseVectorFieldMapper.ElementType elementType() { + return 
elementType; + } } public record TestTaskSettings(Integer temperature) implements TaskSettings { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilderTests.java index 07713952e36c3..c2b99923bae61 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilderTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilderTests.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.KnnByteVectorQuery; import org.apache.lucene.search.KnnFloatVectorQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; @@ -29,6 +30,7 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import org.elasticsearch.index.query.MatchNoneQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; @@ -79,6 +81,7 @@ public class SemanticQueryBuilderTests extends AbstractQueryTestCase randomFrom(DenseVectorFieldMapper.ElementType.values()) + ); // TODO: Support bit elements once KNN bit vector queries are available } @Override @@ -133,7 +140,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws private void applyRandomInferenceResults(MapperService mapperService) throws IOException { // Parse random inference results (or no inference results) to set up the dynamic inference result mappings under the semantic text // field - SourceToParse sourceToParse = buildSemanticTextFieldWithInferenceResults(inferenceResultType); + SourceToParse sourceToParse = buildSemanticTextFieldWithInferenceResults(inferenceResultType, denseVectorElementType); if (sourceToParse != null) { ParsedDocument parsedDocument = mapperService.documentMapper().parse(sourceToParse); mapperService.merge( @@ -194,7 +201,13 @@ private void assertSparseEmbeddingLuceneQuery(Query query) { private void assertTextEmbeddingLuceneQuery(Query query) { Query innerQuery = assertOuterBooleanQuery(query); - assertThat(innerQuery, instanceOf(KnnFloatVectorQuery.class)); + + Class expectedKnnQueryClass = switch (denseVectorElementType) { + case FLOAT -> KnnFloatVectorQuery.class; + case BYTE -> KnnByteVectorQuery.class; + default -> throw new IllegalStateException("Unhandled element type [" + denseVectorElementType + "]"); + }; + assertThat(innerQuery, instanceOf(expectedKnnQueryClass)); } private Query assertOuterBooleanQuery(Query query) { @@ -308,14 +321,18 @@ public void testSerializingQueryWhenNoInferenceId() throws IOException { assertThat(rewritten, instanceOf(MatchNoneQueryBuilder.class)); } - private static SourceToParse buildSemanticTextFieldWithInferenceResults(InferenceResultType inferenceResultType) throws IOException { + private static SourceToParse buildSemanticTextFieldWithInferenceResults( + InferenceResultType inferenceResultType, + DenseVectorFieldMapper.ElementType denseVectorElementType + ) throws IOException { SemanticTextField.ModelSettings modelSettings = switch (inferenceResultType) { case NONE -> null; - case SPARSE_EMBEDDING -> 
new SemanticTextField.ModelSettings(TaskType.SPARSE_EMBEDDING, null, null); + case SPARSE_EMBEDDING -> new SemanticTextField.ModelSettings(TaskType.SPARSE_EMBEDDING, null, null, null); case TEXT_EMBEDDING -> new SemanticTextField.ModelSettings( TaskType.TEXT_EMBEDDING, TEXT_EMBEDDING_DIMENSION_COUNT, - SimilarityMeasure.COSINE + SimilarityMeasure.COSINE, + denseVectorElementType ); }; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankBuilderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankBuilderTests.java new file mode 100644 index 0000000000000..9ea28242f3605 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankBuilderTests.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.rank.textsimilarity; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; + +import static org.elasticsearch.search.rank.RankBuilder.DEFAULT_RANK_WINDOW_SIZE; + +public class TextSimilarityRankBuilderTests extends AbstractXContentSerializingTestCase<TextSimilarityRankBuilder> { + + @Override + protected TextSimilarityRankBuilder createTestInstance() { + return new TextSimilarityRankBuilder( + "my-field", + "my-inference-id", + "my-inference-text", + randomIntBetween(1, 1000), + randomBoolean() ?
null : randomFloat() + ); + } + + @Override + protected TextSimilarityRankBuilder mutateInstance(TextSimilarityRankBuilder instance) throws IOException { + String field = instance.field(); + String inferenceId = instance.inferenceId(); + String inferenceText = instance.inferenceText(); + int rankWindowSize = instance.rankWindowSize(); + Float minScore = instance.minScore(); + + int mutate = randomIntBetween(0, 4); + switch (mutate) { + case 0 -> field = field + randomAlphaOfLength(2); + case 1 -> inferenceId = inferenceId + randomAlphaOfLength(2); + case 2 -> inferenceText = inferenceText + randomAlphaOfLength(2); + case 3 -> rankWindowSize = randomValueOtherThan(instance.rankWindowSize(), this::randomRankWindowSize); + case 4 -> minScore = randomValueOtherThan(instance.minScore(), this::randomMinScore); + default -> throw new IllegalStateException("Requested to modify more than available parameters."); + } + return new TextSimilarityRankBuilder(field, inferenceId, inferenceText, rankWindowSize, minScore); + } + + @Override + protected Writeable.Reader<TextSimilarityRankBuilder> instanceReader() { + return TextSimilarityRankBuilder::new; + } + + @Override + protected TextSimilarityRankBuilder doParseInstance(XContentParser parser) throws IOException { + parser.nextToken(); + assertEquals(parser.currentToken(), XContentParser.Token.START_OBJECT); + parser.nextToken(); + assertEquals(parser.currentToken(), XContentParser.Token.FIELD_NAME); + assertEquals(parser.currentName(), TextSimilarityRankBuilder.NAME); + TextSimilarityRankBuilder builder = TextSimilarityRankBuilder.PARSER.parse(parser, null); + parser.nextToken(); + assertEquals(parser.currentToken(), XContentParser.Token.END_OBJECT); + parser.nextToken(); + assertNull(parser.currentToken()); + return builder; + } + + private int randomRankWindowSize() { + return randomIntBetween(0, 1000); + } + + private float randomMinScore() { + return randomFloatBetween(-1.0f, 1.0f, true); + } + + public void testParserDefaults() throws IOException { + String json = """ + { + "field": "my-field", + "inference_id": "my-inference-id", + "inference_text": "my-inference-text" + }"""; + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + TextSimilarityRankBuilder parsed = TextSimilarityRankBuilder.PARSER.parse(parser, null); + assertEquals(DEFAULT_RANK_WINDOW_SIZE, parsed.rankWindowSize()); + } + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContextTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContextTests.java new file mode 100644 index 0000000000000..50d91a2271de6 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContextTests.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.inference.rank.textsimilarity; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.search.rank.feature.RankFeatureDoc; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.inference.action.InferenceAction; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.argThat; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +public class TextSimilarityRankFeaturePhaseRankCoordinatorContextTests extends ESTestCase { + + private final Client mockClient = mock(Client.class); + + TextSimilarityRankFeaturePhaseRankCoordinatorContext subject = new TextSimilarityRankFeaturePhaseRankCoordinatorContext( + 10, + 0, + 100, + mockClient, + "my-inference-id", + "some query", + 0.0f + ); + + public void testComputeScores() { + RankFeatureDoc featureDoc1 = new RankFeatureDoc(0, 1.0f, 0); + featureDoc1.featureData("text 1"); + RankFeatureDoc featureDoc2 = new RankFeatureDoc(1, 3.0f, 1); + featureDoc2.featureData("text 2"); + RankFeatureDoc featureDoc3 = new RankFeatureDoc(2, 2.0f, 0); + featureDoc3.featureData("text 3"); + RankFeatureDoc[] featureDocs = new RankFeatureDoc[] { featureDoc1, featureDoc2, featureDoc3 }; + + subject.computeScores(featureDocs, new ActionListener<>() { + @Override + public void onResponse(float[] floats) { + assertArrayEquals(new float[] { 1.0f, 3.0f, 2.0f }, floats, 0.0f); + } + + @Override + public void onFailure(Exception e) { + fail(); + } + }); + + verify(mockClient).execute( + eq(InferenceAction.INSTANCE), + argThat(actionRequest -> ((InferenceAction.Request) actionRequest).getTaskType().equals(TaskType.RERANK)), + any() + ); + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankMultiNodeTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankMultiNodeTests.java new file mode 100644 index 0000000000000..a3605aade1fa1 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankMultiNodeTests.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.rank.textsimilarity; + +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.rank.RankBuilder; +import org.elasticsearch.search.rank.rerank.AbstractRerankerIT; +import org.elasticsearch.xpack.inference.InferencePlugin; + +import java.util.Collection; +import java.util.List; + +public class TextSimilarityRankMultiNodeTests extends AbstractRerankerIT { + + private static final String inferenceId = "inference-id"; + private static final String inferenceText = "inference-text"; + private static final float minScore = 0.0f; + + @Override + protected RankBuilder getRankBuilder(int rankWindowSize, String rankFeatureField) { + return new TextSimilarityRankBuilder(rankFeatureField, inferenceId, inferenceText, rankWindowSize, minScore); + } + + @Override + protected RankBuilder getThrowingRankBuilder(int rankWindowSize, String rankFeatureField, ThrowingRankBuilderType type) { + return new TextSimilarityTestPlugin.ThrowingMockRequestActionBasedRankBuilder( + rankWindowSize, + rankFeatureField, + inferenceId, + inferenceText, + minScore, + type.name() + ); + } + + @Override + protected Collection<Class<? extends Plugin>> pluginsNeeded() { + return List.of(InferencePlugin.class, TextSimilarityTestPlugin.class); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilderTests.java new file mode 100644 index 0000000000000..51f240be6fbeb --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilderTests.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.rank.textsimilarity; + +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.search.retriever.RetrieverBuilder; +import org.elasticsearch.search.retriever.RetrieverParserContext; +import org.elasticsearch.search.retriever.TestRetrieverBuilder; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.usage.SearchUsage; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.search.rank.RankBuilder.DEFAULT_RANK_WINDOW_SIZE; + +public class TextSimilarityRankRetrieverBuilderTests extends AbstractXContentTestCase<TextSimilarityRankRetrieverBuilder> { + + /** + * Creates a random {@link TextSimilarityRankRetrieverBuilder}. The created instance + * is not guaranteed to pass {@link SearchRequest} validation. This is purely + * for x-content testing. + */ + public static TextSimilarityRankRetrieverBuilder createRandomTextSimilarityRankRetrieverBuilder() { + return new TextSimilarityRankRetrieverBuilder( + TestRetrieverBuilder.createRandomTestRetrieverBuilder(), + randomAlphaOfLength(10), + randomAlphaOfLength(20), + randomAlphaOfLength(50), + randomIntBetween(1, 10000), + randomBoolean() ?
null : randomFloatBetween(-1.0f, 1.0f, true) + ); + } + + @Override + protected TextSimilarityRankRetrieverBuilder createTestInstance() { + return createRandomTextSimilarityRankRetrieverBuilder(); + } + + @Override + protected TextSimilarityRankRetrieverBuilder doParseInstance(XContentParser parser) { + return TextSimilarityRankRetrieverBuilder.PARSER.apply( + parser, + new RetrieverParserContext( + new SearchUsage(), + nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED + || nf == TextSimilarityRankRetrieverBuilder.TEXT_SIMILARITY_RERANKER_RETRIEVER_SUPPORTED + ) + ); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new ArrayList<>(); + entries.add( + new NamedXContentRegistry.Entry( + RetrieverBuilder.class, + TestRetrieverBuilder.TEST_SPEC.getName(), + (p, c) -> TestRetrieverBuilder.TEST_SPEC.getParser().fromXContent(p, (RetrieverParserContext) c), + TestRetrieverBuilder.TEST_SPEC.getName().getForRestApiVersion() + ) + ); + entries.add( + new NamedXContentRegistry.Entry( + RetrieverBuilder.class, + new ParseField(TextSimilarityRankBuilder.NAME), + (p, c) -> TextSimilarityRankRetrieverBuilder.PARSER.apply(p, (RetrieverParserContext) c) + ) + ); + return new NamedXContentRegistry(entries); + } + + public void testParserDefaults() throws IOException { + String json = """ + { + "retriever": { + "test": { + "value": "my-test-retriever" + } + }, + "field": "my-field", + "inference_id": "my-inference-id", + "inference_text": "my-inference-text" + }"""; + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + TextSimilarityRankRetrieverBuilder parsed = TextSimilarityRankRetrieverBuilder.PARSER.parse(parser, null); + assertEquals(DEFAULT_RANK_WINDOW_SIZE, parsed.rankWindowSize()); + } + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java new file mode 100644 index 0000000000000..7fbfe70dbcfe7 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java @@ -0,0 +1,173 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.rank.textsimilarity; + +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.inference.InputType; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.rank.context.RankFeaturePhaseRankCoordinatorContext; +import org.elasticsearch.search.rank.rerank.AbstractRerankerIT; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; +import org.elasticsearch.xpack.core.inference.action.InferenceAction; +import org.elasticsearch.xpack.inference.InferencePlugin; +import org.junit.Before; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.hamcrest.Matchers.containsString; + +public class TextSimilarityRankTests extends ESSingleNodeTestCase { + + /** + * {@code TextSimilarityRankBuilder} that simulates an inference call that returns a different number of results than the input. + */ + public static class InvalidInferenceResultCountProvidingTextSimilarityRankBuilder extends TextSimilarityRankBuilder { + + public InvalidInferenceResultCountProvidingTextSimilarityRankBuilder( + String field, + String inferenceId, + String inferenceText, + int rankWindowSize, + Float minScore + ) { + super(field, inferenceId, inferenceText, rankWindowSize, minScore); + } + + @Override + public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorContext(int size, int from, Client client) { + return new TextSimilarityRankFeaturePhaseRankCoordinatorContext( + size, + from, + rankWindowSize(), + client, + inferenceId, + inferenceText, + minScore + ) { + @Override + protected InferenceAction.Request generateRequest(List<String> docFeatures) { + return new InferenceAction.Request( + TaskType.RERANK, + inferenceId, + inferenceText, + docFeatures, + Map.of("invalidInferenceResultCount", true), + InputType.SEARCH, + InferenceAction.Request.DEFAULT_TIMEOUT + ); + } + }; + } + } + + private static final String inferenceId = "inference-id"; + private static final String inferenceText = "inference-text"; + private static final float minScore = 0.0f; + + private Client client; + + @Override + protected Collection<Class<? extends Plugin>> getPlugins() { + return List.of(InferencePlugin.class, TextSimilarityTestPlugin.class); + } + + @Before + public void setup() { + // Initialize index with a few documents + client = client(); + for (int i = 0; i < 5; i++) { + client.prepareIndex("my-index").setId(String.valueOf(i)).setSource(Collections.singletonMap("text", String.valueOf(i))).get(); + } + client.admin().indices().prepareRefresh("my-index").get(); + } + + public void testRerank() { + ElasticsearchAssertions.assertNoFailuresAndResponse( + // Execute search with text similarity reranking + client.prepareSearch() + .setRankBuilder(new TextSimilarityRankBuilder("text", "my-rerank-model", "my query", 100, 0.0f)) + .setQuery(QueryBuilders.matchAllQuery()), + response -> { + // Verify order, rank and score of results + SearchHit[] hits = response.getHits().getHits(); + assertEquals(5, hits.length); + assertHitHasRankScoreAndText(hits[0], 1, 4.0f, "4"); + assertHitHasRankScoreAndText(hits[1], 2, 3.0f, "3"); + assertHitHasRankScoreAndText(hits[2], 3, 2.0f, "2"); + assertHitHasRankScoreAndText(hits[3], 4, 1.0f, "1"); +
assertHitHasRankScoreAndText(hits[4], 5, 0.0f, "0"); + } + ); + } + + public void testRerankWithMinScore() { + ElasticsearchAssertions.assertNoFailuresAndResponse( + // Execute search with text similarity reranking + client.prepareSearch() + .setRankBuilder(new TextSimilarityRankBuilder("text", "my-rerank-model", "my query", 100, 1.5f)) + .setQuery(QueryBuilders.matchAllQuery()), + response -> { + // Verify order, rank and score of results + SearchHit[] hits = response.getHits().getHits(); + assertEquals(3, hits.length); + assertHitHasRankScoreAndText(hits[0], 1, 4.0f, "4"); + assertHitHasRankScoreAndText(hits[1], 2, 3.0f, "3"); + assertHitHasRankScoreAndText(hits[2], 3, 2.0f, "2"); + } + ); + } + + public void testRerankInferenceFailure() { + ElasticsearchAssertions.assertFailures( + // Execute search with text similarity reranking + client.prepareSearch() + .setRankBuilder( + new TextSimilarityTestPlugin.ThrowingMockRequestActionBasedRankBuilder( + 100, + "text", + "my-rerank-model", + "my query", + 0.7f, + AbstractRerankerIT.ThrowingRankBuilderType.THROWING_RANK_FEATURE_PHASE_COORDINATOR_CONTEXT.name() + ) + ) + .setQuery(QueryBuilders.matchAllQuery()), + RestStatus.INTERNAL_SERVER_ERROR, + containsString("Failed to execute phase [rank-feature], Computing updated ranks for results failed") + ); + } + + public void testRerankInferenceResultMismatch() { + ElasticsearchAssertions.assertFailures( + // Execute search with text similarity reranking + client.prepareSearch() + .setRankBuilder( + new InvalidInferenceResultCountProvidingTextSimilarityRankBuilder("text", "my-rerank-model", "my query", 100, 1.5f) + ) + .setQuery(QueryBuilders.matchAllQuery()), + RestStatus.INTERNAL_SERVER_ERROR, + containsString("Failed to execute phase [rank-feature], Computing updated ranks for results failed") + ); + } + + private static void assertHitHasRankScoreAndText(SearchHit hit, int expectedRank, float expectedScore, String expectedText) { + assertEquals(expectedRank, hit.getRank()); + assertEquals(expectedScore, hit.getScore(), 0.0f); + assertEquals(expectedText, Objects.requireNonNull(hit.getSourceAsMap()).get("text")); + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityTestPlugin.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityTestPlugin.java new file mode 100644 index 0000000000000..1e457a1a27c92 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityTestPlugin.java @@ -0,0 +1,287 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.rank.textsimilarity; + +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.search.SearchPhaseController; +import org.elasticsearch.action.support.ActionFilter; +import org.elasticsearch.action.support.ActionFilterChain; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.inference.InputType; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.search.rank.RankBuilder; +import org.elasticsearch.search.rank.RankShardResult; +import org.elasticsearch.search.rank.context.QueryPhaseRankCoordinatorContext; +import org.elasticsearch.search.rank.context.QueryPhaseRankShardContext; +import org.elasticsearch.search.rank.context.RankFeaturePhaseRankCoordinatorContext; +import org.elasticsearch.search.rank.context.RankFeaturePhaseRankShardContext; +import org.elasticsearch.search.rank.rerank.AbstractRerankerIT; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.inference.action.InferenceAction; +import org.elasticsearch.xpack.core.inference.results.RankedDocsResults; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.singletonList; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * Plugin for text similarity tests. Defines a filter for modifying inference call behavior, as well as a {@code TextSimilarityRankBuilder} + * implementation that can be configured to throw an exception at various stages of processing. 
+ */ +public class TextSimilarityTestPlugin extends Plugin implements ActionPlugin { + + private static final String inferenceId = "inference-id"; + private static final String inferenceText = "inference-text"; + private static final float minScore = 0.0f; + + private final SetOnce<TestFilter> testFilter = new SetOnce<>(); + + @Override + public Collection<?> createComponents(PluginServices services) { + testFilter.set(new TestFilter()); + return Collections.emptyList(); + } + + @Override + public List<ActionFilter> getActionFilters() { + return singletonList(testFilter.get()); + } + + private static final String THROWING_REQUEST_ACTION_BASED_RANK_BUILDER_NAME = "throwing_request_action_based_rank"; + + @Override + public List<NamedWriteableRegistry.Entry> getNamedWriteables() { + return List.of( + new NamedWriteableRegistry.Entry( + RankBuilder.class, + THROWING_REQUEST_ACTION_BASED_RANK_BUILDER_NAME, + ThrowingMockRequestActionBasedRankBuilder::new + ) + ); + } + + /** + * Action filter that captures the inference action and injects a mock response. + */ + static class TestFilter implements ActionFilter { + + @Override + public int order() { + return Integer.MIN_VALUE; + } + + @Override + @SuppressWarnings("unchecked") + public <Request extends ActionRequest, Response extends ActionResponse> void apply( + Task task, + String action, + Request request, + ActionListener<Response> listener, + ActionFilterChain<Request, Response> chain + ) { + // For any other action than inference, execute normally + if (action.equals(InferenceAction.INSTANCE.name()) == false) { + chain.proceed(task, action, request, listener); + return; + } + + assert request instanceof InferenceAction.Request; + boolean shouldThrow = (boolean) ((InferenceAction.Request) request).getTaskSettings().getOrDefault("throwing", false); + boolean hasInvalidInferenceResultCount = (boolean) ((InferenceAction.Request) request).getTaskSettings() + .getOrDefault("invalidInferenceResultCount", false); + + if (shouldThrow) { + listener.onFailure(new UnsupportedOperationException("simulated failure")); + } else { + List<RankedDocsResults.RankedDoc> rankedDocsResults = new ArrayList<>(); + List<String> inputs = ((InferenceAction.Request) request).getInput(); + int resultCount = hasInvalidInferenceResultCount ? inputs.size() - 1 : inputs.size(); + for (int i = 0; i < resultCount; i++) { + rankedDocsResults.add(new RankedDocsResults.RankedDoc(i, Float.parseFloat(inputs.get(i)), inputs.get(i))); + } + ActionResponse response = new InferenceAction.Response(new RankedDocsResults(rankedDocsResults)); + listener.onResponse((Response) response); + } + } + } + + public static class ThrowingMockRequestActionBasedRankBuilder extends TextSimilarityRankBuilder { + + public static final ParseField FIELD_FIELD = new ParseField("field"); + public static final ParseField INFERENCE_ID = new ParseField("inference_id"); + public static final ParseField INFERENCE_TEXT = new ParseField("inference_text"); + public static final ParseField THROWING_TYPE_FIELD = new ParseField("throwing-type"); + + static final ConstructingObjectParser<ThrowingMockRequestActionBasedRankBuilder, Void> PARSER = new ConstructingObjectParser<>( + "throwing_request_action_based_rank", + args -> { + int rankWindowSize = args[0] == null ?
DEFAULT_RANK_WINDOW_SIZE : (int) args[0]; + String field = (String) args[1]; + if (field == null || field.isEmpty()) { + throw new IllegalArgumentException("Field cannot be null or empty"); + } + final String inferenceId = (String) args[2]; + final String inferenceText = (String) args[3]; + final float minScore = (float) args[4]; + String throwingType = (String) args[5]; + return new ThrowingMockRequestActionBasedRankBuilder( + rankWindowSize, + field, + inferenceId, + inferenceText, + minScore, + throwingType + ); + } + ); + + static { + PARSER.declareInt(optionalConstructorArg(), RANK_WINDOW_SIZE_FIELD); + PARSER.declareString(constructorArg(), FIELD_FIELD); + PARSER.declareString(constructorArg(), INFERENCE_ID); + PARSER.declareString(constructorArg(), INFERENCE_TEXT); + PARSER.declareString(constructorArg(), THROWING_TYPE_FIELD); + } + + protected final AbstractRerankerIT.ThrowingRankBuilderType throwingRankBuilderType; + + public ThrowingMockRequestActionBasedRankBuilder( + final int rankWindowSize, + final String field, + final String inferenceId, + final String inferenceText, + final float minScore, + final String throwingType + ) { + super(field, inferenceId, inferenceText, rankWindowSize, minScore); + this.throwingRankBuilderType = AbstractRerankerIT.ThrowingRankBuilderType.valueOf(throwingType); + } + + public ThrowingMockRequestActionBasedRankBuilder(StreamInput in) throws IOException { + super(in); + this.throwingRankBuilderType = in.readEnum(AbstractRerankerIT.ThrowingRankBuilderType.class); + } + + @Override + public void doWriteTo(StreamOutput out) throws IOException { + super.doWriteTo(out); + out.writeEnum(throwingRankBuilderType); + } + + @Override + public void doXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + super.doXContent(builder, params); + builder.field(THROWING_TYPE_FIELD.getPreferredName(), throwingRankBuilderType); + } + + @Override + public QueryPhaseRankShardContext buildQueryPhaseShardContext(List queries, int from) { + if (this.throwingRankBuilderType == AbstractRerankerIT.ThrowingRankBuilderType.THROWING_QUERY_PHASE_SHARD_CONTEXT) + return new QueryPhaseRankShardContext(queries, rankWindowSize()) { + @Override + public RankShardResult combineQueryPhaseResults(List rankResults) { + throw new UnsupportedOperationException("qps - simulated failure"); + } + }; + else { + return super.buildQueryPhaseShardContext(queries, from); + } + } + + @Override + public QueryPhaseRankCoordinatorContext buildQueryPhaseCoordinatorContext(int size, int from) { + if (this.throwingRankBuilderType == AbstractRerankerIT.ThrowingRankBuilderType.THROWING_QUERY_PHASE_COORDINATOR_CONTEXT) + return new QueryPhaseRankCoordinatorContext(rankWindowSize()) { + @Override + public ScoreDoc[] rankQueryPhaseResults( + List querySearchResults, + SearchPhaseController.TopDocsStats topDocStats + ) { + throw new UnsupportedOperationException("qpc - simulated failure"); + } + }; + else { + return super.buildQueryPhaseCoordinatorContext(size, from); + } + } + + @Override + public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() { + if (this.throwingRankBuilderType == AbstractRerankerIT.ThrowingRankBuilderType.THROWING_RANK_FEATURE_PHASE_SHARD_CONTEXT) + return new RankFeaturePhaseRankShardContext(field()) { + @Override + public RankShardResult buildRankFeatureShardResult(SearchHits hits, int shardId) { + throw new UnsupportedOperationException("rfs - simulated failure"); + } + }; + else { + return super.buildRankFeaturePhaseShardContext(); + } 
+ } + + @Override + public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorContext(int size, int from, Client client) { + if (this.throwingRankBuilderType == AbstractRerankerIT.ThrowingRankBuilderType.THROWING_RANK_FEATURE_PHASE_COORDINATOR_CONTEXT) + return new TextSimilarityRankFeaturePhaseRankCoordinatorContext( + size, + from, + rankWindowSize(), + client, + inferenceId, + inferenceText, + minScore + ) { + @Override + protected InferenceAction.Request generateRequest(List docFeatures) { + return new InferenceAction.Request( + TaskType.RERANK, + inferenceId, + inferenceText, + docFeatures, + Map.of("throwing", true), + InputType.SEARCH, + InferenceAction.Request.DEFAULT_TIMEOUT + ); + } + }; + else { + return super.buildRankFeaturePhaseCoordinatorContext(size, from, client); + } + } + + @Override + public String getWriteableName() { + return "throwing_request_action_based_rank"; + } + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceUtilsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceUtilsTests.java index 599df8d1cfb3b..86af5e431d78d 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceUtilsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceUtilsTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingByteResults; import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings; import org.elasticsearch.xpack.inference.results.InferenceTextEmbeddingByteResultsTests; import org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests; @@ -36,13 +37,16 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveLong; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalString; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalTimeValue; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredPositiveInteger; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredSecureString; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredString; import static org.elasticsearch.xpack.inference.services.ServiceUtils.getEmbeddingSize; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -284,6 +288,46 @@ public void testRemoveAsOneOfTypesMissingReturnsNull() { assertThat(map.entrySet(), hasSize(3)); } + public void testRemoveAsAdaptiveAllocationsSettings() { + Map map = new HashMap<>( + Map.of("settings", new HashMap<>(Map.of("enabled", true, "min_number_of_allocations", 7, "max_number_of_allocations", 42))) + ); + ValidationException validationException = new ValidationException(); + assertThat( + ServiceUtils.removeAsAdaptiveAllocationsSettings(map, "settings", validationException), + equalTo(new 
AdaptiveAllocationsSettings(true, 7, 42)) + ); + assertThat(validationException.validationErrors(), empty()); + + assertThat(ServiceUtils.removeAsAdaptiveAllocationsSettings(map, "non-existent-key", validationException), nullValue()); + assertThat(validationException.validationErrors(), empty()); + + map = new HashMap<>(Map.of("settings", new HashMap<>(Map.of("enabled", false)))); + assertThat( + ServiceUtils.removeAsAdaptiveAllocationsSettings(map, "settings", validationException), + equalTo(new AdaptiveAllocationsSettings(false, null, null)) + ); + assertThat(validationException.validationErrors(), empty()); + } + + public void testRemoveAsAdaptiveAllocationsSettings_exceptions() { + Map map = new HashMap<>( + Map.of("settings", new HashMap<>(Map.of("enabled", "YES!", "blah", 42, "max_number_of_allocations", -7))) + ); + ValidationException validationException = new ValidationException(); + ServiceUtils.removeAsAdaptiveAllocationsSettings(map, "settings", validationException); + assertThat(validationException.validationErrors(), hasSize(3)); + assertThat( + validationException.validationErrors().get(0), + containsString("field [enabled] is not of the expected type. The value [YES!] cannot be converted to a [Boolean]") + ); + assertThat(validationException.validationErrors().get(1), containsString("[settings] does not allow the setting [blah]")); + assertThat( + validationException.validationErrors().get(2), + containsString("[max_number_of_allocations] must be a positive integer or null") + ); + } + public void testConvertToUri_CreatesUri() { var validation = new ValidationException(); var uri = convertToUri("www.elastic.co", "name", "scope", validation); @@ -466,6 +510,41 @@ public void testExtractOptionalPositiveLong() { assertThat(validation.validationErrors(), hasSize(1)); } + public void testExtractRequiredPositiveInteger_ReturnsValue() { + var validation = new ValidationException(); + validation.addValidationError("previous error"); + Map map = modifiableMap(Map.of("key", 1)); + var parsedInt = extractRequiredPositiveInteger(map, "key", "scope", validation); + + assertThat(validation.validationErrors(), hasSize(1)); + assertNotNull(parsedInt); + assertThat(parsedInt, is(1)); + assertTrue(map.isEmpty()); + } + + public void testExtractRequiredPositiveInteger_AddsErrorForNegativeValue() { + var validation = new ValidationException(); + validation.addValidationError("previous error"); + Map map = modifiableMap(Map.of("key", -1)); + var parsedInt = extractRequiredPositiveInteger(map, "key", "scope", validation); + + assertThat(validation.validationErrors(), hasSize(2)); + assertNull(parsedInt); + assertTrue(map.isEmpty()); + assertThat(validation.validationErrors().get(1), is("[scope] Invalid value [-1]. 
[key] must be a positive integer")); + } + + public void testExtractRequiredPositiveInteger_AddsErrorWhenKeyIsMissing() { + var validation = new ValidationException(); + validation.addValidationError("previous error"); + Map map = modifiableMap(Map.of("key", -1)); + var parsedInt = extractRequiredPositiveInteger(map, "not_key", "scope", validation); + + assertThat(validation.validationErrors(), hasSize(2)); + assertNull(parsedInt); + assertThat(validation.validationErrors().get(1), is("[scope] does not contain the required setting [not_key]")); + } + public void testExtractOptionalEnum_ReturnsNull_WhenFieldDoesNotExist() { var validation = new ValidationException(); Map map = modifiableMap(Map.of("key", "value")); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockSecretSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockSecretSettingsTests.java new file mode 100644 index 0000000000000..904851842a6c8 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockSecretSettingsTests.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; +import org.hamcrest.CoreMatchers; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.ACCESS_KEY_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.SECRET_KEY_FIELD; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockSecretSettingsTests extends AbstractBWCWireSerializationTestCase { + + public void testIt_CreatesSettings_ReturnsNullFromMap_null() { + var secrets = AmazonBedrockSecretSettings.fromMap(null); + assertNull(secrets); + } + + public void testIt_CreatesSettings_FromMap_WithValues() { + var secrets = AmazonBedrockSecretSettings.fromMap( + new HashMap<>(Map.of(ACCESS_KEY_FIELD, "accesstest", SECRET_KEY_FIELD, "secrettest")) + ); + assertThat( + secrets, + is(new AmazonBedrockSecretSettings(new SecureString("accesstest".toCharArray()), new SecureString("secrettest".toCharArray()))) + ); + } + + public void testIt_CreatesSettings_FromMap_IgnoresExtraKeys() { + var secrets = AmazonBedrockSecretSettings.fromMap( + new HashMap<>(Map.of(ACCESS_KEY_FIELD, "accesstest", SECRET_KEY_FIELD, "secrettest", "extrakey", "extravalue")) + ); + assertThat( + secrets, + is(new AmazonBedrockSecretSettings(new SecureString("accesstest".toCharArray()), new SecureString("secrettest".toCharArray()))) + ); + } + + public void 
testIt_FromMap_ThrowsValidationException_AccessKeyMissing() { + var thrownException = expectThrows( + ValidationException.class, + () -> AmazonBedrockSecretSettings.fromMap(new HashMap<>(Map.of(SECRET_KEY_FIELD, "secrettest"))) + ); + + assertThat( + thrownException.getMessage(), + containsString(Strings.format("[secret_settings] does not contain the required setting [%s]", ACCESS_KEY_FIELD)) + ); + } + + public void testIt_FromMap_ThrowsValidationException_SecretKeyMissing() { + var thrownException = expectThrows( + ValidationException.class, + () -> AmazonBedrockSecretSettings.fromMap(new HashMap<>(Map.of(ACCESS_KEY_FIELD, "accesstest"))) + ); + + assertThat( + thrownException.getMessage(), + containsString(Strings.format("[secret_settings] does not contain the required setting [%s]", SECRET_KEY_FIELD)) + ); + } + + public void testToXContent_CreatesProperContent() throws IOException { + var secrets = AmazonBedrockSecretSettings.fromMap( + new HashMap<>(Map.of(ACCESS_KEY_FIELD, "accesstest", SECRET_KEY_FIELD, "secrettest")) + ); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + secrets.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + assertThat(xContentResult, CoreMatchers.is(""" + {"access_key":"accesstest","secret_key":"secrettest"}""")); + } + + public static Map getAmazonBedrockSecretSettingsMap(String accessKey, String secretKey) { + return new HashMap(Map.of(ACCESS_KEY_FIELD, accessKey, SECRET_KEY_FIELD, secretKey)); + } + + @Override + protected AmazonBedrockSecretSettings mutateInstanceForVersion(AmazonBedrockSecretSettings instance, TransportVersion version) { + return instance; + } + + @Override + protected Writeable.Reader instanceReader() { + return AmazonBedrockSecretSettings::new; + } + + @Override + protected AmazonBedrockSecretSettings createTestInstance() { + return createRandom(); + } + + @Override + protected AmazonBedrockSecretSettings mutateInstance(AmazonBedrockSecretSettings instance) throws IOException { + return randomValueOtherThan(instance, AmazonBedrockSecretSettingsTests::createRandom); + } + + private static AmazonBedrockSecretSettings createRandom() { + return new AmazonBedrockSecretSettings(new SecureString(randomAlphaOfLength(10)), new SecureString(randomAlphaOfLength(10))); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java new file mode 100644 index 0000000000000..ae413fc17425c --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java @@ -0,0 +1,1136 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.ChunkedInferenceServiceResults; +import org.elasticsearch.inference.ChunkingOptions; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.inference.InputType; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.ModelSecrets; +import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.inference.action.InferenceAction; +import org.elasticsearch.xpack.core.inference.results.ChatCompletionResults; +import org.elasticsearch.xpack.core.inference.results.InferenceChunkedTextEmbeddingFloatResults; +import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; +import org.elasticsearch.xpack.inference.Utils; +import org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockMockRequestSender; +import org.elasticsearch.xpack.inference.external.amazonbedrock.AmazonBedrockRequestSender; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.services.ServiceComponentsTests; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionModel; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionModelTests; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionServiceSettings; +import org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionTaskSettings; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModel; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModelTests; +import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsServiceSettings; +import org.hamcrest.CoreMatchers; +import org.hamcrest.MatcherAssert; +import org.hamcrest.Matchers; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; +import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; +import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; +import static org.elasticsearch.xpack.inference.results.ChatCompletionResultsTests.buildExpectationCompletion; +import static org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests.buildExpectationFloat; +import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; +import static 
org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockSecretSettingsTests.getAmazonBedrockSecretSettingsMap; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionServiceSettingsTests.createChatCompletionRequestSettingsMap; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionTaskSettingsTests.getChatCompletionTaskSettingsMap; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsServiceSettingsTests.createEmbeddingsRequestSettingsMap; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +public class AmazonBedrockServiceTests extends ESTestCase { + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + private ThreadPool threadPool; + + @Before + public void init() throws Exception { + threadPool = createThreadPool(inferenceUtilityPool()); + } + + @After + public void shutdown() throws IOException { + terminate(threadPool); + } + + public void testParseRequestConfig_CreatesAnAmazonBedrockModel() throws IOException { + try (var service = createAmazonBedrockService()) { + ActionListener modelVerificationListener = ActionListener.wrap(model -> { + assertThat(model, instanceOf(AmazonBedrockEmbeddingsModel.class)); + + var settings = (AmazonBedrockEmbeddingsServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.AMAZONTITAN)); + var secretSettings = (AmazonBedrockSecretSettings) model.getSecretSettings(); + assertThat(secretSettings.accessKey.toString(), is("access")); + assertThat(secretSettings.secretKey.toString(), is("secret")); + }, exception -> fail("Unexpected exception: " + exception)); + + service.parseRequestConfig( + "id", + TaskType.TEXT_EMBEDDING, + getRequestConfigMap( + createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, null, null, null), + Map.of(), + getAmazonBedrockSecretSettingsMap("access", "secret") + ), + Set.of(), + modelVerificationListener + ); + } + } + + public void testParseRequestConfig_ThrowsUnsupportedModelType() throws IOException { + try (var service = createAmazonBedrockService()) { + ActionListener modelVerificationListener = ActionListener.wrap( + model -> fail("Expected exception, but got model: " + model), + exception -> { + assertThat(exception, instanceOf(ElasticsearchStatusException.class)); + assertThat(exception.getMessage(), is("The [amazonbedrock] service does not support task type [sparse_embedding]")); + } + ); + + service.parseRequestConfig( + "id", + TaskType.SPARSE_EMBEDDING, + getRequestConfigMap( + createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, false, null, null), + Map.of(), + getAmazonBedrockSecretSettingsMap("access", "secret") + ), + Set.of(), + modelVerificationListener + ); + } + } + + public void testCreateModel_ForEmbeddingsTask_InvalidProvider() throws IOException { + try (var service = createAmazonBedrockService()) { + ActionListener modelVerificationListener = 
ActionListener.wrap( + model -> fail("Expected exception, but got model: " + model), + exception -> { + assertThat(exception, instanceOf(ElasticsearchStatusException.class)); + assertThat(exception.getMessage(), is("The [text_embedding] task type for provider [anthropic] is not available")); + } + ); + + service.parseRequestConfig( + "id", + TaskType.TEXT_EMBEDDING, + getRequestConfigMap( + createEmbeddingsRequestSettingsMap("region", "model", "anthropic", null, null, null, null), + Map.of(), + getAmazonBedrockSecretSettingsMap("access", "secret") + ), + Set.of(), + modelVerificationListener + ); + } + } + + public void testCreateModel_TopKParameter_NotAvailable() throws IOException { + try (var service = createAmazonBedrockService()) { + ActionListener modelVerificationListener = ActionListener.wrap( + model -> fail("Expected exception, but got model: " + model), + exception -> { + assertThat(exception, instanceOf(ElasticsearchStatusException.class)); + assertThat(exception.getMessage(), is("The [top_k] task parameter is not available for provider [amazontitan]")); + } + ); + + service.parseRequestConfig( + "id", + TaskType.COMPLETION, + getRequestConfigMap( + createChatCompletionRequestSettingsMap("region", "model", "amazontitan"), + getChatCompletionTaskSettingsMap(1.0, 0.5, 0.2, 128), + getAmazonBedrockSecretSettingsMap("access", "secret") + ), + Set.of(), + modelVerificationListener + ); + } + } + + public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInConfig() throws IOException { + try (var service = createAmazonBedrockService()) { + var config = getRequestConfigMap( + createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, null, null, null), + Map.of(), + getAmazonBedrockSecretSettingsMap("access", "secret") + ); + + config.put("extra_key", "value"); + + ActionListener modelVerificationListener = ActionListener.wrap( + model -> fail("Expected exception, but got model: " + model), + exception -> { + assertThat(exception, instanceOf(ElasticsearchStatusException.class)); + assertThat( + exception.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [amazonbedrock] service") + ); + } + ); + + service.parseRequestConfig("id", TaskType.TEXT_EMBEDDING, config, Set.of(), modelVerificationListener); + } + } + + public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInServiceSettingsMap() throws IOException { + try (var service = createAmazonBedrockService()) { + var serviceSettings = createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, null, null, null); + serviceSettings.put("extra_key", "value"); + + var config = getRequestConfigMap(serviceSettings, Map.of(), getAmazonBedrockSecretSettingsMap("access", "secret")); + + ActionListener modelVerificationListener = ActionListener.wrap((model) -> { + fail("Expected exception, but got model: " + model); + }, e -> { + assertThat(e, instanceOf(ElasticsearchStatusException.class)); + assertThat( + e.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [amazonbedrock] service") + ); + }); + + service.parseRequestConfig("id", TaskType.TEXT_EMBEDDING, config, Set.of(), modelVerificationListener); + } + } + + public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInTaskSettingsMap() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createChatCompletionRequestSettingsMap("region", "model", "anthropic"); + var taskSettingsMap = getChatCompletionTaskSettingsMap(1.0, 0.5, 
0.2, 128); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + taskSettingsMap.put("extra_key", "value"); + + var config = getRequestConfigMap(settingsMap, taskSettingsMap, secretSettingsMap); + + ActionListener modelVerificationListener = ActionListener.wrap((model) -> { + fail("Expected exception, but got model: " + model); + }, e -> { + assertThat(e, instanceOf(ElasticsearchStatusException.class)); + assertThat( + e.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [amazonbedrock] service") + ); + }); + + service.parseRequestConfig("id", TaskType.COMPLETION, config, Set.of(), modelVerificationListener); + } + } + + public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInSecretSettingsMap() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createChatCompletionRequestSettingsMap("region", "model", "anthropic"); + var taskSettingsMap = getChatCompletionTaskSettingsMap(1.0, 0.5, 0.2, 128); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + secretSettingsMap.put("extra_key", "value"); + + var config = getRequestConfigMap(settingsMap, taskSettingsMap, secretSettingsMap); + + ActionListener modelVerificationListener = ActionListener.wrap((model) -> { + fail("Expected exception, but got model: " + model); + }, e -> { + assertThat(e, instanceOf(ElasticsearchStatusException.class)); + assertThat( + e.getMessage(), + is("Model configuration contains settings [{extra_key=value}] unknown to the [amazonbedrock] service") + ); + }); + + service.parseRequestConfig("id", TaskType.COMPLETION, config, Set.of(), modelVerificationListener); + } + } + + public void testParseRequestConfig_MovesModel() throws IOException { + try (var service = createAmazonBedrockService()) { + ActionListener modelVerificationListener = ActionListener.wrap(model -> { + assertThat(model, instanceOf(AmazonBedrockEmbeddingsModel.class)); + + var settings = (AmazonBedrockEmbeddingsServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.AMAZONTITAN)); + var secretSettings = (AmazonBedrockSecretSettings) model.getSecretSettings(); + assertThat(secretSettings.accessKey.toString(), is("access")); + assertThat(secretSettings.secretKey.toString(), is("secret")); + }, exception -> fail("Unexpected exception: " + exception)); + + service.parseRequestConfig( + "id", + TaskType.TEXT_EMBEDDING, + getRequestConfigMap( + createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, null, null, null), + Map.of(), + getAmazonBedrockSecretSettingsMap("access", "secret") + ), + Set.of(), + modelVerificationListener + ); + } + } + + public void testCreateModel_ForEmbeddingsTask_DimensionsIsNotAllowed() throws IOException { + try (var service = createAmazonBedrockService()) { + ActionListener modelVerificationListener = ActionListener.wrap( + model -> fail("Expected exception, but got model: " + model), + exception -> { + assertThat(exception, instanceOf(ValidationException.class)); + assertThat(exception.getMessage(), containsString("[service_settings] does not allow the setting [dimensions]")); + } + ); + + service.parseRequestConfig( + "id", + TaskType.TEXT_EMBEDDING, + getRequestConfigMap( + createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", 512, null, null, null), + Map.of(), + 
getAmazonBedrockSecretSettingsMap("access", "secret") + ), + Set.of(), + modelVerificationListener + ); + } + } + + public void testParsePersistedConfigWithSecrets_CreatesAnAmazonBedrockEmbeddingsModel() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, false, null, null); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + var persistedConfig = getPersistedConfigMap(settingsMap, new HashMap(Map.of()), secretSettingsMap); + + var model = service.parsePersistedConfigWithSecrets( + "id", + TaskType.TEXT_EMBEDDING, + persistedConfig.config(), + persistedConfig.secrets() + ); + + assertThat(model, instanceOf(AmazonBedrockEmbeddingsModel.class)); + + var settings = (AmazonBedrockEmbeddingsServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.AMAZONTITAN)); + var secretSettings = (AmazonBedrockSecretSettings) model.getSecretSettings(); + assertThat(secretSettings.accessKey.toString(), is("access")); + assertThat(secretSettings.secretKey.toString(), is("secret")); + } + } + + public void testParsePersistedConfigWithSecrets_ThrowsErrorTryingToParseInvalidModel() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createChatCompletionRequestSettingsMap("region", "model", "amazontitan"); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + var persistedConfig = getPersistedConfigMap(settingsMap, Map.of(), secretSettingsMap); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parsePersistedConfigWithSecrets( + "id", + TaskType.SPARSE_EMBEDDING, + persistedConfig.config(), + persistedConfig.secrets() + ) + ); + + assertThat( + thrownException.getMessage(), + is("Failed to parse stored model [id] for [amazonbedrock] service, please delete and add the service again") + ); + } + } + + public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInConfig() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, false, null, null); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + var persistedConfig = getPersistedConfigMap(settingsMap, new HashMap(Map.of()), secretSettingsMap); + persistedConfig.config().put("extra_key", "value"); + + var model = service.parsePersistedConfigWithSecrets( + "id", + TaskType.TEXT_EMBEDDING, + persistedConfig.config(), + persistedConfig.secrets() + ); + + assertThat(model, instanceOf(AmazonBedrockEmbeddingsModel.class)); + + var settings = (AmazonBedrockEmbeddingsServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.AMAZONTITAN)); + var secretSettings = (AmazonBedrockSecretSettings) model.getSecretSettings(); + assertThat(secretSettings.accessKey.toString(), is("access")); + assertThat(secretSettings.secretKey.toString(), is("secret")); + } + } + + public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInSecretsSettings() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = 
createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, false, null, null); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + secretSettingsMap.put("extra_key", "value"); + + var persistedConfig = getPersistedConfigMap(settingsMap, new HashMap(Map.of()), secretSettingsMap); + + var model = service.parsePersistedConfigWithSecrets( + "id", + TaskType.TEXT_EMBEDDING, + persistedConfig.config(), + persistedConfig.secrets() + ); + + assertThat(model, instanceOf(AmazonBedrockEmbeddingsModel.class)); + + var settings = (AmazonBedrockEmbeddingsServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.AMAZONTITAN)); + var secretSettings = (AmazonBedrockSecretSettings) model.getSecretSettings(); + assertThat(secretSettings.accessKey.toString(), is("access")); + assertThat(secretSettings.secretKey.toString(), is("secret")); + } + } + + public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInSecrets() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, false, null, null); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + var persistedConfig = getPersistedConfigMap(settingsMap, new HashMap(Map.of()), secretSettingsMap); + persistedConfig.secrets().put("extra_key", "value"); + + var model = service.parsePersistedConfigWithSecrets( + "id", + TaskType.TEXT_EMBEDDING, + persistedConfig.config(), + persistedConfig.secrets() + ); + + assertThat(model, instanceOf(AmazonBedrockEmbeddingsModel.class)); + + var settings = (AmazonBedrockEmbeddingsServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.AMAZONTITAN)); + var secretSettings = (AmazonBedrockSecretSettings) model.getSecretSettings(); + assertThat(secretSettings.accessKey.toString(), is("access")); + assertThat(secretSettings.secretKey.toString(), is("secret")); + } + } + + public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInServiceSettings() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, false, null, null); + settingsMap.put("extra_key", "value"); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + var persistedConfig = getPersistedConfigMap(settingsMap, new HashMap(Map.of()), secretSettingsMap); + + var model = service.parsePersistedConfigWithSecrets( + "id", + TaskType.TEXT_EMBEDDING, + persistedConfig.config(), + persistedConfig.secrets() + ); + + assertThat(model, instanceOf(AmazonBedrockEmbeddingsModel.class)); + + var settings = (AmazonBedrockEmbeddingsServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.AMAZONTITAN)); + var secretSettings = (AmazonBedrockSecretSettings) model.getSecretSettings(); + assertThat(secretSettings.accessKey.toString(), is("access")); + assertThat(secretSettings.secretKey.toString(), is("secret")); + } + } + + public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInTaskSettings() throws 
IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createChatCompletionRequestSettingsMap("region", "model", "anthropic"); + var taskSettingsMap = getChatCompletionTaskSettingsMap(1.0, 0.5, 0.2, 128); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + taskSettingsMap.put("extra_key", "value"); + + var persistedConfig = getPersistedConfigMap(settingsMap, taskSettingsMap, secretSettingsMap); + + var model = service.parsePersistedConfigWithSecrets( + "id", + TaskType.COMPLETION, + persistedConfig.config(), + persistedConfig.secrets() + ); + + assertThat(model, instanceOf(AmazonBedrockChatCompletionModel.class)); + + var settings = (AmazonBedrockChatCompletionServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.ANTHROPIC)); + var taskSettings = (AmazonBedrockChatCompletionTaskSettings) model.getTaskSettings(); + assertThat(taskSettings.temperature(), is(1.0)); + assertThat(taskSettings.topP(), is(0.5)); + assertThat(taskSettings.topK(), is(0.2)); + assertThat(taskSettings.maxNewTokens(), is(128)); + var secretSettings = (AmazonBedrockSecretSettings) model.getSecretSettings(); + assertThat(secretSettings.accessKey.toString(), is("access")); + assertThat(secretSettings.secretKey.toString(), is("secret")); + } + } + + public void testParsePersistedConfig_CreatesAnAmazonBedrockEmbeddingsModel() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, false, null, null); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + var persistedConfig = getPersistedConfigMap(settingsMap, new HashMap(Map.of()), secretSettingsMap); + + var model = service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config()); + + assertThat(model, instanceOf(AmazonBedrockEmbeddingsModel.class)); + + var settings = (AmazonBedrockEmbeddingsServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.AMAZONTITAN)); + assertNull(model.getSecretSettings()); + } + } + + public void testParsePersistedConfig_CreatesAnAmazonBedrockChatCompletionModel() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createChatCompletionRequestSettingsMap("region", "model", "anthropic"); + var taskSettingsMap = getChatCompletionTaskSettingsMap(1.0, 0.5, 0.2, 128); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + var persistedConfig = getPersistedConfigMap(settingsMap, taskSettingsMap, secretSettingsMap); + var model = service.parsePersistedConfig("id", TaskType.COMPLETION, persistedConfig.config()); + + assertThat(model, instanceOf(AmazonBedrockChatCompletionModel.class)); + + var settings = (AmazonBedrockChatCompletionServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.ANTHROPIC)); + var taskSettings = (AmazonBedrockChatCompletionTaskSettings) model.getTaskSettings(); + assertThat(taskSettings.temperature(), is(1.0)); + assertThat(taskSettings.topP(), is(0.5)); + assertThat(taskSettings.topK(), is(0.2)); + 
assertThat(taskSettings.maxNewTokens(), is(128)); + assertNull(model.getSecretSettings()); + } + } + + public void testParsePersistedConfig_ThrowsErrorTryingToParseInvalidModel() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, false, null, null); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + var persistedConfig = getPersistedConfigMap(settingsMap, new HashMap(Map.of()), secretSettingsMap); + + var thrownException = expectThrows( + ElasticsearchStatusException.class, + () -> service.parsePersistedConfig("id", TaskType.SPARSE_EMBEDDING, persistedConfig.config()) + ); + + assertThat( + thrownException.getMessage(), + is("Failed to parse stored model [id] for [amazonbedrock] service, please delete and add the service again") + ); + } + } + + public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInConfig() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, false, null, null); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + var persistedConfig = getPersistedConfigMap(settingsMap, new HashMap(Map.of()), secretSettingsMap); + persistedConfig.config().put("extra_key", "value"); + + var model = service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config()); + + assertThat(model, instanceOf(AmazonBedrockEmbeddingsModel.class)); + + var settings = (AmazonBedrockEmbeddingsServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.AMAZONTITAN)); + assertNull(model.getSecretSettings()); + } + } + + public void testParsePersistedConfig_NotThrowWhenAnExtraKeyExistsInServiceSettings() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createEmbeddingsRequestSettingsMap("region", "model", "amazontitan", null, false, null, null); + settingsMap.put("extra_key", "value"); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + var persistedConfig = getPersistedConfigMap(settingsMap, new HashMap(Map.of()), secretSettingsMap); + persistedConfig.config().put("extra_key", "value"); + + var model = service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config()); + + assertThat(model, instanceOf(AmazonBedrockEmbeddingsModel.class)); + + var settings = (AmazonBedrockEmbeddingsServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.AMAZONTITAN)); + assertNull(model.getSecretSettings()); + } + } + + public void testParsePersistedConfig_NotThrowWhenAnExtraKeyExistsInTaskSettings() throws IOException { + try (var service = createAmazonBedrockService()) { + var settingsMap = createChatCompletionRequestSettingsMap("region", "model", "anthropic"); + var taskSettingsMap = getChatCompletionTaskSettingsMap(1.0, 0.5, 0.2, 128); + taskSettingsMap.put("extra_key", "value"); + var secretSettingsMap = getAmazonBedrockSecretSettingsMap("access", "secret"); + + var persistedConfig = getPersistedConfigMap(settingsMap, taskSettingsMap, secretSettingsMap); + var model = service.parsePersistedConfig("id", TaskType.COMPLETION, 
persistedConfig.config()); + + assertThat(model, instanceOf(AmazonBedrockChatCompletionModel.class)); + + var settings = (AmazonBedrockChatCompletionServiceSettings) model.getServiceSettings(); + assertThat(settings.region(), is("region")); + assertThat(settings.model(), is("model")); + assertThat(settings.provider(), is(AmazonBedrockProvider.ANTHROPIC)); + var taskSettings = (AmazonBedrockChatCompletionTaskSettings) model.getTaskSettings(); + assertThat(taskSettings.temperature(), is(1.0)); + assertThat(taskSettings.topP(), is(0.5)); + assertThat(taskSettings.topK(), is(0.2)); + assertThat(taskSettings.maxNewTokens(), is(128)); + assertNull(model.getSecretSettings()); + } + } + + public void testInfer_ThrowsErrorWhenModelIsNotAmazonBedrockModel() throws IOException { + var sender = mock(Sender.class); + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + + var amazonBedrockFactory = new AmazonBedrockMockRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, Settings.EMPTY), + mockClusterServiceEmpty() + ); + var mockModel = getInvalidModel("model_id", "service_name"); + + try (var service = new AmazonBedrockService(factory, amazonBedrockFactory, createWithEmptySettings(threadPool))) { + PlainActionFuture listener = new PlainActionFuture<>(); + service.infer( + mockModel, + null, + List.of(""), + new HashMap<>(), + InputType.INGEST, + InferenceAction.Request.DEFAULT_TIMEOUT, + listener + ); + + var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); + assertThat( + thrownException.getMessage(), + is("The internal model was invalid, please delete the service [service_name] with id [model_id] and add it again.") + ); + + verify(factory, times(1)).createSender(); + verify(sender, times(1)).start(); + } + verify(sender, times(1)).close(); + verifyNoMoreInteractions(factory); + verifyNoMoreInteractions(sender); + } + + public void testInfer_SendsRequest_ForEmbeddingsModel() throws IOException { + var sender = mock(Sender.class); + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + + var amazonBedrockFactory = new AmazonBedrockMockRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, Settings.EMPTY), + mockClusterServiceEmpty() + ); + + try (var service = new AmazonBedrockService(factory, amazonBedrockFactory, createWithEmptySettings(threadPool))) { + try (var requestSender = (AmazonBedrockMockRequestSender) amazonBedrockFactory.createSender()) { + var results = new InferenceTextEmbeddingFloatResults( + List.of(new InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding(new float[] { 0.123F, 0.678F })) + ); + requestSender.enqueue(results); + + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + "access", + "secret" + ); + PlainActionFuture listener = new PlainActionFuture<>(); + service.infer( + model, + null, + List.of("abc"), + new HashMap<>(), + InputType.INGEST, + InferenceAction.Request.DEFAULT_TIMEOUT, + listener + ); + + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), Matchers.is(buildExpectationFloat(List.of(new float[] { 0.123F, 0.678F })))); + } + } + } + + public void testInfer_SendsRequest_ForChatCompletionModel() throws IOException { + var sender = mock(Sender.class); + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + 
+ var amazonBedrockFactory = new AmazonBedrockMockRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, Settings.EMPTY), + mockClusterServiceEmpty() + ); + + try (var service = new AmazonBedrockService(factory, amazonBedrockFactory, createWithEmptySettings(threadPool))) { + try (var requestSender = (AmazonBedrockMockRequestSender) amazonBedrockFactory.createSender()) { + var mockResults = new ChatCompletionResults(List.of(new ChatCompletionResults.Result("test result"))); + requestSender.enqueue(mockResults); + + var model = AmazonBedrockChatCompletionModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + "access", + "secret" + ); + PlainActionFuture listener = new PlainActionFuture<>(); + service.infer( + model, + null, + List.of("abc"), + new HashMap<>(), + InputType.INGEST, + InferenceAction.Request.DEFAULT_TIMEOUT, + listener + ); + + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), Matchers.is(buildExpectationCompletion(List.of("test result")))); + } + } + } + + public void testCheckModelConfig_IncludesMaxTokens_ForEmbeddingsModel() throws IOException { + var sender = mock(Sender.class); + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + + var amazonBedrockFactory = new AmazonBedrockMockRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, Settings.EMPTY), + mockClusterServiceEmpty() + ); + + try (var service = new AmazonBedrockService(factory, amazonBedrockFactory, createWithEmptySettings(threadPool))) { + try (var requestSender = (AmazonBedrockMockRequestSender) amazonBedrockFactory.createSender()) { + var results = new InferenceTextEmbeddingFloatResults( + List.of(new InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding(new float[] { 0.123F, 0.678F })) + ); + requestSender.enqueue(results); + + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + null, + false, + 100, + null, + null, + "access", + "secret" + ); + + PlainActionFuture listener = new PlainActionFuture<>(); + service.checkModelConfig(model, listener); + var result = listener.actionGet(TIMEOUT); + assertThat( + result, + is( + AmazonBedrockEmbeddingsModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + 2, + false, + 100, + SimilarityMeasure.COSINE, + null, + "access", + "secret" + ) + ) + ); + var inputStrings = requestSender.getInputs(); + + MatcherAssert.assertThat(inputStrings, Matchers.is(List.of("how big"))); + } + } + } + + public void testCheckModelConfig_HasSimilarity_ForEmbeddingsModel() throws IOException { + var sender = mock(Sender.class); + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + + var amazonBedrockFactory = new AmazonBedrockMockRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, Settings.EMPTY), + mockClusterServiceEmpty() + ); + + try (var service = new AmazonBedrockService(factory, amazonBedrockFactory, createWithEmptySettings(threadPool))) { + try (var requestSender = (AmazonBedrockMockRequestSender) amazonBedrockFactory.createSender()) { + var results = new InferenceTextEmbeddingFloatResults( + List.of(new InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding(new float[] { 0.123F, 0.678F })) + ); + requestSender.enqueue(results); + + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "id", + "region", + 
"model", + AmazonBedrockProvider.AMAZONTITAN, + null, + false, + null, + SimilarityMeasure.COSINE, + null, + "access", + "secret" + ); + + PlainActionFuture listener = new PlainActionFuture<>(); + service.checkModelConfig(model, listener); + var result = listener.actionGet(TIMEOUT); + assertThat( + result, + is( + AmazonBedrockEmbeddingsModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + 2, + false, + null, + SimilarityMeasure.COSINE, + null, + "access", + "secret" + ) + ) + ); + var inputStrings = requestSender.getInputs(); + + MatcherAssert.assertThat(inputStrings, Matchers.is(List.of("how big"))); + } + } + } + + public void testCheckModelConfig_ThrowsIfEmbeddingSizeDoesNotMatchValueSetByUser() throws IOException { + var sender = mock(Sender.class); + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + + var amazonBedrockFactory = new AmazonBedrockMockRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, Settings.EMPTY), + mockClusterServiceEmpty() + ); + + try (var service = new AmazonBedrockService(factory, amazonBedrockFactory, createWithEmptySettings(threadPool))) { + try (var requestSender = (AmazonBedrockMockRequestSender) amazonBedrockFactory.createSender()) { + var results = new InferenceTextEmbeddingFloatResults( + List.of(new InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding(new float[] { 0.123F, 0.678F })) + ); + requestSender.enqueue(results); + + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + 3, + true, + null, + null, + null, + "access", + "secret" + ); + + PlainActionFuture listener = new PlainActionFuture<>(); + service.checkModelConfig(model, listener); + + var exception = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); + assertThat( + exception.getMessage(), + is( + "The retrieved embeddings size [2] does not match the size specified in the settings [3]. 
" + + "Please recreate the [id] configuration with the correct dimensions" + ) + ); + + var inputStrings = requestSender.getInputs(); + MatcherAssert.assertThat(inputStrings, Matchers.is(List.of("how big"))); + } + } + } + + public void testCheckModelConfig_ReturnsNewModelReference_AndDoesNotSendDimensionsField_WhenNotSetByUser() throws IOException { + var sender = mock(Sender.class); + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + + var amazonBedrockFactory = new AmazonBedrockMockRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, Settings.EMPTY), + mockClusterServiceEmpty() + ); + + try (var service = new AmazonBedrockService(factory, amazonBedrockFactory, createWithEmptySettings(threadPool))) { + try (var requestSender = (AmazonBedrockMockRequestSender) amazonBedrockFactory.createSender()) { + var results = new InferenceTextEmbeddingFloatResults( + List.of(new InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding(new float[] { 0.123F, 0.678F })) + ); + requestSender.enqueue(results); + + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + 100, + false, + null, + SimilarityMeasure.COSINE, + null, + "access", + "secret" + ); + + PlainActionFuture listener = new PlainActionFuture<>(); + service.checkModelConfig(model, listener); + var result = listener.actionGet(TIMEOUT); + assertThat( + result, + is( + AmazonBedrockEmbeddingsModelTests.createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + 2, + false, + null, + SimilarityMeasure.COSINE, + null, + "access", + "secret" + ) + ) + ); + var inputStrings = requestSender.getInputs(); + + MatcherAssert.assertThat(inputStrings, Matchers.is(List.of("how big"))); + } + } + } + + public void testInfer_UnauthorizedResponse() throws IOException { + var sender = mock(Sender.class); + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + + var amazonBedrockFactory = new AmazonBedrockRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, Settings.EMPTY), + mockClusterServiceEmpty() + ); + + try (var service = new AmazonBedrockService(factory, amazonBedrockFactory, createWithEmptySettings(threadPool))) { + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "id", + "us-east-1", + "amazon.titan-embed-text-v1", + AmazonBedrockProvider.AMAZONTITAN, + "_INVALID_AWS_ACCESS_KEY_", + "_INVALID_AWS_SECRET_KEY_" + ); + PlainActionFuture listener = new PlainActionFuture<>(); + service.infer( + model, + null, + List.of("abc"), + new HashMap<>(), + InputType.INGEST, + InferenceAction.Request.DEFAULT_TIMEOUT, + listener + ); + + var exceptionThrown = assertThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + assertThat(exceptionThrown.getCause().getMessage(), containsString("The security token included in the request is invalid")); + } + } + + public void testChunkedInfer_CallsInfer_ConvertsFloatResponse_ForEmbeddings() throws IOException { + var sender = mock(Sender.class); + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + + var amazonBedrockFactory = new AmazonBedrockMockRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, Settings.EMPTY), + mockClusterServiceEmpty() + ); + + try (var service = new AmazonBedrockService(factory, amazonBedrockFactory, createWithEmptySettings(threadPool))) 
{
+            try (var requestSender = (AmazonBedrockMockRequestSender) amazonBedrockFactory.createSender()) {
+                {
+                    var mockResults1 = new InferenceTextEmbeddingFloatResults(
+                        List.of(new InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding(new float[] { 0.123F, 0.678F }))
+                    );
+                    requestSender.enqueue(mockResults1);
+                }
+                {
+                    var mockResults2 = new InferenceTextEmbeddingFloatResults(
+                        List.of(new InferenceTextEmbeddingFloatResults.InferenceFloatEmbedding(new float[] { 0.223F, 0.278F }))
+                    );
+                    requestSender.enqueue(mockResults2);
+                }
+
+                var model = AmazonBedrockEmbeddingsModelTests.createModel(
+                    "id",
+                    "region",
+                    "model",
+                    AmazonBedrockProvider.AMAZONTITAN,
+                    "access",
+                    "secret"
+                );
+                PlainActionFuture<List<ChunkedInferenceServiceResults>> listener = new PlainActionFuture<>();
+                service.chunkedInfer(
+                    model,
+                    List.of("abc", "xyz"),
+                    new HashMap<>(),
+                    InputType.INGEST,
+                    new ChunkingOptions(null, null),
+                    InferenceAction.Request.DEFAULT_TIMEOUT,
+                    listener
+                );
+
+                var results = listener.actionGet(TIMEOUT);
+                assertThat(results, hasSize(2));
+                {
+                    assertThat(results.get(0), CoreMatchers.instanceOf(InferenceChunkedTextEmbeddingFloatResults.class));
+                    var floatResult = (InferenceChunkedTextEmbeddingFloatResults) results.get(0);
+                    assertThat(floatResult.chunks(), hasSize(1));
+                    assertEquals("abc", floatResult.chunks().get(0).matchedText());
+                    assertArrayEquals(new float[] { 0.123F, 0.678F }, floatResult.chunks().get(0).embedding(), 0.0f);
+                }
+                {
+                    assertThat(results.get(1), CoreMatchers.instanceOf(InferenceChunkedTextEmbeddingFloatResults.class));
+                    var floatResult = (InferenceChunkedTextEmbeddingFloatResults) results.get(1);
+                    assertThat(floatResult.chunks(), hasSize(1));
+                    assertEquals("xyz", floatResult.chunks().get(0).matchedText());
+                    assertArrayEquals(new float[] { 0.223F, 0.278F }, floatResult.chunks().get(0).embedding(), 0.0f);
+                }
+            }
+        }
+    }
+
+    private AmazonBedrockService createAmazonBedrockService() {
+        var amazonBedrockFactory = new AmazonBedrockMockRequestSender.Factory(
+            ServiceComponentsTests.createWithSettings(threadPool, Settings.EMPTY),
+            mockClusterServiceEmpty()
+        );
+        return new AmazonBedrockService(mock(HttpRequestSender.Factory.class), amazonBedrockFactory, createWithEmptySettings(threadPool));
+    }
+
+    private Map<String, Object> getRequestConfigMap(
+        Map<String, Object> serviceSettings,
+        Map<String, Object> taskSettings,
+        Map<String, Object> secretSettings
+    ) {
+        var builtServiceSettings = new HashMap<>();
+        builtServiceSettings.putAll(serviceSettings);
+        builtServiceSettings.putAll(secretSettings);
+
+        return new HashMap<>(
+            Map.of(ModelConfigurations.SERVICE_SETTINGS, builtServiceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)
+        );
+    }
+
+    private Utils.PersistedConfig getPersistedConfigMap(
+        Map<String, Object> serviceSettings,
+        Map<String, Object> taskSettings,
+        Map<String, Object> secretSettings
+    ) {
+
+        return new Utils.PersistedConfig(
+            new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)),
+            new HashMap<>(Map.of(ModelSecrets.SECRET_SETTINGS, secretSettings))
+        );
+    }
+}
diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionModelTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionModelTests.java
new file mode 100644
index 0000000000000..22173943ff432
--- /dev/null
+++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionModelTests.java
@@
-0,0 +1,221 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.completion; + +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockSecretSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; + +import static org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionTaskSettingsTests.getChatCompletionTaskSettingsMap; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.sameInstance; + +public class AmazonBedrockChatCompletionModelTests extends ESTestCase { + public void testOverrideWith_OverridesWithoutValues() { + var model = createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + 1.0, + 0.5, + 0.6, + 512, + null, + "access_key", + "secret_key" + ); + var requestTaskSettingsMap = getChatCompletionTaskSettingsMap(null, null, null, null); + var overriddenModel = AmazonBedrockChatCompletionModel.of(model, requestTaskSettingsMap); + + assertThat(overriddenModel, sameInstance(overriddenModel)); + } + + public void testOverrideWith_temperature() { + var model = createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + 1.0, + null, + null, + null, + null, + "access_key", + "secret_key" + ); + var requestTaskSettings = getChatCompletionTaskSettingsMap(0.5, null, null, null); + var overriddenModel = AmazonBedrockChatCompletionModel.of(model, requestTaskSettings); + assertThat( + overriddenModel, + is( + createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + 0.5, + null, + null, + null, + null, + "access_key", + "secret_key" + ) + ) + ); + } + + public void testOverrideWith_topP() { + var model = createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + null, + 0.8, + null, + null, + null, + "access_key", + "secret_key" + ); + var requestTaskSettings = getChatCompletionTaskSettingsMap(null, 0.5, null, null); + var overriddenModel = AmazonBedrockChatCompletionModel.of(model, requestTaskSettings); + assertThat( + overriddenModel, + is( + createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + null, + 0.5, + null, + null, + null, + "access_key", + "secret_key" + ) + ) + ); + } + + public void testOverrideWith_topK() { + var model = createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + null, + null, + 1.0, + null, + null, + "access_key", + "secret_key" + ); + var requestTaskSettings = getChatCompletionTaskSettingsMap(null, null, 0.8, null); + var overriddenModel = AmazonBedrockChatCompletionModel.of(model, requestTaskSettings); + assertThat( + overriddenModel, + is( + createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + null, + null, + 0.8, + null, + null, + "access_key", + "secret_key" + ) + ) + ); + } + + public void testOverrideWith_maxNewTokens() { + var model = createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + null, + 
null, + null, + 512, + null, + "access_key", + "secret_key" + ); + var requestTaskSettings = getChatCompletionTaskSettingsMap(null, null, null, 128); + var overriddenModel = AmazonBedrockChatCompletionModel.of(model, requestTaskSettings); + assertThat( + overriddenModel, + is( + createModel( + "id", + "region", + "model", + AmazonBedrockProvider.AMAZONTITAN, + null, + null, + null, + 128, + null, + "access_key", + "secret_key" + ) + ) + ); + } + + public static AmazonBedrockChatCompletionModel createModel( + String id, + String region, + String model, + AmazonBedrockProvider provider, + String accessKey, + String secretKey + ) { + return createModel(id, region, model, provider, null, null, null, null, null, accessKey, secretKey); + } + + public static AmazonBedrockChatCompletionModel createModel( + String id, + String region, + String model, + AmazonBedrockProvider provider, + @Nullable Double temperature, + @Nullable Double topP, + @Nullable Double topK, + @Nullable Integer maxNewTokens, + @Nullable RateLimitSettings rateLimitSettings, + String accessKey, + String secretKey + ) { + return new AmazonBedrockChatCompletionModel( + id, + TaskType.COMPLETION, + "amazonbedrock", + new AmazonBedrockChatCompletionServiceSettings(region, model, provider, rateLimitSettings), + new AmazonBedrockChatCompletionTaskSettings(temperature, topP, topK, maxNewTokens), + new AmazonBedrockSecretSettings(new SecureString(accessKey), new SecureString(secretKey)) + ); + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionRequestTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionRequestTaskSettingsTests.java new file mode 100644 index 0000000000000..681088c786b6b --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionRequestTaskSettingsTests.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.completion; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.MatcherAssert; + +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MAX_NEW_TOKENS_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TEMPERATURE_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TOP_K_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TOP_P_FIELD; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockChatCompletionRequestTaskSettingsTests extends ESTestCase { + public void testFromMap_ReturnsEmptySettings_WhenTheMapIsEmpty() { + var settings = AmazonBedrockChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of())); + assertThat(settings, is(AmazonBedrockChatCompletionRequestTaskSettings.EMPTY_SETTINGS)); + } + + public void testFromMap_ReturnsEmptySettings_WhenTheMapDoesNotContainTheFields() { + var settings = AmazonBedrockChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of("key", "model"))); + assertThat(settings, is(AmazonBedrockChatCompletionRequestTaskSettings.EMPTY_SETTINGS)); + } + + public void testFromMap_ReturnsTemperature() { + var settings = AmazonBedrockChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of(TEMPERATURE_FIELD, 0.1))); + assertThat(settings.temperature(), is(0.1)); + } + + public void testFromMap_ReturnsTopP() { + var settings = AmazonBedrockChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of(TOP_P_FIELD, 0.1))); + assertThat(settings.topP(), is(0.1)); + } + + public void testFromMap_ReturnsDoSample() { + var settings = AmazonBedrockChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of(TOP_K_FIELD, 0.3))); + assertThat(settings.topK(), is(0.3)); + } + + public void testFromMap_ReturnsMaxNewTokens() { + var settings = AmazonBedrockChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of(MAX_NEW_TOKENS_FIELD, 512))); + assertThat(settings.maxNewTokens(), is(512)); + } + + public void testFromMap_TemperatureIsInvalidValue_ThrowsValidationException() { + var thrownException = expectThrows( + ValidationException.class, + () -> AmazonBedrockChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of(TEMPERATURE_FIELD, "invalid"))) + ); + + MatcherAssert.assertThat( + thrownException.getMessage(), + containsString( + Strings.format("field [temperature] is not of the expected type. The value [invalid] cannot be converted to a [Double]") + ) + ); + } + + public void testFromMap_TopPIsInvalidValue_ThrowsValidationException() { + var thrownException = expectThrows( + ValidationException.class, + () -> AmazonBedrockChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of(TOP_P_FIELD, "invalid"))) + ); + + MatcherAssert.assertThat( + thrownException.getMessage(), + containsString( + Strings.format("field [top_p] is not of the expected type. 
The value [invalid] cannot be converted to a [Double]") + ) + ); + } + + public void testFromMap_TopKIsInvalidValue_ThrowsValidationException() { + var thrownException = expectThrows( + ValidationException.class, + () -> AmazonBedrockChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of(TOP_K_FIELD, "invalid"))) + ); + + MatcherAssert.assertThat( + thrownException.getMessage(), + containsString("field [top_k] is not of the expected type. The value [invalid] cannot be converted to a [Double]") + ); + } + + public void testFromMap_MaxTokensIsInvalidValue_ThrowsStatusException() { + var thrownException = expectThrows( + ValidationException.class, + () -> AmazonBedrockChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of(MAX_NEW_TOKENS_FIELD, "invalid"))) + ); + + MatcherAssert.assertThat( + thrownException.getMessage(), + containsString("field [max_new_tokens] is not of the expected type. The value [invalid] cannot be converted to a [Integer]") + ); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionServiceSettingsTests.java new file mode 100644 index 0000000000000..90868530d8df8 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionServiceSettingsTests.java @@ -0,0 +1,131 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.completion; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettingsTests; +import org.hamcrest.CoreMatchers; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MODEL_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.PROVIDER_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.REGION_FIELD; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockChatCompletionServiceSettingsTests extends AbstractBWCWireSerializationTestCase< + AmazonBedrockChatCompletionServiceSettings> { + + public void testFromMap_Request_CreatesSettingsCorrectly() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + var serviceSettings = AmazonBedrockChatCompletionServiceSettings.fromMap( + createChatCompletionRequestSettingsMap(region, model, provider), + ConfigurationParseContext.REQUEST + ); + + assertThat( + serviceSettings, + is(new AmazonBedrockChatCompletionServiceSettings(region, model, AmazonBedrockProvider.AMAZONTITAN, null)) + ); + } + + public void testFromMap_RequestWithRateLimit_CreatesSettingsCorrectly() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + var settingsMap = createChatCompletionRequestSettingsMap(region, model, provider); + settingsMap.put(RateLimitSettings.FIELD_NAME, new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, 3))); + + var serviceSettings = AmazonBedrockChatCompletionServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST); + + assertThat( + serviceSettings, + is(new AmazonBedrockChatCompletionServiceSettings(region, model, AmazonBedrockProvider.AMAZONTITAN, new RateLimitSettings(3))) + ); + } + + public void testFromMap_Persistent_CreatesSettingsCorrectly() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + var settingsMap = createChatCompletionRequestSettingsMap(region, model, provider); + var serviceSettings = AmazonBedrockChatCompletionServiceSettings.fromMap(settingsMap, ConfigurationParseContext.PERSISTENT); + + assertThat( + serviceSettings, + is(new AmazonBedrockChatCompletionServiceSettings(region, model, AmazonBedrockProvider.AMAZONTITAN, null)) + ); + } + + public void testToXContent_WritesAllValues() throws IOException { + var entity = new AmazonBedrockChatCompletionServiceSettings( + "testregion", + "testmodel", + AmazonBedrockProvider.AMAZONTITAN, + new RateLimitSettings(3) + ); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, CoreMatchers.is(""" + 
{"region":"testregion","model":"testmodel","provider":"AMAZONTITAN",""" + """ + "rate_limit":{"requests_per_minute":3}}""")); + } + + public static HashMap createChatCompletionRequestSettingsMap(String region, String model, String provider) { + return new HashMap(Map.of(REGION_FIELD, region, MODEL_FIELD, model, PROVIDER_FIELD, provider)); + } + + @Override + protected AmazonBedrockChatCompletionServiceSettings mutateInstanceForVersion( + AmazonBedrockChatCompletionServiceSettings instance, + TransportVersion version + ) { + return instance; + } + + @Override + protected Writeable.Reader instanceReader() { + return AmazonBedrockChatCompletionServiceSettings::new; + } + + @Override + protected AmazonBedrockChatCompletionServiceSettings createTestInstance() { + return createRandom(); + } + + @Override + protected AmazonBedrockChatCompletionServiceSettings mutateInstance(AmazonBedrockChatCompletionServiceSettings instance) + throws IOException { + return randomValueOtherThan(instance, AmazonBedrockChatCompletionServiceSettingsTests::createRandom); + } + + private static AmazonBedrockChatCompletionServiceSettings createRandom() { + return new AmazonBedrockChatCompletionServiceSettings( + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomFrom(AmazonBedrockProvider.values()), + RateLimitSettingsTests.createRandom() + ); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionTaskSettingsTests.java new file mode 100644 index 0000000000000..0d5440c6d2cf8 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/completion/AmazonBedrockChatCompletionTaskSettingsTests.java @@ -0,0 +1,226 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.completion; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; +import org.hamcrest.MatcherAssert; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MAX_NEW_TOKENS_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TEMPERATURE_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TOP_K_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.TOP_P_FIELD; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockChatCompletionTaskSettingsTests extends AbstractBWCWireSerializationTestCase< + AmazonBedrockChatCompletionTaskSettings> { + + public void testFromMap_AllValues() { + var taskMap = getChatCompletionTaskSettingsMap(1.0, 0.5, 0.6, 512); + assertEquals( + new AmazonBedrockChatCompletionTaskSettings(1.0, 0.5, 0.6, 512), + AmazonBedrockChatCompletionTaskSettings.fromMap(taskMap) + ); + } + + public void testFromMap_TemperatureIsInvalidValue_ThrowsValidationException() { + var taskMap = getChatCompletionTaskSettingsMap(1.0, 0.5, 0.6, 512); + taskMap.put(TEMPERATURE_FIELD, "invalid"); + + var thrownException = expectThrows(ValidationException.class, () -> AmazonBedrockChatCompletionTaskSettings.fromMap(taskMap)); + + MatcherAssert.assertThat( + thrownException.getMessage(), + containsString( + Strings.format("field [temperature] is not of the expected type. The value [invalid] cannot be converted to a [Double]") + ) + ); + } + + public void testFromMap_TopPIsInvalidValue_ThrowsValidationException() { + var taskMap = getChatCompletionTaskSettingsMap(1.0, 0.5, 0.6, 512); + taskMap.put(TOP_P_FIELD, "invalid"); + + var thrownException = expectThrows(ValidationException.class, () -> AmazonBedrockChatCompletionTaskSettings.fromMap(taskMap)); + + MatcherAssert.assertThat( + thrownException.getMessage(), + containsString( + Strings.format("field [top_p] is not of the expected type. The value [invalid] cannot be converted to a [Double]") + ) + ); + } + + public void testFromMap_TopKIsInvalidValue_ThrowsValidationException() { + var taskMap = getChatCompletionTaskSettingsMap(1.0, 0.5, 0.6, 512); + taskMap.put(TOP_K_FIELD, "invalid"); + + var thrownException = expectThrows(ValidationException.class, () -> AmazonBedrockChatCompletionTaskSettings.fromMap(taskMap)); + + MatcherAssert.assertThat( + thrownException.getMessage(), + containsString("field [top_k] is not of the expected type. 
The value [invalid] cannot be converted to a [Double]") + ); + } + + public void testFromMap_MaxNewTokensIsInvalidValue_ThrowsValidationException() { + var taskMap = getChatCompletionTaskSettingsMap(1.0, 0.5, 0.6, 512); + taskMap.put(MAX_NEW_TOKENS_FIELD, "invalid"); + + var thrownException = expectThrows(ValidationException.class, () -> AmazonBedrockChatCompletionTaskSettings.fromMap(taskMap)); + + MatcherAssert.assertThat( + thrownException.getMessage(), + containsString( + Strings.format("field [max_new_tokens] is not of the expected type. The value [invalid] cannot be converted to a [Integer]") + ) + ); + } + + public void testFromMap_WithNoValues_DoesNotThrowException() { + var taskMap = AmazonBedrockChatCompletionTaskSettings.fromMap(new HashMap(Map.of())); + assertNull(taskMap.temperature()); + assertNull(taskMap.topP()); + assertNull(taskMap.topK()); + assertNull(taskMap.maxNewTokens()); + } + + public void testOverrideWith_KeepsOriginalValuesWithOverridesAreNull() { + var settings = AmazonBedrockChatCompletionTaskSettings.fromMap(getChatCompletionTaskSettingsMap(1.0, 0.5, 0.6, 512)); + var overrideSettings = AmazonBedrockChatCompletionTaskSettings.of(settings, AmazonBedrockChatCompletionTaskSettings.EMPTY_SETTINGS); + MatcherAssert.assertThat(overrideSettings, is(settings)); + } + + public void testOverrideWith_UsesTemperatureOverride() { + var settings = AmazonBedrockChatCompletionTaskSettings.fromMap(getChatCompletionTaskSettingsMap(1.0, 0.5, 0.6, 512)); + var overrideSettings = AmazonBedrockChatCompletionRequestTaskSettings.fromMap( + getChatCompletionTaskSettingsMap(0.3, null, null, null) + ); + var overriddenTaskSettings = AmazonBedrockChatCompletionTaskSettings.of(settings, overrideSettings); + MatcherAssert.assertThat(overriddenTaskSettings, is(new AmazonBedrockChatCompletionTaskSettings(0.3, 0.5, 0.6, 512))); + } + + public void testOverrideWith_UsesTopPOverride() { + var settings = AmazonBedrockChatCompletionTaskSettings.fromMap(getChatCompletionTaskSettingsMap(1.0, 0.5, 0.6, 512)); + var overrideSettings = AmazonBedrockChatCompletionRequestTaskSettings.fromMap( + getChatCompletionTaskSettingsMap(null, 0.2, null, null) + ); + var overriddenTaskSettings = AmazonBedrockChatCompletionTaskSettings.of(settings, overrideSettings); + MatcherAssert.assertThat(overriddenTaskSettings, is(new AmazonBedrockChatCompletionTaskSettings(1.0, 0.2, 0.6, 512))); + } + + public void testOverrideWith_UsesDoSampleOverride() { + var settings = AmazonBedrockChatCompletionTaskSettings.fromMap(getChatCompletionTaskSettingsMap(1.0, 0.5, 0.6, 512)); + var overrideSettings = AmazonBedrockChatCompletionRequestTaskSettings.fromMap( + getChatCompletionTaskSettingsMap(null, null, 0.1, null) + ); + var overriddenTaskSettings = AmazonBedrockChatCompletionTaskSettings.of(settings, overrideSettings); + MatcherAssert.assertThat(overriddenTaskSettings, is(new AmazonBedrockChatCompletionTaskSettings(1.0, 0.5, 0.1, 512))); + } + + public void testOverrideWith_UsesMaxNewTokensOverride() { + var settings = AmazonBedrockChatCompletionTaskSettings.fromMap(getChatCompletionTaskSettingsMap(1.0, 0.5, 0.6, 512)); + var overrideSettings = AmazonBedrockChatCompletionRequestTaskSettings.fromMap( + getChatCompletionTaskSettingsMap(null, null, null, 128) + ); + var overriddenTaskSettings = AmazonBedrockChatCompletionTaskSettings.of(settings, overrideSettings); + MatcherAssert.assertThat(overriddenTaskSettings, is(new AmazonBedrockChatCompletionTaskSettings(1.0, 0.5, 0.6, 128))); + } + + public void 
testToXContent_WithoutParameters() throws IOException {
+        var settings = AmazonBedrockChatCompletionTaskSettings.fromMap(getChatCompletionTaskSettingsMap(null, null, null, null));
+
+        XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+        settings.toXContent(builder, null);
+        String xContentResult = Strings.toString(builder);
+
+        assertThat(xContentResult, is("{}"));
+    }
+
+    public void testToXContent_WithParameters() throws IOException {
+        var settings = AmazonBedrockChatCompletionTaskSettings.fromMap(getChatCompletionTaskSettingsMap(1.0, 0.5, 0.6, 512));
+
+        XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+        settings.toXContent(builder, null);
+        String xContentResult = Strings.toString(builder);
+
+        assertThat(xContentResult, is("""
+            {"temperature":1.0,"top_p":0.5,"top_k":0.6,"max_new_tokens":512}"""));
+    }
+
+    public static Map<String, Object> getChatCompletionTaskSettingsMap(
+        @Nullable Double temperature,
+        @Nullable Double topP,
+        @Nullable Double topK,
+        @Nullable Integer maxNewTokens
+    ) {
+        var map = new HashMap<String, Object>();
+
+        if (temperature != null) {
+            map.put(TEMPERATURE_FIELD, temperature);
+        }
+
+        if (topP != null) {
+            map.put(TOP_P_FIELD, topP);
+        }
+
+        if (topK != null) {
+            map.put(TOP_K_FIELD, topK);
+        }
+
+        if (maxNewTokens != null) {
+            map.put(MAX_NEW_TOKENS_FIELD, maxNewTokens);
+        }
+
+        return map;
+    }
+
+    @Override
+    protected AmazonBedrockChatCompletionTaskSettings mutateInstanceForVersion(
+        AmazonBedrockChatCompletionTaskSettings instance,
+        TransportVersion version
+    ) {
+        return instance;
+    }
+
+    @Override
+    protected Writeable.Reader<AmazonBedrockChatCompletionTaskSettings> instanceReader() {
+        return AmazonBedrockChatCompletionTaskSettings::new;
+    }
+
+    @Override
+    protected AmazonBedrockChatCompletionTaskSettings createTestInstance() {
+        return createRandom();
+    }
+
+    @Override
+    protected AmazonBedrockChatCompletionTaskSettings mutateInstance(AmazonBedrockChatCompletionTaskSettings instance) throws IOException {
+        return randomValueOtherThan(instance, AmazonBedrockChatCompletionTaskSettingsTests::createRandom);
+    }
+
+    private static AmazonBedrockChatCompletionTaskSettings createRandom() {
+        return new AmazonBedrockChatCompletionTaskSettings(
+            randomFrom(new Double[] { null, randomDouble() }),
+            randomFrom(new Double[] { null, randomDouble() }),
+            randomFrom(new Double[] { null, randomDouble() }),
+            randomFrom(new Integer[] { null, randomNonNegativeInt() })
+        );
+    }
+}
diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsModelTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsModelTests.java
new file mode 100644
index 0000000000000..711e3cbb5a511
--- /dev/null
+++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsModelTests.java
@@ -0,0 +1,81 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings; + +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.EmptyTaskSettings; +import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockSecretSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; + +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; + +public class AmazonBedrockEmbeddingsModelTests extends ESTestCase { + + public void testCreateModel_withTaskSettings_shouldFail() { + var baseModel = createModel("id", "region", "model", AmazonBedrockProvider.AMAZONTITAN, "accesskey", "secretkey"); + var thrownException = assertThrows( + ValidationException.class, + () -> AmazonBedrockEmbeddingsModel.of(baseModel, Map.of("testkey", "testvalue")) + ); + assertThat(thrownException.getMessage(), containsString("Amazon Bedrock embeddings model cannot have task settings")); + } + + // model creation only - no tests to define, but we want to have the public createModel + // method available + + public static AmazonBedrockEmbeddingsModel createModel( + String inferenceId, + String region, + String model, + AmazonBedrockProvider provider, + String accessKey, + String secretKey + ) { + return createModel(inferenceId, region, model, provider, null, false, null, null, new RateLimitSettings(240), accessKey, secretKey); + } + + public static AmazonBedrockEmbeddingsModel createModel( + String inferenceId, + String region, + String model, + AmazonBedrockProvider provider, + @Nullable Integer dimensions, + boolean dimensionsSetByUser, + @Nullable Integer maxTokens, + @Nullable SimilarityMeasure similarity, + RateLimitSettings rateLimitSettings, + String accessKey, + String secretKey + ) { + return new AmazonBedrockEmbeddingsModel( + inferenceId, + TaskType.TEXT_EMBEDDING, + "amazonbedrock", + new AmazonBedrockEmbeddingsServiceSettings( + region, + model, + provider, + dimensions, + dimensionsSetByUser, + maxTokens, + similarity, + rateLimitSettings + ), + new EmptyTaskSettings(), + new AmazonBedrockSecretSettings(new SecureString(accessKey), new SecureString(secretKey)) + ); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsServiceSettingsTests.java new file mode 100644 index 0000000000000..a100b89e1db6e --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsServiceSettingsTests.java @@ -0,0 +1,404 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.ServiceFields; +import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProvider; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettingsTests; +import org.hamcrest.CoreMatchers; +import org.hamcrest.MatcherAssert; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.ServiceFields.DIMENSIONS; +import static org.elasticsearch.xpack.inference.services.ServiceFields.MAX_INPUT_TOKENS; +import static org.elasticsearch.xpack.inference.services.ServiceFields.SIMILARITY; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.MODEL_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.PROVIDER_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockConstants.REGION_FIELD; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsServiceSettings.DIMENSIONS_SET_BY_USER; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + +public class AmazonBedrockEmbeddingsServiceSettingsTests extends AbstractBWCWireSerializationTestCase< + AmazonBedrockEmbeddingsServiceSettings> { + + public void testFromMap_Request_CreatesSettingsCorrectly() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + var maxInputTokens = 512; + var serviceSettings = AmazonBedrockEmbeddingsServiceSettings.fromMap( + createEmbeddingsRequestSettingsMap(region, model, provider, null, null, maxInputTokens, SimilarityMeasure.COSINE), + ConfigurationParseContext.REQUEST + ); + + assertThat( + serviceSettings, + is( + new AmazonBedrockEmbeddingsServiceSettings( + region, + model, + AmazonBedrockProvider.AMAZONTITAN, + null, + false, + maxInputTokens, + SimilarityMeasure.COSINE, + null + ) + ) + ); + } + + public void testFromMap_RequestWithRateLimit_CreatesSettingsCorrectly() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + var maxInputTokens = 512; + var settingsMap = createEmbeddingsRequestSettingsMap(region, model, provider, null, null, maxInputTokens, SimilarityMeasure.COSINE); + settingsMap.put(RateLimitSettings.FIELD_NAME, new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, 3))); + + var serviceSettings = AmazonBedrockEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST); + + assertThat( + serviceSettings, + is( + new AmazonBedrockEmbeddingsServiceSettings( + region, + model, + AmazonBedrockProvider.AMAZONTITAN, + null, + false, + maxInputTokens, + SimilarityMeasure.COSINE, + new 
RateLimitSettings(3) + ) + ) + ); + } + + public void testFromMap_Request_DimensionsSetByUser_IsFalse_WhenDimensionsAreNotPresent() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + var maxInputTokens = 512; + var settingsMap = createEmbeddingsRequestSettingsMap(region, model, provider, null, null, maxInputTokens, SimilarityMeasure.COSINE); + var serviceSettings = AmazonBedrockEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST); + + assertThat( + serviceSettings, + is( + new AmazonBedrockEmbeddingsServiceSettings( + region, + model, + AmazonBedrockProvider.AMAZONTITAN, + null, + false, + maxInputTokens, + SimilarityMeasure.COSINE, + null + ) + ) + ); + } + + public void testFromMap_Request_DimensionsSetByUser_ShouldThrowWhenPresent() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + var maxInputTokens = 512; + + var settingsMap = createEmbeddingsRequestSettingsMap(region, model, provider, null, true, maxInputTokens, SimilarityMeasure.COSINE); + + var thrownException = expectThrows( + ValidationException.class, + () -> AmazonBedrockEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST) + ); + + MatcherAssert.assertThat( + thrownException.getMessage(), + containsString( + Strings.format("Validation Failed: 1: [service_settings] does not allow the setting [%s];", DIMENSIONS_SET_BY_USER) + ) + ); + } + + public void testFromMap_Request_Dimensions_ShouldThrowWhenPresent() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + var dims = 128; + + var settingsMap = createEmbeddingsRequestSettingsMap(region, model, provider, dims, null, null, null); + + var thrownException = expectThrows( + ValidationException.class, + () -> AmazonBedrockEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST) + ); + + MatcherAssert.assertThat( + thrownException.getMessage(), + containsString(Strings.format("[service_settings] does not allow the setting [%s]", DIMENSIONS)) + ); + } + + public void testFromMap_Request_MaxTokensShouldBePositiveInteger() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + var maxInputTokens = -128; + + var settingsMap = createEmbeddingsRequestSettingsMap(region, model, provider, null, null, maxInputTokens, null); + + var thrownException = expectThrows( + ValidationException.class, + () -> AmazonBedrockEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST) + ); + + MatcherAssert.assertThat( + thrownException.getMessage(), + containsString(Strings.format("[%s] must be a positive integer", MAX_INPUT_TOKENS)) + ); + } + + public void testFromMap_Persistent_CreatesSettingsCorrectly() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + var dims = 1536; + var maxInputTokens = 512; + + var settingsMap = createEmbeddingsRequestSettingsMap( + region, + model, + provider, + dims, + false, + maxInputTokens, + SimilarityMeasure.COSINE + ); + var serviceSettings = AmazonBedrockEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.PERSISTENT); + + assertThat( + serviceSettings, + is( + new AmazonBedrockEmbeddingsServiceSettings( + region, + model, + AmazonBedrockProvider.AMAZONTITAN, + dims, + false, + maxInputTokens, + SimilarityMeasure.COSINE, + null + ) + ) + ); + } + + public void testFromMap_PersistentContext_DoesNotThrowException_WhenDimensionsIsNull() { + var region = "region"; + var model = 
"model-id"; + var provider = "amazontitan"; + + var settingsMap = createEmbeddingsRequestSettingsMap(region, model, provider, null, true, null, null); + var serviceSettings = AmazonBedrockEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.PERSISTENT); + + assertThat( + serviceSettings, + is(new AmazonBedrockEmbeddingsServiceSettings(region, model, AmazonBedrockProvider.AMAZONTITAN, null, true, null, null, null)) + ); + } + + public void testFromMap_PersistentContext_DoesNotThrowException_WhenSimilarityIsPresent() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + + var settingsMap = createEmbeddingsRequestSettingsMap(region, model, provider, null, true, null, SimilarityMeasure.DOT_PRODUCT); + var serviceSettings = AmazonBedrockEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.PERSISTENT); + + assertThat( + serviceSettings, + is( + new AmazonBedrockEmbeddingsServiceSettings( + region, + model, + AmazonBedrockProvider.AMAZONTITAN, + null, + true, + null, + SimilarityMeasure.DOT_PRODUCT, + null + ) + ) + ); + } + + public void testFromMap_PersistentContext_ThrowsException_WhenDimensionsSetByUserIsNull() { + var region = "region"; + var model = "model-id"; + var provider = "amazontitan"; + + var settingsMap = createEmbeddingsRequestSettingsMap(region, model, provider, 1, null, null, null); + + var exception = expectThrows( + ValidationException.class, + () -> AmazonBedrockEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.PERSISTENT) + ); + + assertThat( + exception.getMessage(), + containsString("Validation Failed: 1: [service_settings] does not contain the required setting [dimensions_set_by_user];") + ); + } + + public void testToXContent_WritesDimensionsSetByUserTrue() throws IOException { + var entity = new AmazonBedrockEmbeddingsServiceSettings( + "testregion", + "testmodel", + AmazonBedrockProvider.AMAZONTITAN, + null, + true, + null, + null, + new RateLimitSettings(2) + ); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, CoreMatchers.is(""" + {"region":"testregion","model":"testmodel","provider":"AMAZONTITAN",""" + """ + "rate_limit":{"requests_per_minute":2},"dimensions_set_by_user":true}""")); + } + + public void testToXContent_WritesAllValues() throws IOException { + var entity = new AmazonBedrockEmbeddingsServiceSettings( + "testregion", + "testmodel", + AmazonBedrockProvider.AMAZONTITAN, + 1024, + false, + 512, + null, + new RateLimitSettings(3) + ); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, CoreMatchers.is(""" + {"region":"testregion","model":"testmodel","provider":"AMAZONTITAN",""" + """ + "rate_limit":{"requests_per_minute":3},"dimensions":1024,"max_input_tokens":512,"dimensions_set_by_user":false}""")); + } + + public void testToFilteredXContent_WritesAllValues_ExceptDimensionsSetByUser() throws IOException { + var entity = new AmazonBedrockEmbeddingsServiceSettings( + "testregion", + "testmodel", + AmazonBedrockProvider.AMAZONTITAN, + 1024, + false, + 512, + null, + new RateLimitSettings(3) + ); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + var filteredXContent = entity.getFilteredXContentObject(); + 
filteredXContent.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, CoreMatchers.is(""" + {"region":"testregion","model":"testmodel","provider":"AMAZONTITAN",""" + """ + "rate_limit":{"requests_per_minute":3},"dimensions":1024,"max_input_tokens":512}""")); + } + + public static HashMap<String, Object> createEmbeddingsRequestSettingsMap( + String region, + String model, + String provider, + @Nullable Integer dimensions, + @Nullable Boolean dimensionsSetByUser, + @Nullable Integer maxTokens, + @Nullable SimilarityMeasure similarityMeasure + ) { + var map = new HashMap<String, Object>(Map.of(REGION_FIELD, region, MODEL_FIELD, model, PROVIDER_FIELD, provider)); + + if (dimensions != null) { + map.put(ServiceFields.DIMENSIONS, dimensions); + } + + if (dimensionsSetByUser != null) { + map.put(DIMENSIONS_SET_BY_USER, dimensionsSetByUser.equals(Boolean.TRUE)); + } + + if (maxTokens != null) { + map.put(ServiceFields.MAX_INPUT_TOKENS, maxTokens); + } + + if (similarityMeasure != null) { + map.put(SIMILARITY, similarityMeasure.toString()); + } + + return map; + } + + @Override + protected AmazonBedrockEmbeddingsServiceSettings mutateInstanceForVersion( + AmazonBedrockEmbeddingsServiceSettings instance, + TransportVersion version + ) { + return instance; + } + + @Override + protected Writeable.Reader<AmazonBedrockEmbeddingsServiceSettings> instanceReader() { + return AmazonBedrockEmbeddingsServiceSettings::new; + } + + @Override + protected AmazonBedrockEmbeddingsServiceSettings createTestInstance() { + return createRandom(); + } + + @Override + protected AmazonBedrockEmbeddingsServiceSettings mutateInstance(AmazonBedrockEmbeddingsServiceSettings instance) throws IOException { + return randomValueOtherThan(instance, AmazonBedrockEmbeddingsServiceSettingsTests::createRandom); + } + + private static AmazonBedrockEmbeddingsServiceSettings createRandom() { + return new AmazonBedrockEmbeddingsServiceSettings( + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomFrom(AmazonBedrockProvider.values()), + randomFrom(new Integer[] { null, randomNonNegativeInt() }), + randomBoolean(), + randomFrom(new Integer[] { null, randomNonNegativeInt() }), + randomFrom(new SimilarityMeasure[] { null, randomFrom(SimilarityMeasure.values()) }), + RateLimitSettingsTests.createRandom() + ); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java new file mode 100644 index 0000000000000..5e32344ab3840 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java @@ -0,0 +1,537 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.anthropic; + +import org.apache.http.HttpHeaders; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.inference.InferenceServiceResults; +import org.elasticsearch.inference.InputType; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.inference.action.InferenceAction; +import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSenderTests; +import org.elasticsearch.xpack.inference.external.http.sender.Sender; +import org.elasticsearch.xpack.inference.external.request.anthropic.AnthropicRequestUtils; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.elasticsearch.xpack.inference.services.ServiceFields; +import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionModel; +import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionModelTests; +import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionTaskSettings; +import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionTaskSettingsTests; +import org.hamcrest.MatcherAssert; +import org.hamcrest.Matchers; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.xpack.inference.Utils.buildExpectationCompletions; +import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; +import static org.elasticsearch.xpack.inference.Utils.getModelListenerForException; +import static org.elasticsearch.xpack.inference.Utils.getPersistedConfigMap; +import static org.elasticsearch.xpack.inference.Utils.getRequestConfigMap; +import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; +import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; +import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; +import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; +import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; +import static org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettingsTests.getSecretSettingsMap; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +public class AnthropicServiceTests extends ESTestCase { + + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + private final 
MockWebServer webServer = new MockWebServer(); + private ThreadPool threadPool; + + private HttpClientManager clientManager; + + @Before + public void init() throws Exception { + webServer.start(); + threadPool = createThreadPool(inferenceUtilityPool()); + clientManager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty(), mock(ThrottlerManager.class)); + } + + @After + public void shutdown() throws IOException { + clientManager.close(); + terminate(threadPool); + webServer.close(); + } + + public void testParseRequestConfig_CreatesACompletionModel() throws IOException { + var apiKey = "apiKey"; + var modelId = "model"; + + try (var service = createServiceWithMockSender()) { + ActionListener modelListener = ActionListener.wrap(model -> { + assertThat(model, instanceOf(AnthropicChatCompletionModel.class)); + + var completionModel = (AnthropicChatCompletionModel) model; + assertThat(completionModel.getServiceSettings().modelId(), is(modelId)); + assertThat(completionModel.getSecretSettings().apiKey().toString(), is(apiKey)); + }, e -> fail("Model parsing should have succeeded, but failed: " + e.getMessage())); + + service.parseRequestConfig( + "id", + TaskType.COMPLETION, + getRequestConfigMap( + new HashMap<>(Map.of(ServiceFields.MODEL_ID, modelId)), + new HashMap<>(Map.of(AnthropicServiceFields.MAX_TOKENS, 1)), + getSecretSettingsMap(apiKey) + ), + Set.of(), + modelListener + ); + } + } + + public void testParseRequestConfig_ThrowsUnsupportedModelType() throws IOException { + try (var service = createServiceWithMockSender()) { + var failureListener = getModelListenerForException( + ElasticsearchStatusException.class, + "The [anthropic] service does not support task type [sparse_embedding]" + ); + + service.parseRequestConfig( + "id", + TaskType.SPARSE_EMBEDDING, + getRequestConfigMap( + new HashMap<>(Map.of(ServiceFields.MODEL_ID, "model")), + new HashMap<>(Map.of()), + getSecretSettingsMap("secret") + ), + Set.of(), + failureListener + ); + } + } + + public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInConfig() throws IOException { + try (var service = createServiceWithMockSender()) { + var config = getRequestConfigMap( + new HashMap<>(Map.of(ServiceFields.MODEL_ID, "model")), + AnthropicChatCompletionTaskSettingsTests.getChatCompletionTaskSettingsMap(1, null, null, null), + getSecretSettingsMap("secret") + ); + config.put("extra_key", "value"); + + var failureListener = getModelListenerForException( + ElasticsearchStatusException.class, + "Model configuration contains settings [{extra_key=value}] unknown to the [anthropic] service" + ); + service.parseRequestConfig("id", TaskType.COMPLETION, config, Set.of(), failureListener); + } + } + + public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInServiceSettingsMap() throws IOException { + try (var service = createServiceWithMockSender()) { + Map serviceSettings = new HashMap<>(Map.of(ServiceFields.MODEL_ID, "model")); + serviceSettings.put("extra_key", "value"); + + var config = getRequestConfigMap( + serviceSettings, + AnthropicChatCompletionTaskSettingsTests.getChatCompletionTaskSettingsMap(1, null, null, null), + getSecretSettingsMap("api_key") + ); + + var failureListener = getModelListenerForException( + ElasticsearchStatusException.class, + "Model configuration contains settings [{extra_key=value}] unknown to the [anthropic] service" + ); + service.parseRequestConfig("id", TaskType.COMPLETION, config, Set.of(), failureListener); + } + } + + public void 
testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInTaskSettingsMap() throws IOException { + try (var service = createServiceWithMockSender()) { + var taskSettingsMap = AnthropicChatCompletionTaskSettingsTests.getChatCompletionTaskSettingsMap(1, null, null, null); + taskSettingsMap.put("extra_key", "value"); + + var config = getRequestConfigMap( + new HashMap<>(Map.of(ServiceFields.MODEL_ID, "model")), + taskSettingsMap, + getSecretSettingsMap("secret") + ); + + var failureListener = getModelListenerForException( + ElasticsearchStatusException.class, + "Model configuration contains settings [{extra_key=value}] unknown to the [anthropic] service" + ); + service.parseRequestConfig("id", TaskType.COMPLETION, config, Set.of(), failureListener); + } + } + + public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInSecretSettingsMap() throws IOException { + try (var service = createServiceWithMockSender()) { + Map secretSettings = getSecretSettingsMap("secret"); + secretSettings.put("extra_key", "value"); + + var config = getRequestConfigMap( + new HashMap<>(Map.of(ServiceFields.MODEL_ID, "model")), + AnthropicChatCompletionTaskSettingsTests.getChatCompletionTaskSettingsMap(1, null, null, null), + secretSettings + ); + + var failureListener = getModelListenerForException( + ElasticsearchStatusException.class, + "Model configuration contains settings [{extra_key=value}] unknown to the [anthropic] service" + ); + service.parseRequestConfig("id", TaskType.COMPLETION, config, Set.of(), failureListener); + } + } + + public void testParsePersistedConfigWithSecrets_CreatesACompletionModel() throws IOException { + var modelId = "model"; + var apiKey = "apiKey"; + + try (var service = createServiceWithMockSender()) { + var persistedConfig = getPersistedConfigMap( + new HashMap<>(Map.of(ServiceFields.MODEL_ID, modelId)), + AnthropicChatCompletionTaskSettingsTests.getChatCompletionTaskSettingsMap(1, 1.0, 2.1, 3), + getSecretSettingsMap(apiKey) + ); + + var model = service.parsePersistedConfigWithSecrets( + "id", + TaskType.COMPLETION, + persistedConfig.config(), + persistedConfig.secrets() + ); + + assertThat(model, instanceOf(AnthropicChatCompletionModel.class)); + + var completionModel = (AnthropicChatCompletionModel) model; + assertThat(completionModel.getServiceSettings().modelId(), is(modelId)); + assertThat(completionModel.getTaskSettings(), is(new AnthropicChatCompletionTaskSettings(1, 1.0, 2.1, 3))); + assertThat(completionModel.getSecretSettings().apiKey().toString(), is(apiKey)); + } + } + + public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInConfig() throws IOException { + var modelId = "model"; + var apiKey = "apiKey"; + + try (var service = createServiceWithMockSender()) { + var persistedConfig = getPersistedConfigMap( + new HashMap<>(Map.of(ServiceFields.MODEL_ID, modelId)), + AnthropicChatCompletionTaskSettingsTests.getChatCompletionTaskSettingsMap(1, 1.0, 2.1, 3), + getSecretSettingsMap(apiKey) + ); + persistedConfig.config().put("extra_key", "value"); + + var model = service.parsePersistedConfigWithSecrets( + "id", + TaskType.COMPLETION, + persistedConfig.config(), + persistedConfig.secrets() + ); + + assertThat(model, instanceOf(AnthropicChatCompletionModel.class)); + + var completionModel = (AnthropicChatCompletionModel) model; + assertThat(completionModel.getServiceSettings().modelId(), is(modelId)); + assertThat(completionModel.getTaskSettings(), is(new AnthropicChatCompletionTaskSettings(1, 1.0, 2.1, 3))); + 
assertThat(completionModel.getSecretSettings().apiKey(), is(apiKey)); + } + } + + public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInSecretsSettings() throws IOException { + var modelId = "model"; + var apiKey = "apiKey"; + + try (var service = createServiceWithMockSender()) { + var secretSettingsMap = getSecretSettingsMap(apiKey); + secretSettingsMap.put("extra_key", "value"); + + var persistedConfig = getPersistedConfigMap( + new HashMap<>(Map.of(ServiceFields.MODEL_ID, modelId)), + AnthropicChatCompletionTaskSettingsTests.getChatCompletionTaskSettingsMap(1, 1.0, 2.1, 3), + secretSettingsMap + ); + + var model = service.parsePersistedConfigWithSecrets( + "id", + TaskType.COMPLETION, + persistedConfig.config(), + persistedConfig.secrets() + ); + + assertThat(model, instanceOf(AnthropicChatCompletionModel.class)); + + var completionModel = (AnthropicChatCompletionModel) model; + assertThat(completionModel.getServiceSettings().modelId(), is(modelId)); + assertThat(completionModel.getTaskSettings(), is(new AnthropicChatCompletionTaskSettings(1, 1.0, 2.1, 3))); + assertThat(completionModel.getSecretSettings().apiKey().toString(), is(apiKey)); + } + } + + public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInServiceSettings() throws IOException { + var modelId = "model"; + var apiKey = "apiKey"; + + try (var service = createServiceWithMockSender()) { + Map serviceSettingsMap = new HashMap<>(Map.of(ServiceFields.MODEL_ID, modelId)); + serviceSettingsMap.put("extra_key", "value"); + + var persistedConfig = getPersistedConfigMap( + serviceSettingsMap, + AnthropicChatCompletionTaskSettingsTests.getChatCompletionTaskSettingsMap(1, 1.0, 2.1, 3), + getSecretSettingsMap(apiKey) + ); + + var model = service.parsePersistedConfigWithSecrets( + "id", + TaskType.COMPLETION, + persistedConfig.config(), + persistedConfig.secrets() + ); + + assertThat(model, instanceOf(AnthropicChatCompletionModel.class)); + + var completionModel = (AnthropicChatCompletionModel) model; + assertThat(completionModel.getServiceSettings().modelId(), is(modelId)); + assertThat(completionModel.getTaskSettings(), is(new AnthropicChatCompletionTaskSettings(1, 1.0, 2.1, 3))); + assertThat(completionModel.getSecretSettings().apiKey().toString(), is(apiKey)); + } + } + + public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInTaskSettings() throws IOException { + var modelId = "model"; + var apiKey = "apiKey"; + + try (var service = createServiceWithMockSender()) { + Map taskSettings = AnthropicChatCompletionTaskSettingsTests.getChatCompletionTaskSettingsMap(1, 1.0, 2.1, 3); + taskSettings.put("extra_key", "value"); + + var persistedConfig = getPersistedConfigMap( + new HashMap<>(Map.of(ServiceFields.MODEL_ID, modelId)), + taskSettings, + getSecretSettingsMap(apiKey) + ); + + var model = service.parsePersistedConfigWithSecrets( + "id", + TaskType.COMPLETION, + persistedConfig.config(), + persistedConfig.secrets() + ); + + assertThat(model, instanceOf(AnthropicChatCompletionModel.class)); + + var completionModel = (AnthropicChatCompletionModel) model; + assertThat(completionModel.getServiceSettings().modelId(), is(modelId)); + assertThat(completionModel.getTaskSettings(), is(new AnthropicChatCompletionTaskSettings(1, 1.0, 2.1, 3))); + assertThat(completionModel.getSecretSettings().apiKey().toString(), is(apiKey)); + } + } + + public void testParsePersistedConfig_CreatesACompletionModel() throws IOException { + var modelId = "model"; + + try (var service 
= createServiceWithMockSender()) { + var persistedConfig = getPersistedConfigMap( + new HashMap<>(Map.of(ServiceFields.MODEL_ID, modelId)), + AnthropicChatCompletionTaskSettingsTests.getChatCompletionTaskSettingsMap(1, 1.0, 2.1, 3) + ); + + var model = service.parsePersistedConfig("id", TaskType.COMPLETION, persistedConfig.config()); + + assertThat(model, instanceOf(AnthropicChatCompletionModel.class)); + + var completionModel = (AnthropicChatCompletionModel) model; + assertThat(completionModel.getServiceSettings().modelId(), is(modelId)); + assertThat(completionModel.getTaskSettings(), is(new AnthropicChatCompletionTaskSettings(1, 1.0, 2.1, 3))); + assertNull(completionModel.getSecretSettings()); + } + } + + public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInConfig() throws IOException { + var modelId = "model"; + + try (var service = createServiceWithMockSender()) { + var persistedConfig = getPersistedConfigMap( + new HashMap<>(Map.of(ServiceFields.MODEL_ID, modelId)), + AnthropicChatCompletionTaskSettingsTests.getChatCompletionTaskSettingsMap(1, 1.0, 2.1, 3) + ); + persistedConfig.config().put("extra_key", "value"); + + var model = service.parsePersistedConfig("id", TaskType.COMPLETION, persistedConfig.config()); + + assertThat(model, instanceOf(AnthropicChatCompletionModel.class)); + + var completionModel = (AnthropicChatCompletionModel) model; + assertThat(completionModel.getServiceSettings().modelId(), is(modelId)); + assertThat(completionModel.getTaskSettings(), is(new AnthropicChatCompletionTaskSettings(1, 1.0, 2.1, 3))); + assertNull(completionModel.getSecretSettings()); + } + } + + public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInServiceSettings() throws IOException { + var modelId = "model"; + + try (var service = createServiceWithMockSender()) { + Map serviceSettingsMap = new HashMap<>(Map.of(ServiceFields.MODEL_ID, modelId)); + serviceSettingsMap.put("extra_key", "value"); + + var persistedConfig = getPersistedConfigMap( + serviceSettingsMap, + AnthropicChatCompletionTaskSettingsTests.getChatCompletionTaskSettingsMap(1, 1.0, 2.1, 3) + ); + + var model = service.parsePersistedConfig("id", TaskType.COMPLETION, persistedConfig.config()); + + assertThat(model, instanceOf(AnthropicChatCompletionModel.class)); + + var completionModel = (AnthropicChatCompletionModel) model; + assertThat(completionModel.getServiceSettings().modelId(), is(modelId)); + assertThat(completionModel.getTaskSettings(), is(new AnthropicChatCompletionTaskSettings(1, 1.0, 2.1, 3))); + assertNull(completionModel.getSecretSettings()); + } + } + + public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInTaskSettings() throws IOException { + var modelId = "model"; + + try (var service = createServiceWithMockSender()) { + Map taskSettings = AnthropicChatCompletionTaskSettingsTests.getChatCompletionTaskSettingsMap(1, 1.0, 2.1, 3); + taskSettings.put("extra_key", "value"); + + var persistedConfig = getPersistedConfigMap(new HashMap<>(Map.of(ServiceFields.MODEL_ID, modelId)), taskSettings); + + var model = service.parsePersistedConfig("id", TaskType.COMPLETION, persistedConfig.config()); + + assertThat(model, instanceOf(AnthropicChatCompletionModel.class)); + + var completionModel = (AnthropicChatCompletionModel) model; + assertThat(completionModel.getServiceSettings().modelId(), is(modelId)); + assertThat(completionModel.getTaskSettings(), is(new AnthropicChatCompletionTaskSettings(1, 1.0, 2.1, 3))); + assertNull(completionModel.getSecretSettings()); + } + } + + 
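// The next two tests exercise infer(): the first stubs HttpRequestSender.Factory with mocks and passes an invalid model, expecting an ElasticsearchStatusException without any HTTP traffic. + // The second sends a completion request to the MockWebServer and verifies the API key and version headers along with the JSON request body. +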
public void testInfer_ThrowsErrorWhenModelIsNotAValidModel() throws IOException { + var sender = mock(Sender.class); + + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + + var mockModel = getInvalidModel("model_id", "service_name"); + + try (var service = new AnthropicService(factory, createWithEmptySettings(threadPool))) { + PlainActionFuture listener = new PlainActionFuture<>(); + service.infer( + mockModel, + null, + List.of(""), + new HashMap<>(), + InputType.INGEST, + InferenceAction.Request.DEFAULT_TIMEOUT, + listener + ); + + var thrownException = expectThrows(ElasticsearchStatusException.class, () -> listener.actionGet(TIMEOUT)); + MatcherAssert.assertThat( + thrownException.getMessage(), + is("The internal model was invalid, please delete the service [service_name] with id [model_id] and add it again.") + ); + + verify(factory, times(1)).createSender(); + verify(sender, times(1)).start(); + } + + verify(sender, times(1)).close(); + verifyNoMoreInteractions(factory); + verifyNoMoreInteractions(sender); + } + + public void testInfer_SendsCompletionRequest() throws IOException { + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); + + try (var service = new AnthropicService(senderFactory, createWithEmptySettings(threadPool))) { + String responseJson = """ + { + "id": "msg_01XzZQmG41BMGe5NZ5p2vEWb", + "type": "message", + "role": "assistant", + "model": "claude-3-opus-20240229", + "content": [ + { + "type": "text", + "text": "result" + } + ], + "stop_reason": "end_turn", + "stop_sequence": null, + "usage": { + "input_tokens": 16, + "output_tokens": 326 + } + } + """; + + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); + + var model = AnthropicChatCompletionModelTests.createChatCompletionModel(getUrl(webServer), "secret", "model", 1); + PlainActionFuture listener = new PlainActionFuture<>(); + service.infer( + model, + null, + List.of("input"), + new HashMap<>(), + InputType.INGEST, + InferenceAction.Request.DEFAULT_TIMEOUT, + listener + ); + var result = listener.actionGet(TIMEOUT); + + assertThat(result.asMap(), is(buildExpectationCompletions(List.of("result")))); + var request = webServer.requests().get(0); + assertNull(request.getUri().getQuery()); + assertThat(request.getHeader(HttpHeaders.CONTENT_TYPE), Matchers.equalTo(XContentType.JSON.mediaType())); + assertThat(request.getHeader(AnthropicRequestUtils.X_API_KEY), Matchers.equalTo("secret")); + assertThat( + request.getHeader(AnthropicRequestUtils.VERSION), + Matchers.equalTo(AnthropicRequestUtils.ANTHROPIC_VERSION_2023_06_01) + ); + + var requestMap = entityAsMap(request.getBody()); + assertThat( + requestMap, + is(Map.of("messages", List.of(Map.of("role", "user", "content", "input")), "model", "model", "max_tokens", 1)) + ); + } + } + + private AnthropicService createServiceWithMockSender() { + return new AnthropicService(mock(HttpRequestSender.Factory.class), createWithEmptySettings(threadPool)); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionModelTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionModelTests.java new file mode 100644 index 0000000000000..85535b1400b86 --- /dev/null +++ 
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionModelTests.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.anthropic.completion; + +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; + +import java.util.Map; + +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.sameInstance; + +public class AnthropicChatCompletionModelTests extends ESTestCase { + + public void testOverrideWith_OverridesMaxInput() { + var model = createChatCompletionModel("url", "api_key", "model_name", 0); + var requestTaskSettingsMap = AnthropicChatCompletionTaskSettingsTests.getChatCompletionTaskSettingsMap(1, null, null, null); + + var overriddenModel = AnthropicChatCompletionModel.of(model, requestTaskSettingsMap); + + assertThat(overriddenModel, is(createChatCompletionModel("url", "api_key", "model_name", 1))); + } + + public void testOverrideWith_EmptyMap() { + var model = createChatCompletionModel("url", "api_key", "model_name", 0); + + var requestTaskSettingsMap = Map.of(); + + var overriddenModel = AnthropicChatCompletionModel.of(model, requestTaskSettingsMap); + assertThat(overriddenModel, sameInstance(model)); + } + + public void testOverrideWith_NullMap() { + var model = createChatCompletionModel("url", "api_key", "model_name", 0); + + var overriddenModel = AnthropicChatCompletionModel.of(model, null); + assertThat(overriddenModel, sameInstance(model)); + } + + public static AnthropicChatCompletionModel createChatCompletionModel(String url, String apiKey, String modelName, int maxTokens) { + return new AnthropicChatCompletionModel( + "id", + TaskType.COMPLETION, + "service", + url, + new AnthropicChatCompletionServiceSettings(modelName, null), + new AnthropicChatCompletionTaskSettings(maxTokens, null, null, null), + new DefaultSecretSettings(new SecureString(apiKey.toCharArray())) + ); + } + + public static AnthropicChatCompletionModel createChatCompletionModel(String apiKey, String modelName, int maxTokens) { + return new AnthropicChatCompletionModel( + "id", + TaskType.COMPLETION, + "service", + new AnthropicChatCompletionServiceSettings(modelName, null), + new AnthropicChatCompletionTaskSettings(maxTokens, null, null, null), + new DefaultSecretSettings(new SecureString(apiKey.toCharArray())) + ); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionRequestTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionRequestTaskSettingsTests.java new file mode 100644 index 0000000000000..86a6b36947f25 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionRequestTaskSettingsTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.anthropic.completion; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.services.anthropic.AnthropicServiceFields; + +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionTaskSettingsTests.getChatCompletionTaskSettingsMap; +import static org.hamcrest.Matchers.is; + +public class AnthropicChatCompletionRequestTaskSettingsTests extends ESTestCase { + + public void testFromMap_ReturnsEmptySettings_WhenTheMapIsEmpty() { + var settings = AnthropicChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of())); + assertNull(settings.maxTokens()); + } + + public void testFromMap_ReturnsEmptySettings_WhenTheMapDoesNotContainTheFields() { + var settings = AnthropicChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of("key", "value"))); + assertNull(settings.maxTokens()); + } + + public void testFromMap_ReturnsMaxTokens() { + var settings = AnthropicChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of(AnthropicServiceFields.MAX_TOKENS, 1))); + assertThat(settings.maxTokens(), is(1)); + } + + public void testFromMap_ReturnsAllValues() { + var settings = AnthropicChatCompletionRequestTaskSettings.fromMap(getChatCompletionTaskSettingsMap(1, -1.1, 0.1, 1)); + assertThat(settings.maxTokens(), is(1)); + assertThat(settings.temperature(), is(-1.1)); + assertThat(settings.topP(), is(0.1)); + assertThat(settings.topK(), is(1)); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionServiceSettingsTests.java new file mode 100644 index 0000000000000..11c2cd56e7955 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionServiceSettingsTests.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.anthropic.completion; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.ServiceFields; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettingsTests; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.is; + +public class AnthropicChatCompletionServiceSettingsTests extends AbstractBWCWireSerializationTestCase< + AnthropicChatCompletionServiceSettings> { + + public void testFromMap_Request_CreatesSettingsCorrectly() { + var modelId = "some model"; + + var serviceSettings = AnthropicChatCompletionServiceSettings.fromMap( + new HashMap<>(Map.of(ServiceFields.MODEL_ID, modelId)), + ConfigurationParseContext.PERSISTENT + ); + + assertThat(serviceSettings, is(new AnthropicChatCompletionServiceSettings(modelId, null))); + } + + public void testFromMap_Request_CreatesSettingsCorrectly_WithRateLimit() { + var modelId = "some model"; + var rateLimit = 2; + var serviceSettings = AnthropicChatCompletionServiceSettings.fromMap( + new HashMap<>( + Map.of( + ServiceFields.MODEL_ID, + modelId, + RateLimitSettings.FIELD_NAME, + new HashMap<>(Map.of(RateLimitSettings.REQUESTS_PER_MINUTE_FIELD, rateLimit)) + ) + ), + ConfigurationParseContext.PERSISTENT + ); + + assertThat(serviceSettings, is(new AnthropicChatCompletionServiceSettings(modelId, new RateLimitSettings(2)))); + } + + public void testToXContent_WritesAllValues() throws IOException { + var serviceSettings = new AnthropicChatCompletionServiceSettings("model", null); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + serviceSettings.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, is(""" + {"model_id":"model","rate_limit":{"requests_per_minute":50}}""")); + } + + public void testToXContent_WritesAllValues_WithCustomRateLimit() throws IOException { + var serviceSettings = new AnthropicChatCompletionServiceSettings("model", new RateLimitSettings(2)); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + serviceSettings.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, is(""" + {"model_id":"model","rate_limit":{"requests_per_minute":2}}""")); + } + + @Override + protected Writeable.Reader instanceReader() { + return AnthropicChatCompletionServiceSettings::new; + } + + @Override + protected AnthropicChatCompletionServiceSettings createTestInstance() { + return createRandom(); + } + + @Override + protected AnthropicChatCompletionServiceSettings mutateInstance(AnthropicChatCompletionServiceSettings instance) throws IOException { + return randomValueOtherThan(instance, AnthropicChatCompletionServiceSettingsTests::createRandom); + } + + private static AnthropicChatCompletionServiceSettings createRandom() { + var modelId = randomAlphaOfLength(8); + + return new AnthropicChatCompletionServiceSettings(modelId, 
RateLimitSettingsTests.createRandom()); + } + + @Override + protected AnthropicChatCompletionServiceSettings mutateInstanceForVersion( + AnthropicChatCompletionServiceSettings instance, + TransportVersion version + ) { + return instance; + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionTaskSettingsTests.java new file mode 100644 index 0000000000000..78762af6eee8c --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/completion/AnthropicChatCompletionTaskSettingsTests.java @@ -0,0 +1,130 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.anthropic.completion; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.anthropic.AnthropicServiceFields; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.is; + +public class AnthropicChatCompletionTaskSettingsTests extends AbstractBWCWireSerializationTestCase { + + public static Map getChatCompletionTaskSettingsMap( + @Nullable Integer maxTokens, + @Nullable Double temperature, + @Nullable Double topP, + @Nullable Integer topK + ) { + var map = new HashMap(); + + if (maxTokens != null) { + map.put(AnthropicServiceFields.MAX_TOKENS, maxTokens); + } + + if (temperature != null) { + map.put(AnthropicServiceFields.TEMPERATURE_FIELD, temperature); + } + + if (topP != null) { + map.put(AnthropicServiceFields.TOP_P_FIELD, topP); + } + + if (topK != null) { + map.put(AnthropicServiceFields.TOP_K_FIELD, topK); + } + + return map; + } + + public static AnthropicChatCompletionTaskSettings createRandom() { + return new AnthropicChatCompletionTaskSettings(randomNonNegativeInt(), randomDouble(), randomDouble(), randomInt()); + } + + public void testFromMap_WithMaxTokens() { + assertEquals( + new AnthropicChatCompletionTaskSettings(1, null, null, null), + AnthropicChatCompletionTaskSettings.fromMap( + getChatCompletionTaskSettingsMap(1, null, null, null), + ConfigurationParseContext.REQUEST + ) + ); + } + + public void testFromMap_AllValues() { + assertEquals( + new AnthropicChatCompletionTaskSettings(1, -1.1, 2.2, 3), + AnthropicChatCompletionTaskSettings.fromMap( + getChatCompletionTaskSettingsMap(1, -1.1, 2.2, 3), + ConfigurationParseContext.REQUEST + ) + ); + } + + public void testFromMap_WithoutMaxTokens_ThrowsException() { + var thrownException = expectThrows( + ValidationException.class, + () -> AnthropicChatCompletionTaskSettings.fromMap(new HashMap<>(Map.of()), ConfigurationParseContext.REQUEST) + ); + + assertThat( + thrownException.getMessage(), + is("Validation Failed: 1: [task_settings] does not contain the required setting [max_tokens];") + ); + } + + public void 
testOf_KeepsOriginalValuesWithOverridesAreEmpty() { + var taskSettings = new AnthropicChatCompletionTaskSettings(1, null, null, null); + + var overriddenTaskSettings = AnthropicChatCompletionTaskSettings.of( + taskSettings, + AnthropicChatCompletionRequestTaskSettings.EMPTY_SETTINGS + ); + assertThat(overriddenTaskSettings, is(taskSettings)); + } + + public void testOf_UsesOverriddenSettings() { + var taskSettings = new AnthropicChatCompletionTaskSettings(1, -1.2, 2.1, 3); + + var requestTaskSettings = new AnthropicChatCompletionRequestTaskSettings(2, 3.0, 4.0, 4); + + var overriddenTaskSettings = AnthropicChatCompletionTaskSettings.of(taskSettings, requestTaskSettings); + assertThat(overriddenTaskSettings, is(new AnthropicChatCompletionTaskSettings(2, 3.0, 4.0, 4))); + } + + @Override + protected AnthropicChatCompletionTaskSettings mutateInstanceForVersion( + AnthropicChatCompletionTaskSettings instance, + TransportVersion version + ) { + return instance; + } + + @Override + protected Writeable.Reader instanceReader() { + return AnthropicChatCompletionTaskSettings::new; + } + + @Override + protected AnthropicChatCompletionTaskSettings createTestInstance() { + return createRandom(); + } + + @Override + protected AnthropicChatCompletionTaskSettings mutateInstance(AnthropicChatCompletionTaskSettings instance) throws IOException { + return randomValueOtherThan(instance, this::createTestInstance); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java index d26b02ddba62b..4cc91249ad244 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; -import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; @@ -60,8 +59,8 @@ import java.util.Set; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.xpack.inference.Utils.PersistedConfig; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; +import static org.elasticsearch.xpack.inference.Utils.getPersistedConfigMap; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; @@ -1069,18 +1068,6 @@ private Map getRequestConfigMap( ); } - private PersistedConfig getPersistedConfigMap( - Map serviceSettings, - Map taskSettings, - Map secretSettings - ) { - - return new PersistedConfig( - new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), - new HashMap<>(Map.of(ModelSecrets.SECRET_SETTINGS, secretSettings)) - ); - } - private static Map getEmbeddingsServiceSettingsMap( String target, String provider, diff --git 
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsServiceSettingsTests.java index 05388192b2f14..c857a22e52996 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsServiceSettingsTests.java @@ -170,6 +170,92 @@ public void testFromMap_Persistent_CreatesSettingsCorrectly() { ); } + public void testFromMap_ThrowsException_WhenDimensionsAreZero() { + var target = "http://sometarget.local"; + var provider = "openai"; + var endpointType = "token"; + var dimensions = 0; + + var settingsMap = createRequestSettingsMap(target, provider, endpointType, dimensions, true, null, SimilarityMeasure.COSINE); + + var thrownException = expectThrows( + ValidationException.class, + () -> AzureAiStudioEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST) + ); + + assertThat( + thrownException.getMessage(), + containsString("Validation Failed: 1: [service_settings] Invalid value [0]. [dimensions] must be a positive integer;") + ); + } + + public void testFromMap_ThrowsException_WhenDimensionsAreNegative() { + var target = "http://sometarget.local"; + var provider = "openai"; + var endpointType = "token"; + var dimensions = randomNegativeInt(); + + var settingsMap = createRequestSettingsMap(target, provider, endpointType, dimensions, true, null, SimilarityMeasure.COSINE); + + var thrownException = expectThrows( + ValidationException.class, + () -> AzureAiStudioEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST) + ); + + assertThat( + thrownException.getMessage(), + containsString( + Strings.format( + "Validation Failed: 1: [service_settings] Invalid value [%d]. [dimensions] must be a positive integer;", + dimensions + ) + ) + ); + } + + public void testFromMap_ThrowsException_WhenMaxInputTokensAreZero() { + var target = "http://sometarget.local"; + var provider = "openai"; + var endpointType = "token"; + var maxInputTokens = 0; + + var settingsMap = createRequestSettingsMap(target, provider, endpointType, null, true, maxInputTokens, SimilarityMeasure.COSINE); + + var thrownException = expectThrows( + ValidationException.class, + () -> AzureAiStudioEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST) + ); + + assertThat( + thrownException.getMessage(), + containsString("Validation Failed: 1: [service_settings] Invalid value [0]. 
[max_input_tokens] must be a positive integer;") + ); + } + + public void testFromMap_ThrowsException_WhenMaxInputTokensAreNegative() { + var target = "http://sometarget.local"; + var provider = "openai"; + var endpointType = "token"; + var maxInputTokens = randomNegativeInt(); + + var settingsMap = createRequestSettingsMap(target, provider, endpointType, null, true, maxInputTokens, SimilarityMeasure.COSINE); + + var thrownException = expectThrows( + ValidationException.class, + () -> AzureAiStudioEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST) + ); + + assertThat( + thrownException.getMessage(), + containsString( + Strings.format( + "Validation Failed: 1: [service_settings] Invalid value [%d]. [max_input_tokens] must be a positive integer;", + maxInputTokens + ) + ) + ); + } + public void testFromMap_PersistentContext_DoesNotThrowException_WhenDimensionsIsNull() { var target = "http://sometarget.local"; var provider = "openai"; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiSecretSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiSecretSettingsTests.java index 697814f1dc7e1..e08365e7ca3bf 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiSecretSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiSecretSettingsTests.java @@ -7,15 +7,16 @@ package org.elasticsearch.xpack.inference.services.azureopenai; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.core.Nullable; -import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; import java.io.IOException; import java.util.HashMap; @@ -26,7 +27,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; -public class AzureOpenAiSecretSettingsTests extends AbstractWireSerializingTestCase { +public class AzureOpenAiSecretSettingsTests extends AbstractBWCWireSerializationTestCase { public static AzureOpenAiSecretSettings createRandom() { return new AzureOpenAiSecretSettings(randomSecureStringOfLength(15), randomSecureStringOfLength(15)); @@ -143,6 +144,11 @@ protected AzureOpenAiSecretSettings mutateInstance(AzureOpenAiSecretSettings ins return randomValueOtherThan(instance, AzureOpenAiSecretSettingsTests::createRandom); } + @Override + protected AzureOpenAiSecretSettings mutateInstanceForVersion(AzureOpenAiSecretSettings instance, TransportVersion version) { + return instance; + } + public static Map getAzureOpenAiSecretSettingsMap(@Nullable String apiKey, @Nullable String entraId) { var map = new HashMap(); if (apiKey != null) { @@ -153,4 +159,5 @@ public static Map getAzureOpenAiSecretSettingsMap(@Nullable Stri } return map; } + } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java 
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java index c3e8eb5c621d2..57bae3f172e6d 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; -import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; @@ -54,8 +53,8 @@ import java.util.Set; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.xpack.inference.Utils.PersistedConfig; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; +import static org.elasticsearch.xpack.inference.Utils.getPersistedConfigMap; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; @@ -1159,23 +1158,4 @@ private Map getRequestConfigMap( Map.of(ModelConfigurations.SERVICE_SETTINGS, builtServiceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings) ); } - - private PersistedConfig getPersistedConfigMap( - Map serviceSettings, - Map taskSettings, - Map secretSettings - ) { - - return new PersistedConfig( - new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), - new HashMap<>(Map.of(ModelSecrets.SECRET_SETTINGS, secretSettings)) - ); - } - - private PersistedConfig getPersistedConfigMap(Map serviceSettings, Map taskSettings) { - return new PersistedConfig( - new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), - null - ); - } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/embeddings/AzureOpenAiEmbeddingsServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/embeddings/AzureOpenAiEmbeddingsServiceSettingsTests.java index cbb9eea223802..8b754257e9d83 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/embeddings/AzureOpenAiEmbeddingsServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/embeddings/AzureOpenAiEmbeddingsServiceSettingsTests.java @@ -203,6 +203,92 @@ public void testFromMap_Request_DimensionsSetByUser_ShouldThrowWhenPresent() { ); } + public void testFromMap_ThrowsException_WhenDimensionsAreZero() { + var resourceName = "this-resource"; + var deploymentId = "this-deployment"; + var apiVersion = "2024-01-01"; + var dimensions = 0; + + var settingsMap = getRequestAzureOpenAiServiceSettingsMap(resourceName, deploymentId, apiVersion, dimensions, null); + + var thrownException = expectThrows( + ValidationException.class, + () -> AzureOpenAiEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST) + ); + + assertThat( + thrownException.getMessage(), + containsString("Validation Failed: 1: [service_settings] Invalid value [0]. 
[dimensions] must be a positive integer;") + ); + } + + public void testFromMap_ThrowsException_WhenDimensionsAreNegative() { + var resourceName = "this-resource"; + var deploymentId = "this-deployment"; + var apiVersion = "2024-01-01"; + var dimensions = randomNegativeInt(); + + var settingsMap = getRequestAzureOpenAiServiceSettingsMap(resourceName, deploymentId, apiVersion, dimensions, null); + + var thrownException = expectThrows( + ValidationException.class, + () -> AzureOpenAiEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST) + ); + + assertThat( + thrownException.getMessage(), + containsString( + Strings.format( + "Validation Failed: 1: [service_settings] Invalid value [%d]. [dimensions] must be a positive integer;", + dimensions + ) + ) + ); + } + + public void testFromMap_ThrowsException_WhenMaxInputTokensAreZero() { + var resourceName = "this-resource"; + var deploymentId = "this-deployment"; + var apiVersion = "2024-01-01"; + var maxInputTokens = 0; + + var settingsMap = getRequestAzureOpenAiServiceSettingsMap(resourceName, deploymentId, apiVersion, null, maxInputTokens); + + var thrownException = expectThrows( + ValidationException.class, + () -> AzureOpenAiEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST) + ); + + assertThat( + thrownException.getMessage(), + containsString("Validation Failed: 1: [service_settings] Invalid value [0]. [max_input_tokens] must be a positive integer;") + ); + } + + public void testFromMap_ThrowsException_WhenMaxInputTokensAreNegative() { + var resourceName = "this-resource"; + var deploymentId = "this-deployment"; + var apiVersion = "2024-01-01"; + var maxInputTokens = randomNegativeInt(); + + var settingsMap = getRequestAzureOpenAiServiceSettingsMap(resourceName, deploymentId, apiVersion, null, maxInputTokens); + + var thrownException = expectThrows( + ValidationException.class, + () -> AzureOpenAiEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST) + ); + + assertThat( + thrownException.getMessage(), + containsString( + Strings.format( + "Validation Failed: 1: [service_settings] Invalid value [%d]. 
[max_input_tokens] must be a positive integer;", + maxInputTokens + ) + ) + ); + } + public void testFromMap_Persistent_CreatesSettingsCorrectly() { var resourceName = "this-resource"; var deploymentId = "this-deployment"; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java index e28ca71c30ff8..f9db4313dcead 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; -import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; @@ -58,8 +57,8 @@ import java.util.Set; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.xpack.inference.Utils.PersistedConfig; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; +import static org.elasticsearch.xpack.inference.Utils.getPersistedConfigMap; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; @@ -1344,10 +1343,7 @@ public void testChunkedInfer_BatchesCalls_Bytes() throws IOException { } public void testDefaultSimilarity() { - assertEquals(SimilarityMeasure.DOT_PRODUCT, CohereService.defaultSimilarity(null)); - assertEquals(SimilarityMeasure.DOT_PRODUCT, CohereService.defaultSimilarity(CohereEmbeddingType.FLOAT)); - assertEquals(SimilarityMeasure.COSINE, CohereService.defaultSimilarity(CohereEmbeddingType.INT8)); - assertEquals(SimilarityMeasure.COSINE, CohereService.defaultSimilarity(CohereEmbeddingType.BYTE)); + assertEquals(SimilarityMeasure.DOT_PRODUCT, CohereService.defaultSimilarity()); } private Map getRequestConfigMap( @@ -1376,23 +1372,4 @@ private CohereService createCohereService() { return new CohereService(mock(HttpRequestSender.Factory.class), createWithEmptySettings(threadPool)); } - private PersistedConfig getPersistedConfigMap( - Map serviceSettings, - Map taskSettings, - Map secretSettings - ) { - - return new PersistedConfig( - new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), - new HashMap<>(Map.of(ModelSecrets.SECRET_SETTINGS, secretSettings)) - ); - } - - private PersistedConfig getPersistedConfigMap(Map serviceSettings, Map taskSettings) { - - return new PersistedConfig( - new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), - null - ); - } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankServiceSettingsTests.java index 1ce5a9fb12833..a729ac8e225b5 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankServiceSettingsTests.java +++ 
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankServiceSettingsTests.java @@ -7,48 +7,58 @@ package org.elasticsearch.xpack.inference.services.cohere.rerank; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; -import org.elasticsearch.inference.SimilarityMeasure; -import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; -import org.elasticsearch.xpack.inference.InferenceNamedWriteablesProvider; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; import org.elasticsearch.xpack.inference.services.cohere.CohereServiceSettings; import org.elasticsearch.xpack.inference.services.cohere.CohereServiceSettingsTests; import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettingsTests; import java.io.IOException; -import java.util.ArrayList; import java.util.HashMap; -import java.util.List; import java.util.Map; -import static org.hamcrest.Matchers.is; +import static org.elasticsearch.xpack.inference.MatchersUtils.equalToIgnoringWhitespaceInJsonString; -public class CohereRerankServiceSettingsTests extends AbstractWireSerializingTestCase { +public class CohereRerankServiceSettingsTests extends AbstractBWCWireSerializationTestCase { public static CohereRerankServiceSettings createRandom() { - var commonSettings = CohereServiceSettingsTests.createRandom(); + return createRandom(randomFrom(new RateLimitSettings[] { null, RateLimitSettingsTests.createRandom() })); + } - return new CohereRerankServiceSettings(commonSettings); + public static CohereRerankServiceSettings createRandom(@Nullable RateLimitSettings rateLimitSettings) { + return new CohereRerankServiceSettings( + randomFrom(new String[] { null, Strings.format("http://%s.com", randomAlphaOfLength(8)) }), + randomFrom(new String[] { null, randomAlphaOfLength(10) }), + rateLimitSettings + ); } public void testToXContent_WritesAllValues() throws IOException { - var serviceSettings = new CohereRerankServiceSettings( - new CohereServiceSettings("url", SimilarityMeasure.COSINE, 5, 10, "model_id", new RateLimitSettings(3)) - ); + var url = "http://www.abc.com"; + var model = "model"; + + var serviceSettings = new CohereRerankServiceSettings(url, model, null); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); serviceSettings.toXContent(builder, null); String xContentResult = Strings.toString(builder); - // TODO we probably shouldn't allow configuring these fields for reranking - assertThat(xContentResult, is(""" - {"url":"url","similarity":"cosine","dimensions":5,"max_input_tokens":10,"model_id":"model_id",""" + """ - "rate_limit":{"requests_per_minute":3}}""")); + + assertThat(xContentResult, equalToIgnoringWhitespaceInJsonString(""" + { + "url":"http://www.abc.com", + "model_id":"model", + "rate_limit": { + "requests_per_minute": 10000 + } + } + """)); } @Override @@ -67,11 +77,12 @@ protected CohereRerankServiceSettings mutateInstance(CohereRerankServiceSettings } @Override - 
protected NamedWriteableRegistry getNamedWriteableRegistry() { - List entries = new ArrayList<>(); - entries.addAll(new MlInferenceNamedXContentProvider().getNamedWriteables()); - entries.addAll(InferenceNamedWriteablesProvider.getNamedWriteables()); - return new NamedWriteableRegistry(entries); + protected CohereRerankServiceSettings mutateInstanceForVersion(CohereRerankServiceSettings instance, TransportVersion version) { + if (version.before(TransportVersions.ML_INFERENCE_RATE_LIMIT_SETTINGS_ADDED)) { + // We always default to the same rate limit settings, if a node is on a version before rate limits were introduced + return new CohereRerankServiceSettings(instance.uri(), instance.modelId(), CohereServiceSettings.DEFAULT_RATE_LIMIT_SETTINGS); + } + return instance; } public static Map getServiceSettingsMap(@Nullable String url, @Nullable String model) { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandInternalTextEmbeddingServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandInternalTextEmbeddingServiceSettingsTests.java new file mode 100644 index 0000000000000..c06aad881d2ab --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandInternalTextEmbeddingServiceSettingsTests.java @@ -0,0 +1,237 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.elasticsearch; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; +import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.ServiceFields; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.ServiceFields.ELEMENT_TYPE; +import static org.elasticsearch.xpack.inference.services.settings.InternalServiceSettings.NUM_ALLOCATIONS; +import static org.elasticsearch.xpack.inference.services.settings.InternalServiceSettings.NUM_THREADS; +import static org.hamcrest.Matchers.is; + +public class CustomElandInternalTextEmbeddingServiceSettingsTests extends AbstractWireSerializingTestCase< + CustomElandInternalTextEmbeddingServiceSettings> { + + public static CustomElandInternalTextEmbeddingServiceSettings createRandom() { + var numAllocations = randomIntBetween(1, 10); + var numThreads = randomIntBetween(1, 10); + var modelId = randomAlphaOfLength(8); + SimilarityMeasure similarityMeasure = SimilarityMeasure.COSINE; + Integer dims = null; + var setDimensions = randomBoolean(); + if (setDimensions) { + dims = 123; + } + + var elementType = randomFrom(DenseVectorFieldMapper.ElementType.values()); + + return new CustomElandInternalTextEmbeddingServiceSettings( + numAllocations, + numThreads, + modelId, + null, + dims, + similarityMeasure, + 
elementType + ); + } + + public void testFromMap_Request_CreatesSettingsCorrectly() { + var modelId = "model-foo"; + var similarity = SimilarityMeasure.DOT_PRODUCT.toString(); + var numAllocations = 1; + var numThreads = 1; + var serviceSettings = CustomElandInternalTextEmbeddingServiceSettings.fromMap( + new HashMap<>( + Map.of( + ServiceFields.MODEL_ID, + modelId, + NUM_ALLOCATIONS, + numAllocations, + NUM_THREADS, + numThreads, + ServiceFields.SIMILARITY, + similarity, + ELEMENT_TYPE, + DenseVectorFieldMapper.ElementType.FLOAT.toString() + ) + ), + ConfigurationParseContext.REQUEST + ); + + assertThat( + serviceSettings, + is( + new CustomElandInternalTextEmbeddingServiceSettings( + numAllocations, + numThreads, + modelId, + null, + null, + SimilarityMeasure.DOT_PRODUCT, + DenseVectorFieldMapper.ElementType.FLOAT + ) + ) + ); + } + + public void testFromMap_Request_DoesNotDefaultSimilarityElementType() { + var modelId = "model-foo"; + var numAllocations = 1; + var numThreads = 1; + var serviceSettings = CustomElandInternalTextEmbeddingServiceSettings.fromMap( + new HashMap<>(Map.of(ServiceFields.MODEL_ID, modelId, NUM_ALLOCATIONS, numAllocations, NUM_THREADS, numThreads)), + ConfigurationParseContext.REQUEST + ); + + assertThat( + serviceSettings, + is( + new CustomElandInternalTextEmbeddingServiceSettings( + numAllocations, + numThreads, + modelId, + null, + null, + SimilarityMeasure.COSINE, + DenseVectorFieldMapper.ElementType.FLOAT + ) + ) + ); + } + + public void testFromMap_Request_IgnoresDimensions() { + var modelId = "model-foo"; + var similarity = SimilarityMeasure.DOT_PRODUCT.toString(); + var numAllocations = 1; + var numThreads = 1; + var serviceSettings = CustomElandInternalTextEmbeddingServiceSettings.fromMap( + new HashMap<>( + Map.of( + ServiceFields.MODEL_ID, + modelId, + NUM_ALLOCATIONS, + numAllocations, + NUM_THREADS, + numThreads, + ServiceFields.SIMILARITY, + similarity, + ELEMENT_TYPE, + DenseVectorFieldMapper.ElementType.FLOAT.toString(), + ServiceFields.DIMENSIONS, + 1 + ) + ), + ConfigurationParseContext.REQUEST + ); + + assertThat( + serviceSettings, + is( + new CustomElandInternalTextEmbeddingServiceSettings( + numAllocations, + numThreads, + modelId, + null, + null, + SimilarityMeasure.DOT_PRODUCT, + DenseVectorFieldMapper.ElementType.FLOAT + ) + ) + ); + } + + public void testFromMap_Persistent_CreatesSettingsCorrectly() { + var modelId = "model-foo"; + var similarity = SimilarityMeasure.DOT_PRODUCT.toString(); + var numAllocations = 1; + var numThreads = 1; + var serviceSettings = CustomElandInternalTextEmbeddingServiceSettings.fromMap( + new HashMap<>( + Map.of( + ServiceFields.MODEL_ID, + modelId, + NUM_ALLOCATIONS, + numAllocations, + NUM_THREADS, + numThreads, + ServiceFields.SIMILARITY, + similarity, + ELEMENT_TYPE, + DenseVectorFieldMapper.ElementType.FLOAT.toString(), + ServiceFields.DIMENSIONS, + 1 + ) + ), + ConfigurationParseContext.PERSISTENT + ); + + assertThat( + serviceSettings, + is( + new CustomElandInternalTextEmbeddingServiceSettings( + numAllocations, + numThreads, + modelId, + null, + 1, + SimilarityMeasure.DOT_PRODUCT, + DenseVectorFieldMapper.ElementType.FLOAT + ) + ) + ); + } + + public void testToXContent_WritesAllValues() throws IOException { + var entity = new CustomElandInternalTextEmbeddingServiceSettings( + 1, + 1, + "model_id", + null, + 100, + SimilarityMeasure.COSINE, + DenseVectorFieldMapper.ElementType.BYTE + ); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, 
null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, is(""" + {"num_allocations":1,"num_threads":1,"model_id":"model_id","dimensions":100,"similarity":"cosine","element_type":"byte"}""")); + } + + @Override + protected Writeable.Reader instanceReader() { + return CustomElandInternalTextEmbeddingServiceSettings::new; + } + + @Override + protected CustomElandInternalTextEmbeddingServiceSettings createTestInstance() { + return createRandom(); + } + + @Override + protected CustomElandInternalTextEmbeddingServiceSettings mutateInstance(CustomElandInternalTextEmbeddingServiceSettings instance) + throws IOException { + return randomValueOtherThan(instance, CustomElandInternalTextEmbeddingServiceSettingsTests::createRandom); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java index dfcfe466c2a3b..ad1910cb9fc0a 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.Client; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.InferenceResults; @@ -40,6 +41,7 @@ import org.elasticsearch.xpack.core.ml.inference.results.ErrorInferenceResults; import org.elasticsearch.xpack.core.ml.inference.results.MlChunkedTextEmbeddingFloatResults; import org.elasticsearch.xpack.core.ml.inference.results.MlChunkedTextEmbeddingFloatResultsTests; +import org.elasticsearch.xpack.core.ml.inference.results.MlTextEmbeddingResults; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextEmbeddingConfigUpdate; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TokenizationConfigUpdate; import org.elasticsearch.xpack.core.utils.FloatConversionUtils; @@ -52,6 +54,7 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -66,6 +69,7 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.same; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -130,7 +134,8 @@ public void testParseRequestConfig() { var e5ServiceSettings = new MultilingualE5SmallInternalServiceSettings( 1, 4, - ElasticsearchInternalService.MULTILINGUAL_E5_SMALL_MODEL_ID + ElasticsearchInternalService.MULTILINGUAL_E5_SMALL_MODEL_ID, + null ); service.parseRequestConfig( @@ -286,9 +291,9 @@ public void testParseRequestConfig_Rerank() { ); ActionListener modelListener = ActionListener.wrap(model -> { - assertThat(model, instanceOf(CustomElandModel.class)); + assertThat(model, instanceOf(CustomElandRerankModel.class)); assertThat(model.getTaskSettings(), instanceOf(CustomElandRerankTaskSettings.class)); - 
assertThat(model.getServiceSettings(), instanceOf(ElasticsearchInternalServiceSettings.class)); + assertThat(model.getServiceSettings(), instanceOf(CustomElandInternalServiceSettings.class)); assertEquals(returnDocs, ((CustomElandRerankTaskSettings) model.getTaskSettings()).returnDocuments()); }, e -> { fail("Model parsing failed " + e.getMessage()); }); @@ -328,9 +333,9 @@ public void testParseRequestConfig_Rerank_DefaultTaskSettings() { ); ActionListener modelListener = ActionListener.wrap(model -> { - assertThat(model, instanceOf(CustomElandModel.class)); + assertThat(model, instanceOf(CustomElandRerankModel.class)); assertThat(model.getTaskSettings(), instanceOf(CustomElandRerankTaskSettings.class)); - assertThat(model.getServiceSettings(), instanceOf(ElasticsearchInternalServiceSettings.class)); + assertThat(model.getServiceSettings(), instanceOf(CustomElandInternalServiceSettings.class)); assertEquals(Boolean.TRUE, ((CustomElandRerankTaskSettings) model.getTaskSettings()).returnDocuments()); }, e -> { fail("Model parsing failed " + e.getMessage()); }); @@ -391,10 +396,14 @@ public void testParsePersistedConfig() { ) ); - CustomElandModel parsedModel = (CustomElandModel) service.parsePersistedConfig(randomInferenceEntityId, taskType, settings); - var elandServiceSettings = new CustomElandInternalServiceSettings(1, 4, "invalid"); + CustomElandEmbeddingModel parsedModel = (CustomElandEmbeddingModel) service.parsePersistedConfig( + randomInferenceEntityId, + taskType, + settings + ); + var elandServiceSettings = new CustomElandInternalTextEmbeddingServiceSettings(1, 4, "invalid", null); assertEquals( - new CustomElandModel(randomInferenceEntityId, taskType, ElasticsearchInternalService.NAME, elandServiceSettings), + new CustomElandEmbeddingModel(randomInferenceEntityId, taskType, ElasticsearchInternalService.NAME, elandServiceSettings), parsedModel ); } @@ -422,7 +431,8 @@ public void testParsePersistedConfig() { var e5ServiceSettings = new MultilingualE5SmallInternalServiceSettings( 1, 4, - ElasticsearchInternalService.MULTILINGUAL_E5_SMALL_MODEL_ID + ElasticsearchInternalService.MULTILINGUAL_E5_SMALL_MODEL_ID, + null ); MultilingualE5SmallModel parsedModel = (MultilingualE5SmallModel) service.parsePersistedConfig( @@ -492,7 +502,7 @@ public void testChunkInfer() { "foo", TaskType.TEXT_EMBEDDING, "e5", - new MultilingualE5SmallInternalServiceSettings(1, 1, "cross-platform") + new MultilingualE5SmallInternalServiceSettings(1, 1, "cross-platform", null) ); var service = createService(client); @@ -586,7 +596,7 @@ public void testChunkInferSetsTokenization() { "foo", TaskType.TEXT_EMBEDDING, "e5", - new MultilingualE5SmallInternalServiceSettings(1, 1, "cross-platform") + new MultilingualE5SmallInternalServiceSettings(1, 1, "cross-platform", null) ); var service = createService(client); @@ -683,7 +693,7 @@ public void testParseRequestConfigEland_PreservesTaskType() { new GetTrainedModelsAction.Response(new QueryPage<>(List.of(mock(TrainedModelConfig.class)), 1, mock(ParseField.class))) ); return Void.TYPE; - }).when(client).execute(any(), any(), any()); + }).when(client).execute(eq(GetTrainedModelsAction.INSTANCE), any(), any()); when(client.threadPool()).thenReturn(threadPool); var service = createService(client); @@ -702,16 +712,8 @@ public void testParseRequestConfigEland_PreservesTaskType() { ) ); - var serviceSettings = new CustomElandInternalServiceSettings(1, 4, "custom-model"); - var taskType = randomFrom(TaskType.values()); - var taskSettings = taskType == TaskType.RERANK ? 
CustomElandRerankTaskSettings.DEFAULT_SETTINGS : null; - var expectedModel = CustomElandModel.build( - randomInferenceEntityId, - taskType, - ElasticsearchInternalService.NAME, - serviceSettings, - taskSettings - ); + var taskType = randomFrom(EnumSet.of(TaskType.RERANK, TaskType.TEXT_EMBEDDING)); + CustomElandModel expectedModel = getCustomElandModel(taskType); PlainActionFuture listener = new PlainActionFuture<>(); service.parseRequestConfig(randomInferenceEntityId, taskType, settings, Set.of(), listener); @@ -719,6 +721,29 @@ public void testParseRequestConfigEland_PreservesTaskType() { assertThat(model, is(expectedModel)); } + private CustomElandModel getCustomElandModel(TaskType taskType) { + CustomElandModel expectedModel = null; + if (taskType == TaskType.RERANK) { + expectedModel = new CustomElandRerankModel( + randomInferenceEntityId, + taskType, + ElasticsearchInternalService.NAME, + new CustomElandInternalServiceSettings(1, 4, "custom-model", null), + CustomElandRerankTaskSettings.DEFAULT_SETTINGS + ); + } else if (taskType == TaskType.TEXT_EMBEDDING) { + var serviceSettings = new CustomElandInternalTextEmbeddingServiceSettings(1, 4, "custom-model", null); + + expectedModel = new CustomElandEmbeddingModel( + randomInferenceEntityId, + taskType, + ElasticsearchInternalService.NAME, + serviceSettings + ); + } + return expectedModel; + } + public void testBuildInferenceRequest() { var id = randomAlphaOfLength(5); var inputs = randomList(1, 3, () -> randomAlphaOfLength(4)); @@ -763,7 +788,7 @@ public void testPutModel() { "my-e5", TaskType.TEXT_EMBEDDING, "e5", - new MultilingualE5SmallInternalServiceSettings(1, 1, ".multilingual-e5-small") + new MultilingualE5SmallInternalServiceSettings(1, 1, ".multilingual-e5-small", null) ); service.putModel(model, new ActionListener<>() { @@ -782,6 +807,63 @@ public void onFailure(Exception e) { assertEquals("text_field", putConfig.getInput().getFieldNames().get(0)); } + public void testParseRequestConfigEland_SetsDimensionsToOne() { + var client = mock(Client.class); + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocationOnMock + .getArguments()[2]; + listener.onResponse( + new InferModelAction.Response(List.of(new MlTextEmbeddingResults("field", new double[] { 0.1 }, false)), "id", true) + ); + + var request = (InferModelAction.Request) invocationOnMock.getArguments()[1]; + assertThat(request.getId(), is("custom-model")); + return Void.TYPE; + }).when(client).execute(eq(InferModelAction.INSTANCE), any(), any()); + when(client.threadPool()).thenReturn(threadPool); + + var service = createService(client); + + var serviceSettings = new CustomElandInternalTextEmbeddingServiceSettings( + 1, + 4, + "custom-model", + null, + 1, + SimilarityMeasure.COSINE, + DenseVectorFieldMapper.ElementType.FLOAT + ); + var taskType = TaskType.TEXT_EMBEDDING; + var expectedModel = new CustomElandEmbeddingModel( + randomInferenceEntityId, + taskType, + ElasticsearchInternalService.NAME, + serviceSettings + ); + + PlainActionFuture listener = new PlainActionFuture<>(); + service.checkModelConfig( + new CustomElandEmbeddingModel( + randomInferenceEntityId, + taskType, + ElasticsearchInternalService.NAME, + new CustomElandInternalTextEmbeddingServiceSettings( + 1, + 4, + "custom-model", + null, + null, + SimilarityMeasure.COSINE, + DenseVectorFieldMapper.ElementType.FLOAT + ) + ), + listener + ); + var model = listener.actionGet(TimeValue.THIRTY_SECONDS); + assertThat(model, is(expectedModel)); + } + 
private ElasticsearchInternalService createService(Client client) { var context = new InferenceServiceExtension.InferenceServiceFactoryContext(client); return new ElasticsearchInternalService(context); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/MultilingualE5SmallInternalServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/MultilingualE5SmallInternalServiceSettingsTests.java index fbff04efe6883..2804d3c72229f 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/MultilingualE5SmallInternalServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/MultilingualE5SmallInternalServiceSettingsTests.java @@ -24,7 +24,8 @@ public static MultilingualE5SmallInternalServiceSettings createRandom() { return new MultilingualE5SmallInternalServiceSettings( randomIntBetween(1, 4), randomIntBetween(1, 4), - randomFrom(ElasticsearchInternalService.MULTILINGUAL_E5_SMALL_VALID_IDS) + randomFrom(ElasticsearchInternalService.MULTILINGUAL_E5_SMALL_VALID_IDS), + null ); } @@ -56,7 +57,7 @@ public void testFromMap() { ) ) ).build(); - assertEquals(new MultilingualE5SmallInternalServiceSettings(1, 4, randomModelVariant), serviceSettings); + assertEquals(new MultilingualE5SmallInternalServiceSettings(1, 4, randomModelVariant, null), serviceSettings); } public void testFromMapInvalidVersion() { @@ -96,7 +97,10 @@ public void testFromMapMissingOptions() { ) ); - assertThat(e.getMessage(), containsString("[service_settings] does not contain the required setting [num_allocations]")); + assertThat( + e.getMessage(), + containsString("[service_settings] does not contain one of the required settings [num_allocations, adaptive_allocations]") + ); } public void testFromMapInvalidSettings() { @@ -130,12 +134,14 @@ protected MultilingualE5SmallInternalServiceSettings mutateInstance(Multilingual case 0 -> new MultilingualE5SmallInternalServiceSettings( instance.getNumAllocations() + 1, instance.getNumThreads(), - instance.getModelId() + instance.getModelId(), + null ); case 1 -> new MultilingualE5SmallInternalServiceSettings( instance.getNumAllocations(), instance.getNumThreads() + 1, - instance.getModelId() + instance.getModelId(), + null ); case 2 -> { var versions = new HashSet<>(ElasticsearchInternalService.MULTILINGUAL_E5_SMALL_VALID_IDS); @@ -143,7 +149,8 @@ protected MultilingualE5SmallInternalServiceSettings mutateInstance(Multilingual yield new MultilingualE5SmallInternalServiceSettings( instance.getNumAllocations(), instance.getNumThreads(), - versions.iterator().next() + versions.iterator().next(), + null ); } default -> throw new IllegalStateException(); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalServiceSettingsTests.java index c0e425144a618..8f60f5b4b673b 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalServiceSettingsTests.java @@ -25,7 +25,8 @@ public static ElserInternalServiceSettings createRandom() { return new ElserInternalServiceSettings( randomIntBetween(1, 4), 
randomIntBetween(1, 2), - randomFrom(ElserInternalService.VALID_ELSER_MODEL_IDS) + randomFrom(ElserInternalService.VALID_ELSER_MODEL_IDS), + null ); } @@ -49,7 +50,7 @@ public void testFromMap() { ) ) ).build(); - assertEquals(new ElserInternalServiceSettings(1, 4, ".elser_model_1"), serviceSettings); + assertEquals(new ElserInternalServiceSettings(1, 4, ".elser_model_1", null), serviceSettings); } public void testFromMapInvalidVersion() { @@ -84,17 +85,20 @@ public void testFromMapMissingOptions() { () -> ElserInternalServiceSettings.fromMap(new HashMap<>(Map.of(ElserInternalServiceSettings.NUM_THREADS, 1))) ); - assertThat(e.getMessage(), containsString("[service_settings] does not contain the required setting [num_allocations]")); + assertThat( + e.getMessage(), + containsString("[service_settings] does not contain one of the required settings [num_allocations, adaptive_allocations]") + ); } public void testBwcWrite() throws IOException { { - var settings = new ElserInternalServiceSettings(1, 1, ".elser_model_1"); + var settings = new ElserInternalServiceSettings(1, 1, ".elser_model_1", null); var copy = copyInstance(settings, TransportVersions.V_8_12_0); assertEquals(settings, copy); } { - var settings = new ElserInternalServiceSettings(1, 1, ".elser_model_1"); + var settings = new ElserInternalServiceSettings(1, 1, ".elser_model_1", null); var copy = copyInstance(settings, TransportVersions.V_8_11_X); assertEquals(settings, copy); } @@ -123,12 +127,27 @@ protected ElserInternalServiceSettings createTestInstance() { @Override protected ElserInternalServiceSettings mutateInstance(ElserInternalServiceSettings instance) { return switch (randomIntBetween(0, 2)) { - case 0 -> new ElserInternalServiceSettings(instance.getNumAllocations() + 1, instance.getNumThreads(), instance.getModelId()); - case 1 -> new ElserInternalServiceSettings(instance.getNumAllocations(), instance.getNumThreads() + 1, instance.getModelId()); + case 0 -> new ElserInternalServiceSettings( + instance.getNumAllocations() + 1, + instance.getNumThreads(), + instance.getModelId(), + null + ); + case 1 -> new ElserInternalServiceSettings( + instance.getNumAllocations(), + instance.getNumThreads() + 1, + instance.getModelId(), + null + ); case 2 -> { var versions = new HashSet<>(ElserInternalService.VALID_ELSER_MODEL_IDS); versions.remove(instance.getModelId()); - yield new ElserInternalServiceSettings(instance.getNumAllocations(), instance.getNumThreads(), versions.iterator().next()); + yield new ElserInternalServiceSettings( + instance.getNumAllocations(), + instance.getNumThreads(), + versions.iterator().next(), + null + ); } default -> throw new IllegalStateException(); }; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalServiceTests.java index bc7dca4f11960..5ee55003e7fe1 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserInternalServiceTests.java @@ -108,7 +108,7 @@ public void testParseConfigStrict() { "foo", TaskType.SPARSE_EMBEDDING, ElserInternalService.NAME, - new ElserInternalServiceSettings(1, 4, ".elser_model_1"), + new ElserInternalServiceSettings(1, 4, ".elser_model_1", null), ElserMlNodeTaskSettings.DEFAULT ); @@ -141,7 +141,7 @@ public void 
testParseConfigLooseWithOldModelId() { "foo", TaskType.SPARSE_EMBEDDING, ElserInternalService.NAME, - new ElserInternalServiceSettings(1, 4, ".elser_model_1"), + new ElserInternalServiceSettings(1, 4, ".elser_model_1", null), ElserMlNodeTaskSettings.DEFAULT ); @@ -171,7 +171,7 @@ public void testParseConfigStrictWithNoTaskSettings() { "foo", TaskType.SPARSE_EMBEDDING, ElserInternalService.NAME, - new ElserInternalServiceSettings(1, 4, ElserInternalService.ELSER_V2_MODEL), + new ElserInternalServiceSettings(1, 4, ElserInternalService.ELSER_V2_MODEL, null), ElserMlNodeTaskSettings.DEFAULT ); @@ -373,7 +373,7 @@ public void testChunkInfer() { "foo", TaskType.SPARSE_EMBEDDING, "elser", - new ElserInternalServiceSettings(1, 1, "elser"), + new ElserInternalServiceSettings(1, 1, "elser", null), new ElserMlNodeTaskSettings() ); var service = createService(client); @@ -437,7 +437,7 @@ public void testChunkInferSetsTokenization() { "foo", TaskType.SPARSE_EMBEDDING, "elser", - new ElserInternalServiceSettings(1, 1, "elser"), + new ElserInternalServiceSettings(1, 1, "elser", null), new ElserMlNodeTaskSettings() ); var service = createService(client); @@ -489,7 +489,7 @@ public void testPutModel() { "my-elser", TaskType.SPARSE_EMBEDDING, "elser", - new ElserInternalServiceSettings(1, 1, ".elser_model_2"), + new ElserInternalServiceSettings(1, 1, ".elser_model_2", null), ElserMlNodeTaskSettings.DEFAULT ); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java index 45dd8ad7b33bd..f807f8e5205b2 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; -import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; @@ -57,8 +56,8 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import static org.elasticsearch.xpack.inference.Utils.PersistedConfig; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; +import static org.elasticsearch.xpack.inference.Utils.getPersistedConfigMap; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; @@ -945,23 +944,4 @@ private Map getRequestConfigMap( private GoogleAiStudioService createGoogleAiStudioService() { return new GoogleAiStudioService(mock(HttpRequestSender.Factory.class), createWithEmptySettings(threadPool)); } - - private PersistedConfig getPersistedConfigMap( - Map serviceSettings, - Map taskSettings, - Map secretSettings - ) { - - return new PersistedConfig( - new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), - new HashMap<>(Map.of(ModelSecrets.SECRET_SETTINGS, secretSettings)) - ); - } - - private PersistedConfig getPersistedConfigMap(Map 
serviceSettings, Map taskSettings) { - return new PersistedConfig( - new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), - null - ); - } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiSecretSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiSecretSettingsTests.java new file mode 100644 index 0000000000000..95d3522b863a9 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiSecretSettingsTests.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.googlevertexai; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.is; + +public class GoogleVertexAiSecretSettingsTests extends AbstractBWCWireSerializationTestCase { + + public static GoogleVertexAiSecretSettings createRandom() { + return new GoogleVertexAiSecretSettings(randomSecureStringOfLength(30)); + } + + public void testFromMap_ReturnsNull_WhenMapIsNUll() { + assertNull(GoogleVertexAiSecretSettings.fromMap(null)); + } + + public void testFromMap_ThrowsError_IfServiceAccountJsonIsMissing() { + expectThrows(ValidationException.class, () -> GoogleVertexAiSecretSettings.fromMap(new HashMap<>())); + } + + public void testFromMap_ThrowsError_IfServiceAccountJsonIsEmpty() { + expectThrows( + ValidationException.class, + () -> GoogleVertexAiSecretSettings.fromMap(new HashMap<>(Map.of(GoogleVertexAiSecretSettings.SERVICE_ACCOUNT_JSON, ""))) + ); + } + + public void testToXContent_WritesServiceAccountJson() throws IOException { + var secretSettings = new GoogleVertexAiSecretSettings(new SecureString("json")); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + secretSettings.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, is(""" + {"service_account_json":"json"}""")); + } + + @Override + protected Writeable.Reader instanceReader() { + return GoogleVertexAiSecretSettings::new; + } + + @Override + protected GoogleVertexAiSecretSettings createTestInstance() { + return createRandom(); + } + + @Override + protected GoogleVertexAiSecretSettings mutateInstance(GoogleVertexAiSecretSettings instance) throws IOException { + return randomValueOtherThan(instance, GoogleVertexAiSecretSettingsTests::createRandom); + } + + @Override + protected GoogleVertexAiSecretSettings mutateInstanceForVersion(GoogleVertexAiSecretSettings instance, TransportVersion version) { + return instance; + } +} diff --git 
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceTests.java new file mode 100644 index 0000000000000..d8c727c5a58bc --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceTests.java @@ -0,0 +1,603 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.googlevertexai; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.inference.external.http.HttpClientManager; +import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; +import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.elasticsearch.xpack.inference.services.ServiceFields; +import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsModel; +import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsServiceSettings; +import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings; +import org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankModel; +import org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankTaskSettings; +import org.hamcrest.CoreMatchers; +import org.hamcrest.Matchers; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.xpack.inference.Utils.getPersistedConfigMap; +import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; +import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; +import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; + +public class GoogleVertexAiServiceTests extends ESTestCase { + + private final MockWebServer webServer = new MockWebServer(); + private ThreadPool threadPool; + + private HttpClientManager clientManager; + + @Before + public void init() throws Exception { + webServer.start(); + threadPool = createThreadPool(inferenceUtilityPool()); + clientManager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty(), mock(ThrottlerManager.class)); + } + + @After + public void shutdown() throws IOException { + clientManager.close(); + terminate(threadPool); + webServer.close(); + } + + public void testParseRequestConfig_CreatesGoogleVertexAiEmbeddingsModel() throws IOException { + var projectId = "project"; + var location = "location"; + 
var modelId = "model"; + var serviceAccountJson = """ + { + "some json" + } + """; + + try (var service = createGoogleVertexAiService()) { + ActionListener modelListener = ActionListener.wrap(model -> { + assertThat(model, instanceOf(GoogleVertexAiEmbeddingsModel.class)); + + var embeddingsModel = (GoogleVertexAiEmbeddingsModel) model; + + assertThat(embeddingsModel.getServiceSettings().modelId(), is(modelId)); + assertThat(embeddingsModel.getServiceSettings().location(), is(location)); + assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); + assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); + }, e -> fail("Model parsing should have succeeded, but failed: " + e.getMessage())); + + service.parseRequestConfig( + "id", + TaskType.TEXT_EMBEDDING, + getRequestConfigMap( + new HashMap<>( + Map.of( + ServiceFields.MODEL_ID, + modelId, + GoogleVertexAiServiceFields.LOCATION, + location, + GoogleVertexAiServiceFields.PROJECT_ID, + projectId + ) + ), + new HashMap<>(Map.of()), + getSecretSettingsMap(serviceAccountJson) + ), + Set.of(), + modelListener + ); + } + } + + public void testParseRequestConfig_CreatesGoogleVertexAiRerankModel() throws IOException { + var projectId = "project"; + var serviceAccountJson = """ + { + "some json" + } + """; + + try (var service = createGoogleVertexAiService()) { + ActionListener modelListener = ActionListener.wrap(model -> { + assertThat(model, instanceOf(GoogleVertexAiRerankModel.class)); + + var rerankModel = (GoogleVertexAiRerankModel) model; + + assertThat(rerankModel.getServiceSettings().projectId(), is(projectId)); + assertThat(rerankModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); + }, e -> fail("Model parsing should have succeeded, but failed: " + e.getMessage())); + + service.parseRequestConfig( + "id", + TaskType.RERANK, + getRequestConfigMap( + new HashMap<>(Map.of(GoogleVertexAiServiceFields.PROJECT_ID, projectId)), + new HashMap<>(Map.of()), + getSecretSettingsMap(serviceAccountJson) + ), + Set.of(), + modelListener + ); + } + } + + public void testParseRequestConfig_ThrowsUnsupportedModelType() throws IOException { + try (var service = createGoogleVertexAiService()) { + var failureListener = getModelListenerForException( + ElasticsearchStatusException.class, + "The [googlevertexai] service does not support task type [sparse_embedding]" + ); + + service.parseRequestConfig( + "id", + TaskType.SPARSE_EMBEDDING, + getRequestConfigMap( + new HashMap<>( + Map.of( + ServiceFields.MODEL_ID, + "model", + GoogleVertexAiServiceFields.LOCATION, + "location", + GoogleVertexAiServiceFields.PROJECT_ID, + "project" + ) + ), + new HashMap<>(Map.of()), + getSecretSettingsMap("{}") + ), + Set.of(), + failureListener + ); + } + } + + public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInConfig() throws IOException { + try (var service = createGoogleVertexAiService()) { + var config = getRequestConfigMap( + new HashMap<>( + Map.of( + ServiceFields.MODEL_ID, + "model", + GoogleVertexAiServiceFields.LOCATION, + "location", + GoogleVertexAiServiceFields.PROJECT_ID, + "project" + ) + ), + getTaskSettingsMap(true), + getSecretSettingsMap("{}") + ); + config.put("extra_key", "value"); + + var failureListener = getModelListenerForException( + ElasticsearchStatusException.class, + "Model configuration contains settings [{extra_key=value}] unknown to the [googlevertexai] service" + ); + service.parseRequestConfig("id", TaskType.TEXT_EMBEDDING, config, Set.of(), 
failureListener); + } + } + + public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInServiceSettingsMap() throws IOException { + try (var service = createGoogleVertexAiService()) { + Map serviceSettings = new HashMap<>( + Map.of( + ServiceFields.MODEL_ID, + "model", + GoogleVertexAiServiceFields.LOCATION, + "location", + GoogleVertexAiServiceFields.PROJECT_ID, + "project" + ) + ); + serviceSettings.put("extra_key", "value"); + + var config = getRequestConfigMap(serviceSettings, getTaskSettingsMap(true), getSecretSettingsMap("{}")); + + var failureListener = getModelListenerForException( + ElasticsearchStatusException.class, + "Model configuration contains settings [{extra_key=value}] unknown to the [googlevertexai] service" + ); + service.parseRequestConfig("id", TaskType.TEXT_EMBEDDING, config, Set.of(), failureListener); + } + } + + public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInTaskSettingsMap() throws IOException { + try (var service = createGoogleVertexAiService()) { + Map taskSettingsMap = new HashMap<>(); + taskSettingsMap.put("extra_key", "value"); + + var config = getRequestConfigMap( + new HashMap<>( + Map.of( + ServiceFields.MODEL_ID, + "model", + GoogleVertexAiServiceFields.LOCATION, + "location", + GoogleVertexAiServiceFields.PROJECT_ID, + "project" + ) + ), + taskSettingsMap, + getSecretSettingsMap("{}") + ); + + var failureListener = getModelListenerForException( + ElasticsearchStatusException.class, + "Model configuration contains settings [{extra_key=value}] unknown to the [googlevertexai] service" + ); + service.parseRequestConfig("id", TaskType.TEXT_EMBEDDING, config, Set.of(), failureListener); + } + } + + public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInSecretSettingsMap() throws IOException { + try (var service = createGoogleVertexAiService()) { + Map secretSettings = getSecretSettingsMap("{}"); + secretSettings.put("extra_key", "value"); + + var config = getRequestConfigMap( + new HashMap<>( + Map.of( + ServiceFields.MODEL_ID, + "model", + GoogleVertexAiServiceFields.LOCATION, + "location", + GoogleVertexAiServiceFields.PROJECT_ID, + "project" + ) + ), + getTaskSettingsMap(true), + secretSettings + ); + + var failureListener = getModelListenerForException( + ElasticsearchStatusException.class, + "Model configuration contains settings [{extra_key=value}] unknown to the [googlevertexai] service" + ); + service.parseRequestConfig("id", TaskType.TEXT_EMBEDDING, config, Set.of(), failureListener); + } + } + + public void testParsePersistedConfigWithSecrets_CreatesGoogleVertexAiEmbeddingsModel() throws IOException { + var projectId = "project"; + var location = "location"; + var modelId = "model"; + var autoTruncate = true; + var serviceAccountJson = """ + { + "some json" + } + """; + + try (var service = createGoogleVertexAiService()) { + var persistedConfig = getPersistedConfigMap( + new HashMap<>( + Map.of( + ServiceFields.MODEL_ID, + modelId, + GoogleVertexAiServiceFields.LOCATION, + location, + GoogleVertexAiServiceFields.PROJECT_ID, + projectId, + GoogleVertexAiEmbeddingsServiceSettings.DIMENSIONS_SET_BY_USER, + true + ) + ), + getTaskSettingsMap(autoTruncate), + getSecretSettingsMap(serviceAccountJson) + ); + + var model = service.parsePersistedConfigWithSecrets( + "id", + TaskType.TEXT_EMBEDDING, + persistedConfig.config(), + persistedConfig.secrets() + ); + + assertThat(model, instanceOf(GoogleVertexAiEmbeddingsModel.class)); + + var embeddingsModel = (GoogleVertexAiEmbeddingsModel) model; + 
assertThat(embeddingsModel.getServiceSettings().modelId(), is(modelId)); + assertThat(embeddingsModel.getServiceSettings().location(), is(location)); + assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); + assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); + } + } + + public void testParsePersistedConfigWithSecrets_CreatesGoogleVertexAiRerankModel() throws IOException { + var projectId = "project"; + var topN = 1; + var serviceAccountJson = """ + { + "some json" + } + """; + + try (var service = createGoogleVertexAiService()) { + var persistedConfig = getPersistedConfigMap( + new HashMap<>(Map.of(GoogleVertexAiServiceFields.PROJECT_ID, projectId)), + getTaskSettingsMap(topN), + getSecretSettingsMap(serviceAccountJson) + ); + + var model = service.parsePersistedConfigWithSecrets("id", TaskType.RERANK, persistedConfig.config(), persistedConfig.secrets()); + + assertThat(model, instanceOf(GoogleVertexAiRerankModel.class)); + + var rerankModel = (GoogleVertexAiRerankModel) model; + assertThat(rerankModel.getServiceSettings().projectId(), is(projectId)); + assertThat(rerankModel.getTaskSettings(), is(new GoogleVertexAiRerankTaskSettings(topN))); + assertThat(rerankModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); + } + } + + public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInConfig() throws IOException { + var projectId = "project"; + var location = "location"; + var modelId = "model"; + var autoTruncate = true; + var serviceAccountJson = """ + { + "some json" + } + """; + + try (var service = createGoogleVertexAiService()) { + var persistedConfig = getPersistedConfigMap( + new HashMap<>( + Map.of( + ServiceFields.MODEL_ID, + modelId, + GoogleVertexAiServiceFields.LOCATION, + location, + GoogleVertexAiServiceFields.PROJECT_ID, + projectId, + GoogleVertexAiEmbeddingsServiceSettings.DIMENSIONS_SET_BY_USER, + true + ) + ), + getTaskSettingsMap(autoTruncate), + getSecretSettingsMap(serviceAccountJson) + ); + persistedConfig.config().put("extra_key", "value"); + + var model = service.parsePersistedConfigWithSecrets( + "id", + TaskType.TEXT_EMBEDDING, + persistedConfig.config(), + persistedConfig.secrets() + ); + + assertThat(model, instanceOf(GoogleVertexAiEmbeddingsModel.class)); + + var embeddingsModel = (GoogleVertexAiEmbeddingsModel) model; + assertThat(embeddingsModel.getServiceSettings().modelId(), is(modelId)); + assertThat(embeddingsModel.getServiceSettings().location(), is(location)); + assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); + assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); + } + } + + public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInSecretsSettings() throws IOException { + var projectId = "project"; + var location = "location"; + var modelId = "model"; + var autoTruncate = true; + var serviceAccountJson = """ + { + "some json" + } + """; + + try (var service = createGoogleVertexAiService()) { + var secretSettingsMap = 
getSecretSettingsMap(serviceAccountJson); + secretSettingsMap.put("extra_key", "value"); + + var persistedConfig = getPersistedConfigMap( + new HashMap<>( + Map.of( + ServiceFields.MODEL_ID, + "model", + GoogleVertexAiServiceFields.LOCATION, + "location", + GoogleVertexAiServiceFields.PROJECT_ID, + "project", + GoogleVertexAiEmbeddingsServiceSettings.DIMENSIONS_SET_BY_USER, + true + ) + ), + getTaskSettingsMap(autoTruncate), + secretSettingsMap + ); + + var model = service.parsePersistedConfigWithSecrets( + "id", + TaskType.TEXT_EMBEDDING, + persistedConfig.config(), + persistedConfig.secrets() + ); + + assertThat(model, instanceOf(GoogleVertexAiEmbeddingsModel.class)); + + var embeddingsModel = (GoogleVertexAiEmbeddingsModel) model; + assertThat(embeddingsModel.getServiceSettings().modelId(), is(modelId)); + assertThat(embeddingsModel.getServiceSettings().location(), is(location)); + assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); + assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); + } + } + + public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInServiceSettings() throws IOException { + var projectId = "project"; + var location = "location"; + var modelId = "model"; + var autoTruncate = true; + var serviceAccountJson = """ + { + "some json" + } + """; + + try (var service = createGoogleVertexAiService()) { + var serviceSettingsMap = new HashMap( + Map.of( + ServiceFields.MODEL_ID, + "model", + GoogleVertexAiServiceFields.LOCATION, + "location", + GoogleVertexAiServiceFields.PROJECT_ID, + "project", + GoogleVertexAiEmbeddingsServiceSettings.DIMENSIONS_SET_BY_USER, + true + ) + ); + serviceSettingsMap.put("extra_key", "value"); + + var persistedConfig = getPersistedConfigMap( + serviceSettingsMap, + getTaskSettingsMap(autoTruncate), + getSecretSettingsMap(serviceAccountJson) + ); + + var model = service.parsePersistedConfigWithSecrets( + "id", + TaskType.TEXT_EMBEDDING, + persistedConfig.config(), + persistedConfig.secrets() + ); + + assertThat(model, instanceOf(GoogleVertexAiEmbeddingsModel.class)); + + var embeddingsModel = (GoogleVertexAiEmbeddingsModel) model; + assertThat(embeddingsModel.getServiceSettings().modelId(), is(modelId)); + assertThat(embeddingsModel.getServiceSettings().location(), is(location)); + assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); + assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); + } + } + + public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInTaskSettings() throws IOException { + var projectId = "project"; + var location = "location"; + var modelId = "model"; + var autoTruncate = true; + var serviceAccountJson = """ + { + "some json" + } + """; + + try (var service = createGoogleVertexAiService()) { + var taskSettings = getTaskSettingsMap(autoTruncate); + taskSettings.put("extra_key", "value"); + + var persistedConfig = getPersistedConfigMap( + new HashMap<>( + Map.of( + ServiceFields.MODEL_ID, + "model", + 
GoogleVertexAiServiceFields.LOCATION, + "location", + GoogleVertexAiServiceFields.PROJECT_ID, + "project", + GoogleVertexAiEmbeddingsServiceSettings.DIMENSIONS_SET_BY_USER, + true + ) + ), + taskSettings, + getSecretSettingsMap(serviceAccountJson) + ); + + var model = service.parsePersistedConfigWithSecrets( + "id", + TaskType.TEXT_EMBEDDING, + persistedConfig.config(), + persistedConfig.secrets() + ); + + assertThat(model, instanceOf(GoogleVertexAiEmbeddingsModel.class)); + + var embeddingsModel = (GoogleVertexAiEmbeddingsModel) model; + assertThat(embeddingsModel.getServiceSettings().modelId(), is(modelId)); + assertThat(embeddingsModel.getServiceSettings().location(), is(location)); + assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); + assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); + } + } + + // testInfer tested via end-to-end notebook tests in AppEx repo + + private GoogleVertexAiService createGoogleVertexAiService() { + return new GoogleVertexAiService(mock(HttpRequestSender.Factory.class), createWithEmptySettings(threadPool)); + } + + private Map getRequestConfigMap( + Map serviceSettings, + Map taskSettings, + Map secretSettings + ) { + var builtServiceSettings = new HashMap<>(); + builtServiceSettings.putAll(serviceSettings); + builtServiceSettings.putAll(secretSettings); + + return new HashMap<>( + Map.of(ModelConfigurations.SERVICE_SETTINGS, builtServiceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings) + ); + } + + private static Map getSecretSettingsMap(String serviceAccountJson) { + return new HashMap<>(Map.of(GoogleVertexAiSecretSettings.SERVICE_ACCOUNT_JSON, serviceAccountJson)); + } + + private static ActionListener getModelListenerForException(Class exceptionClass, String expectedMessage) { + return ActionListener.wrap((model) -> fail("Model parsing should have failed"), e -> { + assertThat(e, Matchers.instanceOf(exceptionClass)); + assertThat(e.getMessage(), CoreMatchers.is(expectedMessage)); + }); + } + + private static Map getTaskSettingsMap(Boolean autoTruncate) { + var taskSettings = new HashMap(); + + taskSettings.put(GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE, autoTruncate); + + return taskSettings; + } + + private static Map getTaskSettingsMap(Integer topN) { + var taskSettings = new HashMap(); + + taskSettings.put(GoogleVertexAiRerankTaskSettings.TOP_N, topN); + + return taskSettings; + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModelTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModelTests.java new file mode 100644 index 0000000000000..ca38bdb6e2c6c --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModelTests.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.googlevertexai.embeddings; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiSecretSettings; + +import java.net.URI; +import java.net.URISyntaxException; + +import static org.hamcrest.Matchers.is; + +public class GoogleVertexAiEmbeddingsModelTests extends ESTestCase { + + public void testBuildUri() throws URISyntaxException { + var location = "location"; + var projectId = "project"; + var modelId = "model"; + + URI uri = GoogleVertexAiEmbeddingsModel.buildUri(location, projectId, modelId); + + assertThat( + uri, + is( + new URI( + Strings.format( + "https://%s-aiplatform.googleapis.com/v1/projects/%s/locations/%s/publishers/google/models/%s:predict", + location, + projectId, + location, + modelId + ) + ) + ) + ); + } + + public static GoogleVertexAiEmbeddingsModel createModel( + String location, + String projectId, + String modelId, + String uri, + String serviceAccountJson + ) { + return new GoogleVertexAiEmbeddingsModel( + "id", + TaskType.TEXT_EMBEDDING, + "service", + uri, + new GoogleVertexAiEmbeddingsServiceSettings(location, projectId, modelId, false, null, null, null, null), + new GoogleVertexAiEmbeddingsTaskSettings(Boolean.FALSE), + new GoogleVertexAiSecretSettings(new SecureString(serviceAccountJson.toCharArray())) + ); + } + + public static GoogleVertexAiEmbeddingsModel createModel(String modelId, @Nullable Boolean autoTruncate) { + return new GoogleVertexAiEmbeddingsModel( + "id", + TaskType.TEXT_EMBEDDING, + "service", + new GoogleVertexAiEmbeddingsServiceSettings( + randomAlphaOfLength(8), + randomAlphaOfLength(8), + modelId, + false, + null, + null, + SimilarityMeasure.DOT_PRODUCT, + null + ), + new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate), + new GoogleVertexAiSecretSettings(new SecureString(randomAlphaOfLength(8).toCharArray())) + ); + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettingsTests.java new file mode 100644 index 0000000000000..1e9a2f435cb08 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettingsTests.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.googlevertexai.embeddings; + +import org.elasticsearch.test.ESTestCase; + +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.is; + +public class GoogleVertexAiEmbeddingsRequestTaskSettingsTests extends ESTestCase { + + public void testFromMap_ReturnsEmptySettings_IfMapEmpty() { + var requestTaskSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap(new HashMap<>()); + assertThat(requestTaskSettings, is(GoogleVertexAiEmbeddingsRequestTaskSettings.EMPTY_SETTINGS)); + } + + public void testFromMap_DoesNotThrowValidationException_IfAutoTruncateIsMissing() { + var requestTaskSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap(new HashMap<>(Map.of("unrelated", true))); + assertThat(requestTaskSettings, is(new GoogleVertexAiEmbeddingsRequestTaskSettings(null))); + } + + public void testFromMap_ExtractsAutoTruncate() { + var autoTruncate = true; + var requestTaskSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap( + new HashMap<>(Map.of(GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE, autoTruncate)) + ); + assertThat(requestTaskSettings, is(new GoogleVertexAiEmbeddingsRequestTaskSettings(autoTruncate))); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsServiceSettingsTests.java new file mode 100644 index 0000000000000..2b8630ec7e041 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsServiceSettingsTests.java @@ -0,0 +1,172 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.googlevertexai.embeddings; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.inference.SimilarityMeasure; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.ServiceFields; +import org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiServiceFields; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettingsTests; + +import java.io.IOException; +import java.util.HashMap; + +import static org.elasticsearch.xpack.inference.MatchersUtils.equalToIgnoringWhitespaceInJsonString; +import static org.hamcrest.Matchers.is; + +public class GoogleVertexAiEmbeddingsServiceSettingsTests extends AbstractBWCWireSerializationTestCase< + GoogleVertexAiEmbeddingsServiceSettings> { + + public void testFromMap_Request_CreatesSettingsCorrectly() { + var location = randomAlphaOfLength(8); + var projectId = randomAlphaOfLength(8); + var model = randomAlphaOfLength(8); + var dimensionsSetByUser = randomBoolean(); + var maxInputTokens = randomFrom(new Integer[] { null, randomNonNegativeInt() }); + var similarityMeasure = randomFrom(new SimilarityMeasure[] { null, randomFrom(SimilarityMeasure.values()) }); + var similarityMeasureString = similarityMeasure == null ? null : similarityMeasure.toString(); + var dims = randomFrom(new Integer[] { null, randomNonNegativeInt() }); + var configurationParseContext = ConfigurationParseContext.PERSISTENT; + + var serviceSettings = GoogleVertexAiEmbeddingsServiceSettings.fromMap(new HashMap<>() { + { + put(GoogleVertexAiServiceFields.LOCATION, location); + put(GoogleVertexAiServiceFields.PROJECT_ID, projectId); + put(ServiceFields.MODEL_ID, model); + put(GoogleVertexAiEmbeddingsServiceSettings.DIMENSIONS_SET_BY_USER, dimensionsSetByUser); + put(ServiceFields.MAX_INPUT_TOKENS, maxInputTokens); + put(ServiceFields.SIMILARITY, similarityMeasureString); + put(ServiceFields.DIMENSIONS, dims); + } + }, configurationParseContext); + + assertThat( + serviceSettings, + is( + new GoogleVertexAiEmbeddingsServiceSettings( + location, + projectId, + model, + dimensionsSetByUser, + maxInputTokens, + dims, + similarityMeasure, + null + ) + ) + ); + } + + public void testToXContent_WritesAllValues() throws IOException { + var entity = new GoogleVertexAiEmbeddingsServiceSettings( + "location", + "projectId", + "modelId", + true, + 10, + 10, + SimilarityMeasure.DOT_PRODUCT, + null + ); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, equalToIgnoringWhitespaceInJsonString(""" + { + "location": "location", + "project_id": "projectId", + "model_id": "modelId", + "max_input_tokens": 10, + "dimensions": 10, + "similarity": "dot_product", + "rate_limit": { + "requests_per_minute": 30000 + }, + "dimensions_set_by_user": true + } + """)); + } + + public void testFilteredXContentObject_WritesAllValues_ExceptDimensionsSetByUser() throws IOException { + var entity = new 
GoogleVertexAiEmbeddingsServiceSettings( + "location", + "projectId", + "modelId", + true, + 10, + 10, + SimilarityMeasure.DOT_PRODUCT, + null + ); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + var filteredXContent = entity.getFilteredXContentObject(); + filteredXContent.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, equalToIgnoringWhitespaceInJsonString(""" + { + "location": "location", + "project_id": "projectId", + "model_id": "modelId", + "max_input_tokens": 10, + "dimensions": 10, + "similarity": "dot_product", + "rate_limit": { + "requests_per_minute": 30000 + } + } + """)); + } + + @Override + protected Writeable.Reader instanceReader() { + return GoogleVertexAiEmbeddingsServiceSettings::new; + } + + @Override + protected GoogleVertexAiEmbeddingsServiceSettings createTestInstance() { + return createRandom(); + } + + @Override + protected GoogleVertexAiEmbeddingsServiceSettings mutateInstance(GoogleVertexAiEmbeddingsServiceSettings instance) throws IOException { + return randomValueOtherThan(instance, GoogleVertexAiEmbeddingsServiceSettingsTests::createRandom); + } + + @Override + protected GoogleVertexAiEmbeddingsServiceSettings mutateInstanceForVersion( + GoogleVertexAiEmbeddingsServiceSettings instance, + TransportVersion version + ) { + return instance; + } + + private static GoogleVertexAiEmbeddingsServiceSettings createRandom() { + return new GoogleVertexAiEmbeddingsServiceSettings( + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomBoolean(), + randomFrom(new Integer[] { null, randomNonNegativeInt() }), + randomFrom(new Integer[] { null, randomNonNegativeInt() }), + randomFrom(new SimilarityMeasure[] { null, randomFrom(SimilarityMeasure.values()) }), + randomFrom(new RateLimitSettings[] { null, RateLimitSettingsTests.createRandom() }) + ); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettingsTests.java new file mode 100644 index 0000000000000..364d8090786df --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettingsTests.java @@ -0,0 +1,133 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.googlevertexai.embeddings; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE; +import static org.hamcrest.Matchers.is; + +public class GoogleVertexAiEmbeddingsTaskSettingsTests extends AbstractBWCWireSerializationTestCase { + + public void testFromMap_AutoTruncateIsSet() { + var autoTruncate = true; + var taskSettingsMap = getTaskSettingsMap(autoTruncate); + var taskSettings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(taskSettingsMap); + + assertThat(taskSettings, is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + } + + public void testFromMap_ThrowsValidationException_IfAutoTruncateIsInvalidValue() { + var taskSettings = getTaskSettingsMap("invalid"); + + expectThrows(ValidationException.class, () -> GoogleVertexAiEmbeddingsTaskSettings.fromMap(taskSettings)); + } + + public void testFromMap_AutoTruncateIsNull() { + var taskSettingsMap = getTaskSettingsMap(null); + var taskSettings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(taskSettingsMap); + // needed, because of constructors being ambiguous otherwise + Boolean nullBoolean = null; + + assertThat(taskSettings, is(new GoogleVertexAiEmbeddingsTaskSettings(nullBoolean))); + } + + public void testFromMap_DoesNotThrow_WithEmptyMap() { + assertNull(GoogleVertexAiEmbeddingsTaskSettings.fromMap(new HashMap<>()).autoTruncate()); + } + + public void testOf_UseRequestSettings() { + var originalAutoTruncate = true; + var originalSettings = new GoogleVertexAiEmbeddingsTaskSettings(originalAutoTruncate); + + var requestAutoTruncate = originalAutoTruncate == false; + var requestTaskSettings = new GoogleVertexAiEmbeddingsRequestTaskSettings(requestAutoTruncate); + + assertThat(GoogleVertexAiEmbeddingsTaskSettings.of(originalSettings, requestTaskSettings).autoTruncate(), is(requestAutoTruncate)); + } + + public void testOf_UseOriginalSettings() { + var originalAutoTruncate = true; + var originalSettings = new GoogleVertexAiEmbeddingsTaskSettings(originalAutoTruncate); + + var requestTaskSettings = new GoogleVertexAiEmbeddingsRequestTaskSettings(null); + + assertThat(GoogleVertexAiEmbeddingsTaskSettings.of(originalSettings, requestTaskSettings).autoTruncate(), is(originalAutoTruncate)); + } + + public void testToXContent_WritesAutoTruncateIfNotNull() throws IOException { + var settings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(getTaskSettingsMap(true)); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + settings.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, is(""" + {"auto_truncate":true}""")); + } + + public void testToXContent_DoesNotWriteAutoTruncateIfNull() throws IOException { + var settings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(getTaskSettingsMap(null)); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + 
settings.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, is(""" + {}""")); + } + + @Override + protected Writeable.Reader<GoogleVertexAiEmbeddingsTaskSettings> instanceReader() { + return GoogleVertexAiEmbeddingsTaskSettings::new; + } + + @Override + protected GoogleVertexAiEmbeddingsTaskSettings createTestInstance() { + return createRandom(); + } + + @Override + protected GoogleVertexAiEmbeddingsTaskSettings mutateInstance(GoogleVertexAiEmbeddingsTaskSettings instance) throws IOException { + return randomValueOtherThan(instance, GoogleVertexAiEmbeddingsTaskSettingsTests::createRandom); + } + + @Override + protected GoogleVertexAiEmbeddingsTaskSettings mutateInstanceForVersion( + GoogleVertexAiEmbeddingsTaskSettings instance, + TransportVersion version + ) { + return instance; + } + + private static GoogleVertexAiEmbeddingsTaskSettings createRandom() { + return new GoogleVertexAiEmbeddingsTaskSettings(randomFrom(new Boolean[] { null, randomBoolean() })); + } + + private static Map<String, Object> getTaskSettingsMap(@Nullable Object autoTruncate) { + var map = new HashMap<String, Object>(); + + if (autoTruncate != null) { + map.put(AUTO_TRUNCATE, autoTruncate); + } + + return map; + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankModelTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankModelTests.java new file mode 100644 index 0000000000000..dff4e223cf9f4 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankModelTests.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.inference.services.googlevertexai.rerank; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiSecretSettings; + +import java.net.URI; +import java.net.URISyntaxException; + +import static org.hamcrest.Matchers.is; + +public class GoogleVertexAiRerankModelTests extends ESTestCase { + + public void testBuildUri() throws URISyntaxException { + var projectId = "project"; + + URI uri = GoogleVertexAiRerankModel.buildUri(projectId); + + assertThat( + uri, + is( + new URI( + Strings.format( + "https://discoveryengine.googleapis.com/v1/projects/%s/locations/global/rankingConfigs/default_ranking_config:rank", + projectId + ) + ) + ) + ); + } + + public static GoogleVertexAiRerankModel createModel(@Nullable String modelId, @Nullable Integer topN) { + return new GoogleVertexAiRerankModel( + "id", + TaskType.RERANK, + "service", + new GoogleVertexAiRerankServiceSettings(randomAlphaOfLength(10), modelId, null), + new GoogleVertexAiRerankTaskSettings(topN), + new GoogleVertexAiSecretSettings(randomSecureStringOfLength(8)) + ); + } + + public static GoogleVertexAiRerankModel createModel(String url, @Nullable String modelId, @Nullable Integer topN) { + return new GoogleVertexAiRerankModel( + "id", + TaskType.RERANK, + "service", + url, + new GoogleVertexAiRerankServiceSettings(randomAlphaOfLength(10), modelId, null), + new GoogleVertexAiRerankTaskSettings(topN), + new GoogleVertexAiSecretSettings(randomSecureStringOfLength(8)) + ); + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankRequestTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankRequestTaskSettingsTests.java new file mode 100644 index 0000000000000..abe41793d6e70 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankRequestTaskSettingsTests.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.googlevertexai.rerank; + +import org.elasticsearch.test.ESTestCase; + +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.is; + +public class GoogleVertexAiRerankRequestTaskSettingsTests extends ESTestCase { + + public void testFromMap_ReturnsEmptySettings_IfMapEmpty() { + var requestTaskSettings = GoogleVertexAiRerankRequestTaskSettings.fromMap(new HashMap<>()); + assertThat(requestTaskSettings, is(GoogleVertexAiRerankRequestTaskSettings.EMPTY_SETTINGS)); + } + + public void testFromMap_DoesNotThrowValidationException_IfTopNIsMissing() { + var requestTaskSettings = GoogleVertexAiRerankRequestTaskSettings.fromMap(new HashMap<>(Map.of("unrelated", 1))); + assertThat(requestTaskSettings, is(new GoogleVertexAiRerankRequestTaskSettings(null))); + } + + public void testFromMap_ExtractsTopN() { + var topN = 1; + var requestTaskSettings = GoogleVertexAiRerankRequestTaskSettings.fromMap( + new HashMap<>(Map.of(GoogleVertexAiRerankTaskSettings.TOP_N, topN)) + ); + assertThat(requestTaskSettings, is(new GoogleVertexAiRerankRequestTaskSettings(topN))); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankServiceSettingsTests.java new file mode 100644 index 0000000000000..9a8eb897f021e --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankServiceSettingsTests.java @@ -0,0 +1,129 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.googlevertexai.rerank; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; +import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.ServiceFields; +import org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiServiceFields; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettingsTests; + +import java.io.IOException; +import java.util.HashMap; + +import static org.elasticsearch.xpack.inference.MatchersUtils.equalToIgnoringWhitespaceInJsonString; +import static org.hamcrest.Matchers.is; + +public class GoogleVertexAiRerankServiceSettingsTests extends AbstractBWCWireSerializationTestCase { + + public void testFromMap_Request_CreatesSettingsCorrectly() { + var projectId = randomAlphaOfLength(10); + var modelId = randomFrom(new String[] { null, randomAlphaOfLength(10) }); + + var serviceSettings = GoogleVertexAiRerankServiceSettings.fromMap(new HashMap<>() { + { + put(GoogleVertexAiServiceFields.PROJECT_ID, projectId); + put(ServiceFields.MODEL_ID, modelId); + } + }, ConfigurationParseContext.REQUEST); + + assertThat(serviceSettings, is(new GoogleVertexAiRerankServiceSettings(projectId, modelId, null))); + } + + public void testToXContent_WritesAllValues() throws IOException { + var entity = new GoogleVertexAiRerankServiceSettings("projectId", "modelId", null); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, equalToIgnoringWhitespaceInJsonString(""" + { + "project_id": "projectId", + "model_id": "modelId", + "rate_limit": { + "requests_per_minute": 300 + } + } + """)); + } + + public void testToXContent_DoesNotWriteModelIfNull() throws IOException { + var entity = new GoogleVertexAiRerankServiceSettings("projectId", null, null); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, equalToIgnoringWhitespaceInJsonString(""" + { + "project_id": "projectId", + "rate_limit": { + "requests_per_minute": 300 + } + } + """)); + } + + public void testFilteredXContentObject_WritesAllValues() throws IOException { + var entity = new GoogleVertexAiRerankServiceSettings("projectId", "modelId", null); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + var filteredXContent = entity.getFilteredXContentObject(); + filteredXContent.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, equalToIgnoringWhitespaceInJsonString(""" + { + "project_id": "projectId", + "model_id": "modelId", + "rate_limit": { + "requests_per_minute": 300 + } + } + """)); + } + + @Override + protected Writeable.Reader instanceReader() { + return GoogleVertexAiRerankServiceSettings::new; + } + + @Override + protected GoogleVertexAiRerankServiceSettings createTestInstance() { + return createRandom(); 
+ } + + @Override + protected GoogleVertexAiRerankServiceSettings mutateInstance(GoogleVertexAiRerankServiceSettings instance) throws IOException { + return randomValueOtherThan(instance, GoogleVertexAiRerankServiceSettingsTests::createRandom); + } + + @Override + protected GoogleVertexAiRerankServiceSettings mutateInstanceForVersion( + GoogleVertexAiRerankServiceSettings instance, + TransportVersion version + ) { + return instance; + } + + private static GoogleVertexAiRerankServiceSettings createRandom() { + return new GoogleVertexAiRerankServiceSettings( + randomAlphaOfLength(10), + randomFrom(new String[] { null, randomAlphaOfLength(10) }), + randomFrom(new RateLimitSettings[] { null, RateLimitSettingsTests.createRandom() }) + ); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankTaskSettingsTests.java new file mode 100644 index 0000000000000..5bfcbb9a3f636 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankTaskSettingsTests.java @@ -0,0 +1,137 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.googlevertexai.rerank; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.inference.MatchersUtils.equalToIgnoringWhitespaceInJsonString; +import static org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankTaskSettings.TOP_N; +import static org.hamcrest.Matchers.is; + +public class GoogleVertexAiRerankTaskSettingsTests extends AbstractBWCWireSerializationTestCase { + + public void testFromMap_TopNIsSet() { + var topN = 1; + var taskSettingsMap = getTaskSettingsMap(topN); + var taskSettings = GoogleVertexAiRerankTaskSettings.fromMap(taskSettingsMap); + + assertThat(taskSettings, is(new GoogleVertexAiRerankTaskSettings(topN))); + } + + public void testFromMap_ThrowsValidationException_IfTopNIsInvalidValue() { + var taskSettingsMap = getTaskSettingsMap("invalid"); + + expectThrows(ValidationException.class, () -> GoogleVertexAiRerankTaskSettings.fromMap(taskSettingsMap)); + } + + public void testFromMap_TopNIsNull() { + var taskSettingsMap = getTaskSettingsMap(null); + var taskSettings = GoogleVertexAiRerankTaskSettings.fromMap(taskSettingsMap); + // needed, because of constructors being ambiguous otherwise + Integer nullInt = null; + + assertThat(taskSettings, is(new GoogleVertexAiRerankTaskSettings(nullInt))); + } + + public void testFromMap_DoesNotThrow_WithEmptyMap() { + assertNull(GoogleVertexAiRerankTaskSettings.fromMap(new HashMap<>()).topN()); + } 
+ + public void testOf_UseRequestSettings() { + var originalTopN = 1; + var originalSettings = new GoogleVertexAiRerankTaskSettings(originalTopN); + + var requestTopN = originalTopN + 1; + var requestTaskSettings = new GoogleVertexAiRerankRequestTaskSettings(requestTopN); + + assertThat(GoogleVertexAiRerankTaskSettings.of(originalSettings, requestTaskSettings).topN(), is(requestTopN)); + } + + public void testOf_UseOriginalSettings() { + var originalTopN = 1; + var originalSettings = new GoogleVertexAiRerankTaskSettings(originalTopN); + + var requestTaskSettings = new GoogleVertexAiRerankRequestTaskSettings(null); + + assertThat(GoogleVertexAiRerankTaskSettings.of(originalSettings, requestTaskSettings).topN(), is(originalTopN)); + } + + public void testToXContent_WritesTopNIfNotNull() throws IOException { + var settings = GoogleVertexAiRerankTaskSettings.fromMap(getTaskSettingsMap(1)); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + settings.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, equalToIgnoringWhitespaceInJsonString(""" + { + "top_n":1 + } + """)); + } + + public void testToXContent_DoesNotWriteTopNIfNull() throws IOException { + var settings = GoogleVertexAiRerankTaskSettings.fromMap(getTaskSettingsMap(null)); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + settings.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, is(""" + {}""")); + } + + @Override + protected Writeable.Reader<GoogleVertexAiRerankTaskSettings> instanceReader() { + return GoogleVertexAiRerankTaskSettings::new; + } + + @Override + protected GoogleVertexAiRerankTaskSettings createTestInstance() { + return createRandom(); + } + + @Override + protected GoogleVertexAiRerankTaskSettings mutateInstance(GoogleVertexAiRerankTaskSettings instance) throws IOException { + return randomValueOtherThan(instance, GoogleVertexAiRerankTaskSettingsTests::createRandom); + } + + @Override + protected GoogleVertexAiRerankTaskSettings mutateInstanceForVersion( + GoogleVertexAiRerankTaskSettings instance, + TransportVersion version + ) { + return instance; + } + + private static GoogleVertexAiRerankTaskSettings createRandom() { + return new GoogleVertexAiRerankTaskSettings(randomFrom(new Integer[] { null, randomNonNegativeInt() })); + } + + private static Map<String, Object> getTaskSettingsMap(@Nullable Object topN) { + var map = new HashMap<String, Object>(); + + if (topN != null) { + map.put(TOP_N, topN); + } + + return map; + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java index de5c7ec83d57e..61504603e62ee 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; import org.elasticsearch.inference.ChunkingOptions; @@ -22,7 +21,6 @@ import 
org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; -import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; @@ -56,7 +54,7 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.xpack.core.inference.results.InferenceChunkedTextEmbeddingFloatResultsTests.asMapWithListsInsteadOfArrays; -import static org.elasticsearch.xpack.inference.Utils.PersistedConfig; +import static org.elasticsearch.xpack.inference.Utils.getPersistedConfigMap; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; @@ -198,7 +196,7 @@ public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInSecretSettingsMap public void testParsePersistedConfigWithSecrets_CreatesAnEmbeddingsModel() throws IOException { try (var service = createHuggingFaceService()) { - var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("url"), getSecretSettingsMap("secret")); + var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("url"), new HashMap<>(), getSecretSettingsMap("secret")); var model = service.parsePersistedConfigWithSecrets( "id", @@ -217,7 +215,7 @@ public void testParsePersistedConfigWithSecrets_CreatesAnEmbeddingsModel() throw public void testParsePersistedConfigWithSecrets_CreatesAnElserModel() throws IOException { try (var service = createHuggingFaceService()) { - var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("url"), getSecretSettingsMap("secret")); + var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("url"), new HashMap<>(), getSecretSettingsMap("secret")); var model = service.parsePersistedConfigWithSecrets( "id", @@ -236,7 +234,7 @@ public void testParsePersistedConfigWithSecrets_CreatesAnElserModel() throws IOE public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInConfig() throws IOException { try (var service = createHuggingFaceService()) { - var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("url"), getSecretSettingsMap("secret")); + var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("url"), new HashMap<>(), getSecretSettingsMap("secret")); persistedConfig.config().put("extra_key", "value"); var model = service.parsePersistedConfigWithSecrets( @@ -259,7 +257,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists var secretSettingsMap = getSecretSettingsMap("secret"); secretSettingsMap.put("extra_key", "value"); - var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("url"), secretSettingsMap); + var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("url"), new HashMap<>(), secretSettingsMap); var model = service.parsePersistedConfigWithSecrets( "id", @@ -278,7 +276,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInSecrets() throws IOException { try (var service = createHuggingFaceService()) { - var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("url"), getSecretSettingsMap("secret")); + var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("url"), new HashMap<>(), getSecretSettingsMap("secret")); 
persistedConfig.secrets().put("extra_key", "value"); + var model = service.parsePersistedConfigWithSecrets( @@ -301,7 +299,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists var serviceSettingsMap = getServiceSettingsMap("url"); serviceSettingsMap.put("extra_key", "value"); - var persistedConfig = getPersistedConfigMap(serviceSettingsMap, getSecretSettingsMap("secret")); + var persistedConfig = getPersistedConfigMap(serviceSettingsMap, new HashMap<>(), getSecretSettingsMap("secret")); var model = service.parsePersistedConfigWithSecrets( "id", @@ -356,7 +354,7 @@ public void testParsePersistedConfig_CreatesAnEmbeddingsModel() throws IOExcepti public void testParsePersistedConfig_CreatesAnElserModel() throws IOException { try (var service = createHuggingFaceService()) { - var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("url")); + var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("url"), new HashMap<>()); var model = service.parsePersistedConfig("id", TaskType.SPARSE_EMBEDDING, persistedConfig.config()); @@ -529,12 +527,15 @@ public void testCheckModelConfig_IncludesMaxTokens() throws IOException { """; webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); - var model = HuggingFaceEmbeddingsModelTests.createModel(getUrl(webServer), "secret", 1); + var model = HuggingFaceEmbeddingsModelTests.createModel(getUrl(webServer), "secret", 1, 1, SimilarityMeasure.DOT_PRODUCT); PlainActionFuture<Model> listener = new PlainActionFuture<>(); service.checkModelConfig(model, listener); var result = listener.actionGet(TIMEOUT); - assertThat(result, is(HuggingFaceEmbeddingsModelTests.createModel(getUrl(webServer), "secret", 1, 1))); + assertThat( + result, + is(HuggingFaceEmbeddingsModelTests.createModel(getUrl(webServer), "secret", 1, 1, SimilarityMeasure.DOT_PRODUCT)) + ); } } @@ -566,7 +567,7 @@ public void testCheckModelConfig_UsesUserSpecifiedSimilarity() throws IOExceptio } } - public void testCheckModelConfig_LeavesSimilarityAsNull_WhenUnspecified() throws IOException { + public void testCheckModelConfig_DefaultsSimilarityToCosine() throws IOException { var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); try (var service = new HuggingFaceService(senderFactory, createWithEmptySettings(threadPool))) { @@ -587,11 +588,13 @@ public void testCheckModelConfig_LeavesSimilarityAsNull_WhenUnspecified() throws service.checkModelConfig(model, listener); var result = listener.actionGet(TIMEOUT); - assertThat(result, is(HuggingFaceEmbeddingsModelTests.createModel(getUrl(webServer), "secret", 1, 1, null))); + assertThat( + result, + is(HuggingFaceEmbeddingsModelTests.createModel(getUrl(webServer), "secret", 1, 1, SimilarityMeasure.COSINE)) + ); } } - // TODO public void testChunkedInfer_CallsInfer_TextEmbedding_ConvertsFloatResponse() throws IOException { var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); @@ -712,25 +715,4 @@ private Map<String, Object> getRequestConfigMap(Map<String, Object> serviceSetti return new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, builtServiceSettings)); } - private PersistedConfig getPersistedConfigMap(Map<String, Object> serviceSettings) { - return getPersistedConfigMap(serviceSettings, Map.of(), null); - } - - private PersistedConfig getPersistedConfigMap(Map<String, Object> serviceSettings, @Nullable Map<String, Object> secretSettings) { - return getPersistedConfigMap(serviceSettings, Map.of(), secretSettings); - } - - private PersistedConfig getPersistedConfigMap( - Map 
serviceSettings, - Map taskSettings, - Map secretSettings - ) { - - var secrets = secretSettings == null ? null : new HashMap(Map.of(ModelSecrets.SECRET_SETTINGS, secretSettings)); - - return new PersistedConfig( - new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), - secrets - ); - } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/MistralServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/MistralServiceTests.java index ba37203d9e5d6..1e3dd1e348f55 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/MistralServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/MistralServiceTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; -import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; @@ -54,8 +53,8 @@ import java.util.Set; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.xpack.inference.Utils.PersistedConfig; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; +import static org.elasticsearch.xpack.inference.Utils.getPersistedConfigMap; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; @@ -587,18 +586,6 @@ private Map getRequestConfigMap( ); } - private PersistedConfig getPersistedConfigMap( - Map serviceSettings, - Map taskSettings, - Map secretSettings - ) { - - return new PersistedConfig( - new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), - new HashMap<>(Map.of(ModelSecrets.SECRET_SETTINGS, secretSettings)) - ); - } - private static Map getEmbeddingsServiceSettingsMap( String model, @Nullable Integer dimensions, diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/embeddings/MistralEmbeddingsServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/embeddings/MistralEmbeddingsServiceSettingsTests.java index 076986acdcee6..009a6dbdeb793 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/embeddings/MistralEmbeddingsServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/embeddings/MistralEmbeddingsServiceSettingsTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.inference.services.mistral.embeddings; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.ByteArrayStreamInput; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.core.Nullable; @@ -27,6 +28,7 @@ import java.util.Map; import static org.elasticsearch.xpack.inference.services.ServiceFields.SIMILARITY; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; public class MistralEmbeddingsServiceSettingsTests extends 
ESTestCase { @@ -77,6 +79,84 @@ public void testFromMap_PersistentContext_DoesNotThrowException_WhenDimensionsIs assertThat(serviceSettings, is(new MistralEmbeddingsServiceSettings(model, null, null, null, null))); } + public void testFromMap_ThrowsException_WhenDimensionsAreZero() { + var model = "mistral-embed"; + var dimensions = 0; + + var settingsMap = createRequestSettingsMap(model, dimensions, null, SimilarityMeasure.COSINE); + + var thrownException = expectThrows( + ValidationException.class, + () -> MistralEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST) + ); + + assertThat( + thrownException.getMessage(), + containsString("Validation Failed: 1: [service_settings] Invalid value [0]. [dimensions] must be a positive integer;") + ); + } + + public void testFromMap_ThrowsException_WhenDimensionsAreNegative() { + var model = "mistral-embed"; + var dimensions = randomNegativeInt(); + + var settingsMap = createRequestSettingsMap(model, dimensions, null, SimilarityMeasure.COSINE); + + var thrownException = expectThrows( + ValidationException.class, + () -> MistralEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST) + ); + + assertThat( + thrownException.getMessage(), + containsString( + Strings.format( + "Validation Failed: 1: [service_settings] Invalid value [%d]. [dimensions] must be a positive integer;", + dimensions + ) + ) + ); + } + + public void testFromMap_ThrowsException_WhenMaxInputTokensAreZero() { + var model = "mistral-embed"; + var maxInputTokens = 0; + + var settingsMap = createRequestSettingsMap(model, null, maxInputTokens, SimilarityMeasure.COSINE); + + var thrownException = expectThrows( + ValidationException.class, + () -> MistralEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST) + ); + + assertThat( + thrownException.getMessage(), + containsString("Validation Failed: 1: [service_settings] Invalid value [0]. [max_input_tokens] must be a positive integer;") + ); + } + + public void testFromMap_ThrowsException_WhenMaxInputTokensAreNegative() { + var model = "mistral-embed"; + var maxInputTokens = randomNegativeInt(); + + var settingsMap = createRequestSettingsMap(model, null, maxInputTokens, SimilarityMeasure.COSINE); + + var thrownException = expectThrows( + ValidationException.class, + () -> MistralEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST) + ); + + assertThat( + thrownException.getMessage(), + containsString( + Strings.format( + "Validation Failed: 1: [service_settings] Invalid value [%d]. 
[max_input_tokens] must be a positive integer;", + maxInputTokens + ) + ) + ); + } + public void testFromMap_PersistentContext_DoesNotThrowException_WhenSimilarityIsPresent() { var model = "mistral-embed"; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java index 2fc049dd3a5f6..9ff175ca9685e 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; -import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; @@ -55,8 +54,8 @@ import java.util.Set; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.xpack.inference.Utils.PersistedConfig; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; +import static org.elasticsearch.xpack.inference.Utils.getPersistedConfigMap; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; @@ -343,7 +342,7 @@ public void testParseRequestConfig_MovesModel() throws IOException { public void testParsePersistedConfigWithSecrets_CreatesAnOpenAiEmbeddingsModel() throws IOException { try (var service = createOpenAiService()) { var persistedConfig = getPersistedConfigMap( - getServiceSettingsMap("model", "url", "org", 100, false), + getServiceSettingsMap("model", "url", "org", 100, null, false), getTaskSettingsMap("user"), getSecretSettingsMap("secret") ); @@ -394,7 +393,7 @@ public void testParsePersistedConfigWithSecrets_ThrowsErrorTryingToParseInvalidM public void testParsePersistedConfigWithSecrets_CreatesAnOpenAiEmbeddingsModelWithoutUserUrlOrganization() throws IOException { try (var service = createOpenAiService()) { var persistedConfig = getPersistedConfigMap( - getServiceSettingsMap("model", null, null, null, true), + getServiceSettingsMap("model", null, null, null, null, true), getTaskSettingsMap(null), getSecretSettingsMap("secret") ); @@ -420,7 +419,7 @@ public void testParsePersistedConfigWithSecrets_CreatesAnOpenAiEmbeddingsModelWi public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInConfig() throws IOException { try (var service = createOpenAiService()) { var persistedConfig = getPersistedConfigMap( - getServiceSettingsMap("model", "url", "org", null, true), + getServiceSettingsMap("model", "url", "org", null, null, true), getTaskSettingsMap("user"), getSecretSettingsMap("secret") ); @@ -451,7 +450,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists secretSettingsMap.put("extra_key", "value"); var persistedConfig = getPersistedConfigMap( - getServiceSettingsMap("model", "url", "org", null, true), + getServiceSettingsMap("model", "url", "org", null, null, true), getTaskSettingsMap("user"), secretSettingsMap ); @@ -477,7 +476,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists public void 
testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInSecrets() throws IOException { try (var service = createOpenAiService()) { var persistedConfig = getPersistedConfigMap( - getServiceSettingsMap("model", "url", "org", null, true), + getServiceSettingsMap("model", "url", "org", null, null, true), getTaskSettingsMap("user"), getSecretSettingsMap("secret") ); @@ -504,7 +503,7 @@ public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInSe public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInServiceSettings() throws IOException { try (var service = createOpenAiService()) { - var serviceSettingsMap = getServiceSettingsMap("model", "url", "org", null, true); + var serviceSettingsMap = getServiceSettingsMap("model", "url", "org", null, null, true); serviceSettingsMap.put("extra_key", "value"); var persistedConfig = getPersistedConfigMap(serviceSettingsMap, getTaskSettingsMap("user"), getSecretSettingsMap("secret")); @@ -533,7 +532,7 @@ public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInTa taskSettingsMap.put("extra_key", "value"); var persistedConfig = getPersistedConfigMap( - getServiceSettingsMap("model", "url", "org", null, true), + getServiceSettingsMap("model", "url", "org", null, null, true), taskSettingsMap, getSecretSettingsMap("secret") ); @@ -559,7 +558,7 @@ public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInTa public void testParsePersistedConfig_CreatesAnOpenAiEmbeddingsModel() throws IOException { try (var service = createOpenAiService()) { var persistedConfig = getPersistedConfigMap( - getServiceSettingsMap("model", "url", "org", null, true), + getServiceSettingsMap("model", "url", "org", null, null, true), getTaskSettingsMap("user") ); @@ -594,7 +593,10 @@ public void testParsePersistedConfig_ThrowsErrorTryingToParseInvalidModel() thro public void testParsePersistedConfig_CreatesAnOpenAiEmbeddingsModelWithoutUserUrlOrganization() throws IOException { try (var service = createOpenAiService()) { - var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("model", null, null, null, true), getTaskSettingsMap(null)); + var persistedConfig = getPersistedConfigMap( + getServiceSettingsMap("model", null, null, null, null, true), + getTaskSettingsMap(null) + ); var model = service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config()); @@ -612,7 +614,7 @@ public void testParsePersistedConfig_CreatesAnOpenAiEmbeddingsModelWithoutUserUr public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInConfig() throws IOException { try (var service = createOpenAiService()) { var persistedConfig = getPersistedConfigMap( - getServiceSettingsMap("model", "url", "org", null, true), + getServiceSettingsMap("model", "url", "org", null, null, true), getTaskSettingsMap("user") ); persistedConfig.config().put("extra_key", "value"); @@ -632,7 +634,7 @@ public void testParsePersistedConfig_DoesNotThrowWhenAnExtraKeyExistsInConfig() public void testParsePersistedConfig_NotThrowWhenAnExtraKeyExistsInServiceSettings() throws IOException { try (var service = createOpenAiService()) { - var serviceSettingsMap = getServiceSettingsMap("model", "url", "org", null, true); + var serviceSettingsMap = getServiceSettingsMap("model", "url", "org", null, null, true); serviceSettingsMap.put("extra_key", "value"); var persistedConfig = getPersistedConfigMap(serviceSettingsMap, getTaskSettingsMap("user")); @@ -655,7 +657,7 @@ public void 
testParsePersistedConfig_NotThrowWhenAnExtraKeyExistsInTaskSettings( var taskSettingsMap = getTaskSettingsMap("user"); taskSettingsMap.put("extra_key", "value"); - var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("model", "url", "org", null, true), taskSettingsMap); + var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("model", "url", "org", null, null, true), taskSettingsMap); var model = service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config()); @@ -1308,24 +1310,4 @@ private Map getRequestConfigMap( Map.of(ModelConfigurations.SERVICE_SETTINGS, builtServiceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings) ); } - - private PersistedConfig getPersistedConfigMap( - Map serviceSettings, - Map taskSettings, - Map secretSettings - ) { - - return new PersistedConfig( - new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), - new HashMap<>(Map.of(ModelSecrets.SECRET_SETTINGS, secretSettings)) - ); - } - - private PersistedConfig getPersistedConfigMap(Map serviceSettings, Map taskSettings) { - - return new PersistedConfig( - new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), - null - ); - } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettingsTests.java index cc0004a2d678c..10ccbb4eb39f6 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettingsTests.java @@ -257,6 +257,92 @@ public void testFromMap_PersistentContext_DoesNotThrowException_WhenDimensionsIs assertThat(settings, is(new OpenAiEmbeddingsServiceSettings("m", (URI) null, null, null, null, null, true, null))); } + public void testFromMap_ThrowsException_WhenDimensionsAreZero() { + var modelId = "model-foo"; + var url = "https://www.abc.com"; + var org = "organization"; + var dimensions = 0; + + var settingsMap = getServiceSettingsMap(modelId, url, org, dimensions, null, null); + + var thrownException = expectThrows( + ValidationException.class, + () -> OpenAiEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST) + ); + + assertThat( + thrownException.getMessage(), + containsString("Validation Failed: 1: [service_settings] Invalid value [0]. [dimensions] must be a positive integer;") + ); + } + + public void testFromMap_ThrowsException_WhenDimensionsAreNegative() { + var modelId = "model-foo"; + var url = "https://www.abc.com"; + var org = "organization"; + var dimensions = randomNegativeInt(); + + var settingsMap = getServiceSettingsMap(modelId, url, org, dimensions, null, null); + + var thrownException = expectThrows( + ValidationException.class, + () -> OpenAiEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST) + ); + + assertThat( + thrownException.getMessage(), + containsString( + Strings.format( + "Validation Failed: 1: [service_settings] Invalid value [%d]. 
[dimensions] must be a positive integer;", + dimensions + ) + ) + ); + } + + public void testFromMap_ThrowsException_WhenMaxInputTokensAreZero() { + var modelId = "model-foo"; + var url = "https://www.abc.com"; + var org = "organization"; + var maxInputTokens = 0; + + var settingsMap = getServiceSettingsMap(modelId, url, org, null, maxInputTokens, null); + + var thrownException = expectThrows( + ValidationException.class, + () -> OpenAiEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST) + ); + + assertThat( + thrownException.getMessage(), + containsString("Validation Failed: 1: [service_settings] Invalid value [0]. [max_input_tokens] must be a positive integer;") + ); + } + + public void testFromMap_ThrowsException_WhenMaxInputTokensAreNegative() { + var modelId = "model-foo"; + var url = "https://www.abc.com"; + var org = "organization"; + var maxInputTokens = randomNegativeInt(); + + var settingsMap = getServiceSettingsMap(modelId, url, org, null, maxInputTokens, null); + + var thrownException = expectThrows( + ValidationException.class, + () -> OpenAiEmbeddingsServiceSettings.fromMap(settingsMap, ConfigurationParseContext.REQUEST) + ); + + assertThat( + thrownException.getMessage(), + containsString( + Strings.format( + "Validation Failed: 1: [service_settings] Invalid value [%d]. [max_input_tokens] must be a positive integer;", + maxInputTokens + ) + ) + ); + } + public void testFromMap_PersistentContext_DoesNotThrowException_WhenDimensionsSetByUserIsNull() { OpenAiEmbeddingsServiceSettings.fromMap( new HashMap<>(Map.of(ServiceFields.DIMENSIONS, 1, ServiceFields.MODEL_ID, "m")), @@ -464,6 +550,7 @@ public static Map getServiceSettingsMap( @Nullable String url, @Nullable String org, @Nullable Integer dimensions, + @Nullable Integer maxInputTokens, @Nullable Boolean dimensionsSetByUser ) { var map = new HashMap(); @@ -481,6 +568,10 @@ public static Map getServiceSettingsMap( map.put(ServiceFields.DIMENSIONS, dimensions); } + if (maxInputTokens != null) { + map.put(ServiceFields.MAX_INPUT_TOKENS, maxInputTokens); + } + if (dimensionsSetByUser != null) { map.put(OpenAiEmbeddingsServiceSettings.DIMENSIONS_SET_BY_USER, dimensionsSetByUser); } diff --git a/x-pack/plugin/inference/src/yamlRestTest/java/org/elasticsearch/xpack/inference/InferenceRestIT.java b/x-pack/plugin/inference/src/yamlRestTest/java/org/elasticsearch/xpack/inference/InferenceRestIT.java index 2f6127c44957f..701bcd204fcfe 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/java/org/elasticsearch/xpack/inference/InferenceRestIT.java +++ b/x-pack/plugin/inference/src/yamlRestTest/java/org/elasticsearch/xpack/inference/InferenceRestIT.java @@ -10,7 +10,6 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.elasticsearch.test.cluster.ElasticsearchCluster; -import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; @@ -22,8 +21,8 @@ public class InferenceRestIT extends ESClientYamlSuiteTestCase { public static ElasticsearchCluster cluster = ElasticsearchCluster.local() .setting("xpack.security.enabled", "false") .setting("xpack.security.http.ssl.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") .plugin("inference-service-test") - .feature(FeatureFlag.SEMANTIC_TEXT_ENABLED) .distribution(DistributionType.DEFAULT) .build(); diff --git 
a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/10_semantic_text_field_mapping.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/10_semantic_text_field_mapping.yml index 041dc05a8f5bb..d7f7e21e6f428 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/10_semantic_text_field_mapping.yml +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/10_semantic_text_field_mapping.yml @@ -88,6 +88,7 @@ setup: task_type: text_embedding dimensions: 4 similarity: cosine + element_type: float chunks: - text: "these are not the droids you're looking for" embeddings: [0.04673296958208084, -0.03237321600317955, -0.02543032355606556, 0.056035321205854416] @@ -156,11 +157,12 @@ setup: type: keyword --- -"Can be used as a nested field": +"Cannot be used directly as a nested field": - do: + catch: /semantic_text field \[nested.semantic\] cannot be nested/ indices.create: - index: test-copy_to-index + index: test-nested-index body: mappings: properties: @@ -173,3 +175,24 @@ setup: another_field: type: keyword +--- +"Cannot be used as a nested field on nested objects": + + - do: + catch: /semantic_text field \[nested.nested_object.semantic\] cannot be nested/ + indices.create: + index: test-nested-index + body: + mappings: + properties: + nested: + type: nested + properties: + nested_object: + type: object + properties: + semantic: + type: semantic_text + inference_id: sparse-inference-id + another_field: + type: keyword diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/20_semantic_text_field_mapping_incompatible_field_mapping.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/20_semantic_text_field_mapping_incompatible_field_mapping.yml index a7335728095a7..3d46c3b23d7e3 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/20_semantic_text_field_mapping_incompatible_field_mapping.yml +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/20_semantic_text_field_mapping_incompatible_field_mapping.yml @@ -49,6 +49,7 @@ setup: task_type: text_embedding dimensions: 4 similarity: cosine + element_type: float chunks: - text: "these are not the droids you're looking for" embeddings: [0.04673296958208084, -0.03237321600317955, -0.02543032355606556, 0.056035321205854416] @@ -73,6 +74,7 @@ setup: task_type: text_embedding dimensions: 5 similarity: cosine + element_type: float chunks: - text: "other text" embeddings: [0.04673296958208084, -0.03237321600317955, -0.02543032355606556, 0.056035321205854416, 0.053438711911439896] @@ -94,6 +96,7 @@ setup: task_type: text_embedding dimensions: 4 similarity: cosine + element_type: float chunks: - text: "other text" embeddings: [0.04673296958208084, -0.03237321600317955, -0.02543032355606556, 0.056035321205854416] @@ -115,6 +118,29 @@ setup: task_type: text_embedding dimensions: 4 similarity: dot_product + element_type: float + chunks: + - text: "other text" + embeddings: [0.04673296958208084, -0.03237321600317955, -0.02543032355606556, 0.056035321205854416] + +--- +"Fails for non-compatible element type": + + - do: + catch: /Incompatible model settings for field \[dense_field\].+/ + index: + index: test-index + id: doc_2 + body: + dense_field: + text: "other text" + inference: + inference_id: dense-inference-id + model_settings: + task_type: text_embedding + dimensions: 4 + similarity: cosine + 
element_type: byte chunks: - text: "other text" embeddings: [0.04673296958208084, -0.03237321600317955, -0.02543032355606556, 0.056035321205854416] @@ -159,6 +185,7 @@ setup: task_type: text_embedding dimensions: 4 similarity: cosine + element_type: float chunks: - text: "these are not the droids you're looking for" embeddings: [0.04673296958208084, -0.03237321600317955, -0.02543032355606556, 0.056035321205854416] @@ -180,6 +207,7 @@ setup: task_type: text_embedding dimensions: 4 similarity: cosine + element_type: float chunks: - text: "these are not the droids you're looking for" @@ -218,6 +246,7 @@ setup: task_type: text_embedding dimensions: 4 similarity: cosine + element_type: float chunks: - embeddings: [ 0.04673296958208084, -0.03237321600317955, -0.02543032355606556, 0.056035321205854416 ] diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/30_semantic_text_inference.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/30_semantic_text_inference.yml index 9987b43822cc0..f467691600766 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/30_semantic_text_inference.yml +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/30_semantic_text_inference.yml @@ -52,27 +52,27 @@ setup: --- "Calculates text expansion and embedding results for new documents": - - do: - index: - index: test-index - id: doc_1 - body: - sparse_field: "inference test" - dense_field: "another inference test" - non_inference_field: "non inference test" - - - do: - get: - index: test-index - id: doc_1 - - - match: { _source.sparse_field.text: "inference test" } - - exists: _source.sparse_field.inference.chunks.0.embeddings - - match: { _source.sparse_field.inference.chunks.0.text: "inference test" } - - match: { _source.dense_field.text: "another inference test" } - - exists: _source.dense_field.inference.chunks.0.embeddings - - match: { _source.dense_field.inference.chunks.0.text: "another inference test" } - - match: { _source.non_inference_field: "non inference test" } + - do: + index: + index: test-index + id: doc_1 + body: + sparse_field: "inference test" + dense_field: "another inference test" + non_inference_field: "non inference test" + + - do: + get: + index: test-index + id: doc_1 + + - match: { _source.sparse_field.text: "inference test" } + - exists: _source.sparse_field.inference.chunks.0.embeddings + - match: { _source.sparse_field.inference.chunks.0.text: "inference test" } + - match: { _source.dense_field.text: "another inference test" } + - exists: _source.dense_field.inference.chunks.0.embeddings + - match: { _source.dense_field.inference.chunks.0.text: "another inference test" } + - match: { _source.non_inference_field: "non inference test" } --- "Inference fields do not create new mappings": @@ -89,10 +89,10 @@ setup: indices.get_mapping: index: test-index - - match: {test-index.mappings.properties.sparse_field.type: semantic_text} - - match: {test-index.mappings.properties.dense_field.type: semantic_text} - - match: {test-index.mappings.properties.non_inference_field.type: text} - - length: {test-index.mappings.properties: 3} + - match: { test-index.mappings.properties.sparse_field.type: semantic_text } + - match: { test-index.mappings.properties.dense_field.type: semantic_text } + - match: { test-index.mappings.properties.non_inference_field.type: text } + - length: { test-index.mappings.properties: 3 } --- "Sparse vector results are indexed as nested 
chunks and searchable": @@ -114,13 +114,13 @@ setup: nested: path: sparse_field.inference.chunks query: - text_expansion: - sparse_field.inference.chunks.embeddings: - model_id: sparse-inference-id - model_text: "you know, for testing" + sparse_vector: + field: sparse_field.inference.chunks.embeddings + inference_id: sparse-inference-id + query: "you know, for testing" - - match: { hits.total.value: 2 } - - match: { hits.total.relation: eq } + - match: { hits.total.value: 2 } + - match: { hits.total.relation: eq } - length: { hits.hits.0._source.sparse_field.inference.chunks: 2 } - length: { hits.hits.1._source.sparse_field.inference.chunks: 2 } @@ -135,17 +135,17 @@ setup: path: sparse_field.inference.chunks inner_hits: _source: false - fields: [sparse_field.inference.chunks.text] + fields: [ sparse_field.inference.chunks.text ] query: - text_expansion: - sparse_field.inference.chunks.embeddings: - model_id: sparse-inference-id - model_text: "you know, for testing" + sparse_vector: + field: sparse_field.inference.chunks.embeddings + inference_id: sparse-inference-id + query: "you know, for testing" - - match: { hits.total.value: 2 } - - match: { hits.total.relation: eq } - - match: { hits.hits.0.inner_hits.sparse_field\.inference\.chunks.hits.total.value: 2 } - - match: { hits.hits.0.inner_hits.sparse_field\.inference\.chunks.hits.total.relation: eq } + - match: { hits.total.value: 2 } + - match: { hits.total.relation: eq } + - match: { hits.hits.0.inner_hits.sparse_field\.inference\.chunks.hits.total.value: 2 } + - match: { hits.hits.0.inner_hits.sparse_field\.inference\.chunks.hits.total.relation: eq } - length: { hits.hits.0.inner_hits.sparse_field\.inference\.chunks.hits.hits.0.fields.sparse_field\.inference\.chunks.0.text: 1 } - length: { hits.hits.0.inner_hits.sparse_field\.inference\.chunks.hits.hits.1.fields.sparse_field\.inference\.chunks.0.text: 1 } @@ -180,8 +180,8 @@ setup: model_id: dense-inference-id model_text: "you know, for testing" - - match: { hits.total.value: 2 } - - match: { hits.total.relation: eq } + - match: { hits.total.value: 2 } + - match: { hits.total.relation: eq } - length: { hits.hits.0._source.dense_field.inference.chunks: 2 } - length: { hits.hits.1._source.dense_field.inference.chunks: 2 } @@ -196,7 +196,7 @@ setup: path: dense_field.inference.chunks inner_hits: _source: false - fields: [dense_field.inference.chunks.text] + fields: [ dense_field.inference.chunks.text ] query: knn: field: dense_field.inference.chunks.embeddings @@ -205,10 +205,10 @@ setup: model_id: dense-inference-id model_text: "you know, for testing" - - match: { hits.total.value: 2 } - - match: { hits.total.relation: eq } - - match: { hits.hits.0.inner_hits.dense_field\.inference\.chunks.hits.total.value: 2 } - - match: { hits.hits.0.inner_hits.dense_field\.inference\.chunks.hits.total.relation: eq } + - match: { hits.total.value: 2 } + - match: { hits.total.relation: eq } + - match: { hits.hits.0.inner_hits.dense_field\.inference\.chunks.hits.total.value: 2 } + - match: { hits.hits.0.inner_hits.dense_field\.inference\.chunks.hits.total.relation: eq } - length: { hits.hits.0.inner_hits.dense_field\.inference\.chunks.hits.hits.0.fields.dense_field\.inference\.chunks.0.text: 1 } - length: { hits.hits.0.inner_hits.dense_field\.inference\.chunks.hits.hits.1.fields.dense_field\.inference\.chunks.0.text: 1 } @@ -265,13 +265,13 @@ setup: index: destination-index id: doc_1 - - match: { _source.sparse_field.text: "inference test" } - - match: { _source.sparse_field.inference.chunks.0.text: 
"inference test" } - - match: { _source.sparse_field.inference.chunks.0.embeddings: $sparse_field_embedding } - - match: { _source.dense_field.text: "another inference test" } + - match: { _source.sparse_field.text: "inference test" } + - match: { _source.sparse_field.inference.chunks.0.text: "inference test" } + - match: { _source.sparse_field.inference.chunks.0.embeddings: $sparse_field_embedding } + - match: { _source.dense_field.text: "another inference test" } - match: { _source.dense_field.inference.chunks.0.text: "another inference test" } - match: { _source.dense_field.inference.chunks.0.embeddings: $dense_field_embedding } - - match: { _source.non_inference_field: "non inference test" } + - match: { _source.non_inference_field: "non inference test" } --- "Fails for non-existent inference": @@ -338,14 +338,14 @@ setup: index: test-copy-to-index id: doc_1 - - match: { _source.sparse_field.text: "inference test" } + - match: { _source.sparse_field.text: "inference test" } - length: { _source.sparse_field.inference.chunks: 3 } - - match: { _source.sparse_field.inference.chunks.0.text: "another copy_to inference test" } - - exists: _source.sparse_field.inference.chunks.0.embeddings - - match: { _source.sparse_field.inference.chunks.1.text: "copy_to inference test" } - - exists: _source.sparse_field.inference.chunks.1.embeddings - - match: { _source.sparse_field.inference.chunks.2.text: "inference test" } - - exists: _source.sparse_field.inference.chunks.2.embeddings + - match: { _source.sparse_field.inference.chunks.0.text: "another copy_to inference test" } + - exists: _source.sparse_field.inference.chunks.0.embeddings + - match: { _source.sparse_field.inference.chunks.1.text: "copy_to inference test" } + - exists: _source.sparse_field.inference.chunks.1.embeddings + - match: { _source.sparse_field.inference.chunks.2.text: "inference test" } + - exists: _source.sparse_field.inference.chunks.2.embeddings --- "Calculates embeddings for bulk operations - index": @@ -455,8 +455,8 @@ setup: id: doc_1 - match: { _source.sparse_field.text: "updated inference test" } - - match: { _source.sparse_field.inference.chunks.0.text: "updated inference test" } + - match: { _source.sparse_field.inference.chunks.0.text: "updated inference test" } - exists: _source.sparse_field.inference.chunks.0.embeddings - - match: { _source.dense_field.text: "another updated inference test" } - - match: { _source.dense_field.inference.chunks.0.text: "another updated inference test" } + - match: { _source.dense_field.text: "another updated inference test" } + - match: { _source.dense_field.inference.chunks.0.text: "another updated inference test" } - exists: _source.dense_field.inference.chunks.0.embeddings diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml index 8fffa7fa8c7ef..5ee7a943c4d35 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml @@ -121,6 +121,67 @@ setup: - close_to: { hits.hits.0._score: { value: 1.0, error: 0.0001 } } - length: { hits.hits.0._source.inference_field.inference.chunks: 2 } +--- +"Query using a dense embedding model that uses byte embeddings": + - skip: + features: [ "headers", "close_to" ] + + - do: + inference.put: + task_type: 
text_embedding + inference_id: dense-inference-byte-id + body: > + { + "service": "text_embedding_test_service", + "service_settings": { + "model": "my_model", + "dimensions": 10, + "api_key": "abc64", + "similarity": "COSINE", + "element_type": "byte" + }, + "task_settings": { + } + } + + - do: + indices.create: + index: test-dense-byte-index + body: + mappings: + properties: + inference_field: + type: semantic_text + inference_id: dense-inference-byte-id + non_inference_field: + type: text + + - do: + index: + index: test-dense-byte-index + id: doc_1 + body: + inference_field: [ "inference test", "another inference test" ] + non_inference_field: "non inference test" + refresh: true + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: test-dense-byte-index + body: + query: + semantic: + field: "inference_field" + query: "inference test" + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc_1" } + - close_to: { hits.hits.0._score: { value: 1.0, error: 0.0001 } } + - length: { hits.hits.0._source.inference_field.inference.chunks: 2 } + --- "Apply boost and query name": - skip: diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/50_semantic_text_query_inference_endpoint_changes.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/50_semantic_text_query_inference_endpoint_changes.yml index fd656c9d5d950..f6a7073914609 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/50_semantic_text_query_inference_endpoint_changes.yml +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/50_semantic_text_query_inference_endpoint_changes.yml @@ -81,6 +81,7 @@ setup: - do: inference.delete: inference_id: sparse-inference-id + force: true - do: inference.put: @@ -119,6 +120,7 @@ setup: - do: inference.delete: inference_id: dense-inference-id + force: true - do: inference.put: @@ -155,6 +157,7 @@ setup: - do: inference.delete: inference_id: dense-inference-id + force: true - do: inference.put: diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_semantic_text_exists_query.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_semantic_text_exists_query.yml new file mode 100644 index 0000000000000..11bd1f87aab06 --- /dev/null +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_semantic_text_exists_query.yml @@ -0,0 +1,144 @@ +setup: + - requires: + cluster_features: "gte_v8.15.0" + reason: semantic_text introduced in 8.15.0 + + - do: + inference.put: + task_type: sparse_embedding + inference_id: sparse-inference-id + body: > + { + "service": "test_service", + "service_settings": { + "model": "my_model", + "api_key": "abc64" + }, + "task_settings": { + } + } + + - do: + inference.put: + task_type: text_embedding + inference_id: dense-inference-id + body: > + { + "service": "text_embedding_test_service", + "service_settings": { + "model": "my_model", + "dimensions": 10, + "api_key": "abc64", + "similarity": "COSINE" + }, + "task_settings": { + } + } + + - do: + indices.create: + index: test-sparse-index + body: + mappings: + properties: + inference_field: + type: semantic_text + inference_id: sparse-inference-id + + - do: + indices.create: + index: test-dense-index + body: + mappings: + properties: + 
inference_field: + type: semantic_text + inference_id: dense-inference-id + +--- +"Exists query with no indexed documents": + - do: + search: + index: test-sparse-index + body: + query: + exists: + field: "inference_field" + + - match: { hits.total.value: 0 } + +--- +"Exists query with null indexed documents": + - do: + index: + index: test-sparse-index + id: doc + body: + inference_field: null + refresh: true + + - do: + search: + index: test-sparse-index + body: + query: + exists: + field: "inference_field" + + - match: { hits.total.value: 0 } + + - do: + index: + index: test-dense-index + id: doc + body: + inference_field: null + refresh: true + + - do: + search: + index: test-dense-index + body: + query: + exists: + field: "inference_field" + + - match: { hits.total.value: 0 } + +--- +"Exists query with indexed documents": + - do: + index: + index: test-sparse-index + id: doc + body: + inference_field: "hello world" + refresh: true + + - do: + search: + index: test-sparse-index + body: + query: + exists: + field: "inference_field" + + - match: { hits.total.value: 1 } + + - do: + index: + index: test-dense-index + id: doc + body: + inference_field: "hello world" + refresh: true + + - do: + search: + index: test-dense-index + body: + query: + exists: + field: "inference_field" + + - match: { hits.total.value: 1 } diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml new file mode 100644 index 0000000000000..2e01e2b9c8d04 --- /dev/null +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml @@ -0,0 +1,90 @@ +setup: + - requires: + cluster_features: "text_similarity_reranker_retriever_supported" + reason: semantic reranking introduced in 8.15.0 + test_runner_features: "close_to" + + - do: + inference.put: + task_type: rerank + inference_id: my-rerank-model + body: > + { + "service": "test_reranking_service", + "service_settings": { + "model_id": "my_model", + "api_key": "abc64" + }, + "task_settings": { + } + } + + - do: + indices.create: + index: test-index + body: + mappings: + properties: + text: + type: text + topic: + type: keyword + + - do: + index: + index: test-index + id: doc_1 + body: + text: "As seen from Earth, a solar eclipse happens when the Moon is directly between the Earth and the Sun." + topic: ["science"] + refresh: true + + - do: + index: + index: test-index + id: doc_2 + body: + text: "The phases of the Moon come from the position of the Moon relative to the Earth and Sun." + topic: ["science"] + refresh: true + + - do: + index: + index: test-index + id: doc_3 + body: + text: "Sun Moon Lake is a lake in Nantou County, Taiwan. It is the largest lake in Taiwan." + topic: ["geography"] + refresh: true +--- +"Simple text similarity rank retriever": + + - do: + search: + index: test-index + body: + track_total_hits: true + fields: [ "text", "topic" ] + retriever: + text_similarity_reranker: + retriever: + standard: + query: + term: + topic: "science" + rank_window_size: 10 + inference_id: my-rerank-model + inference_text: "How often does the moon hide the sun?" 
+ field: text + size: 10 + + - match: { hits.total.value : 2 } + - length: { hits.hits: 2 } + + - match: { hits.hits.0._id: "doc_2" } + - match: { hits.hits.0._rank: 1 } + - close_to: { hits.hits.0._score: { value: 0.4, error: 0.001 } } + + - match: { hits.hits.1._id: "doc_1" } + - match: { hits.hits.1._rank: 2 } + - close_to: { hits.hits.1._score: { value: 0.2, error: 0.001 } } diff --git a/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java b/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java index 33efabf101be7..2602c2947420b 100644 --- a/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java +++ b/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java @@ -143,7 +143,7 @@ public static final class Builder extends FieldMapper.Builder { return parsedMetrics; }, m -> toType(m).metrics, XContentBuilder::enumSet, Objects::toString).addValidator(v -> { if (v == null || v.isEmpty()) { - throw new IllegalArgumentException("Property [" + Names.METRICS + "] is required for field [" + name() + "]."); + throw new IllegalArgumentException("Property [" + Names.METRICS + "] is required for field [" + leafName() + "]."); } }); @@ -209,21 +209,23 @@ public AggregateDoubleMetricFieldMapper build(MapperBuilderContext context) { } if (metrics.getValue().contains(defaultMetric.getValue()) == false) { - throw new IllegalArgumentException("Property [" + Names.DEFAULT_METRIC + "] is required for field [" + name() + "]."); + throw new IllegalArgumentException( + "Property [" + Names.DEFAULT_METRIC + "] is required for field [" + leafName() + "]." + ); } } if (metrics.getValue().contains(defaultMetric.getValue()) == false) { // The default_metric is not defined in the "metrics" field throw new IllegalArgumentException( - "Default metric [" + defaultMetric.getValue() + "] is not defined in the metrics of field [" + name() + "]." + "Default metric [" + defaultMetric.getValue() + "] is not defined in the metrics of field [" + leafName() + "]." 
); } EnumMap metricMappers = new EnumMap<>(Metric.class); // Instantiate one NumberFieldMapper instance for each metric for (Metric m : this.metrics.getValue()) { - String fieldName = subfieldName(name(), m); + String fieldName = subfieldName(leafName(), m); NumberFieldMapper.Builder builder; if (m == Metric.value_count) { @@ -259,14 +261,14 @@ public AggregateDoubleMetricFieldMapper build(MapperBuilderContext context) { }, () -> new EnumMap<>(Metric.class))); AggregateDoubleMetricFieldType metricFieldType = new AggregateDoubleMetricFieldType( - context.buildFullName(name()), + context.buildFullName(leafName()), meta.getValue(), timeSeriesMetric.getValue() ); metricFieldType.setMetricFields(metricFields); metricFieldType.setDefaultMetric(defaultMetric.getValue()); - return new AggregateDoubleMetricFieldMapper(name(), metricFieldType, metricMappers, this); + return new AggregateDoubleMetricFieldMapper(leafName(), metricFieldType, metricMappers, this); } } @@ -586,7 +588,7 @@ protected boolean supportsParsingObject() { @Override protected void parseCreateField(DocumentParserContext context) throws IOException { - context.path().add(simpleName()); + context.path().add(leafName()); XContentParser.Token token; XContentSubParser subParser = null; EnumMap metricsParsed = new EnumMap<>(Metric.class); @@ -663,7 +665,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio if (context.doc().getByKey(delegateFieldMapper.fieldType().name()) != null) { throw new IllegalArgumentException( "Field [" - + name() + + fullPath() + "] of type [" + typeName() + "] does not support indexing multiple values for the same field in the same document" @@ -685,10 +687,10 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio } if (malformedDataForSyntheticSource != null) { - context.doc().add(IgnoreMalformedStoredValues.storedField(name(), malformedDataForSyntheticSource)); + context.doc().add(IgnoreMalformedStoredValues.storedField(fullPath(), malformedDataForSyntheticSource)); } - context.addIgnoredField(name()); + context.addIgnoredField(fullPath()); context.path().remove(); return; } @@ -705,7 +707,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), ignoreMalformedByDefault, indexCreatedVersion, indexMode).metric(metricType).init(this); + return new Builder(leafName(), ignoreMalformedByDefault, indexCreatedVersion, indexMode).metric(metricType).init(this); } @Override @@ -716,10 +718,10 @@ protected SyntheticSourceMode syntheticSourceMode() { @Override public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { return new CompositeSyntheticFieldLoader( - simpleName(), - name(), - new AggregateMetricSyntheticFieldLoader(name(), simpleName(), metrics), - new CompositeSyntheticFieldLoader.MalformedValuesLayer(name()) + leafName(), + fullPath(), + new AggregateMetricSyntheticFieldLoader(fullPath(), leafName(), metrics), + new CompositeSyntheticFieldLoader.MalformedValuesLayer(fullPath()) ); } diff --git a/x-pack/plugin/mapper-aggregate-metric/src/test/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldTypeTests.java b/x-pack/plugin/mapper-aggregate-metric/src/test/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldTypeTests.java index c32e7e583c787..4b7b27bf2cec3 100644 --- 
a/x-pack/plugin/mapper-aggregate-metric/src/test/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldTypeTests.java +++ b/x-pack/plugin/mapper-aggregate-metric/src/test/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldTypeTests.java @@ -124,7 +124,7 @@ public void testUsedInScript() throws IOException { SearchLookup lookup = new SearchLookup( searchExecutionContext::getFieldType, (mft, lookupSupplier, fdo) -> mft.fielddataBuilder( - new FieldDataContext("test", lookupSupplier, searchExecutionContext::sourcePath, fdo) + new FieldDataContext("test", null, lookupSupplier, searchExecutionContext::sourcePath, fdo) ).build(null, null), (ctx, doc) -> null ); diff --git a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java index c27b3c8207102..f2222e0970ae0 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java +++ b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java @@ -74,7 +74,7 @@ private static ConstantKeywordFieldMapper toType(FieldMapper in) { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName()).init(this); + return new Builder(leafName()).init(this); } public static class Builder extends FieldMapper.Builder { @@ -110,8 +110,8 @@ public ConstantKeywordFieldMapper build(MapperBuilderContext context) { ); } return new ConstantKeywordFieldMapper( - name(), - new ConstantKeywordFieldType(context.buildFullName(name()), value.getValue(), meta.getValue()) + leafName(), + new ConstantKeywordFieldType(context.buildFullName(leafName()), value.getValue(), meta.getValue()) ); } } @@ -316,19 +316,19 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio final String value = parser.textOrNull(); if (value == null) { - throw new IllegalArgumentException("[constant_keyword] field [" + name() + "] doesn't accept [null] values"); + throw new IllegalArgumentException("[constant_keyword] field [" + fullPath() + "] doesn't accept [null] values"); } if (fieldType().value == null) { ConstantKeywordFieldType newFieldType = new ConstantKeywordFieldType(fieldType().name(), value, fieldType().meta()); - Mapper update = new ConstantKeywordFieldMapper(simpleName(), newFieldType); + Mapper update = new ConstantKeywordFieldMapper(leafName(), newFieldType); boolean dynamicMapperAdded = context.addDynamicMapper(update); // the mapper is already part of the mapping, we're just updating it with the new value assert dynamicMapperAdded; } else if (Objects.equals(fieldType().value, value) == false) { throw new IllegalArgumentException( "[constant_keyword] field [" - + name() + + fullPath() + "] only accepts values that are equal to the value defined in the mappings [" + fieldType().value() + "], but got [" @@ -374,13 +374,13 @@ public boolean hasValue() { @Override public void write(XContentBuilder b) throws IOException { if (fieldType().value != null) { - b.field(simpleName(), fieldType().value); + b.field(leafName(), fieldType().value); } } @Override public String fieldName() { - return name(); + return fullPath(); } }; } diff --git 
a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java index 1152d93f66b38..92aac7897bcfd 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java +++ b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.BlockLoader; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentParsingException; @@ -240,6 +241,11 @@ public String indexName() { throw new UnsupportedOperationException(); } + @Override + public IndexSettings indexSettings() { + throw new UnsupportedOperationException(); + } + @Override public MappedFieldType.FieldExtractPreference fieldExtractPreference() { return MappedFieldType.FieldExtractPreference.NONE; diff --git a/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/resources/rest-api-spec/test/30_sort.yml b/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/resources/rest-api-spec/test/30_sort.yml new file mode 100644 index 0000000000000..8d489b8211eb1 --- /dev/null +++ b/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/resources/rest-api-spec/test/30_sort.yml @@ -0,0 +1,65 @@ +setup: + - do: + indices.create: + index: test + body: + mappings: + properties: + keyword: + type: keyword + + - do: + indices.create: + index: test_numeric + body: + mappings: + properties: + keyword: + type: long + + - do: + indices.create: + index: test_constant + body: + mappings: + properties: + keyword: + type: constant_keyword + value: value + + - do: + bulk: + refresh: true + body: | + { "index": {"_index" : "test", "_id": 3} } + { "keyword": "abc" } + { "index": {"_index" : "test_numeric", "_id": 2} } + { "keyword": 42 } + { "index": {"_index" : "test_constant", "_id": 1} } + {} + +--- +"constant_keyword mixed sort": + - do: + search: + index: test,test_constant + body: + sort: keyword + + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.1._id: "1" } + +--- +"constant_keyword invalid mixed sort": + - requires: + cluster_features: [ "gte_v8.15.0" ] + reason: Better error message in 8.15.0 + + - do: + catch: /Can't sort on field \[keyword\]\; the field has incompatible sort types/ + search: + index: test* + body: + sort: keyword + diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapper.java b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapper.java index 0025d3f7dd7b2..dcf4ba7a3ce25 100644 --- a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapper.java +++ b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapper.java @@ -290,16 +290,16 @@ protected Parameter[] getParameters() { public FieldMapper build(MapperBuilderContext context) { BinaryFieldMapper countFieldMapper = new 
BinaryFieldMapper.Builder( - name() + COUNT_FIELD_NAME_SUFFIX, + leafName() + COUNT_FIELD_NAME_SUFFIX, context.isSourceSynthetic() ).docValues(true).build(context); boolean isIndexed = indexed.getValue(); FieldType ft = isIndexed ? FIELD_TYPE_INDEXED : FIELD_TYPE_NOT_INDEXED; return new CountedKeywordFieldMapper( - name(), + leafName(), ft, new CountedKeywordFieldType( - context.buildFullName(name()), + context.buildFullName(leafName()), isIndexed, false, true, @@ -354,12 +354,12 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio int i = 0; int[] counts = new int[values.size()]; for (Map.Entry value : values.entrySet()) { - context.doc().add(new KeywordFieldMapper.KeywordField(name(), new BytesRef(value.getKey()), fieldType)); + context.doc().add(new KeywordFieldMapper.KeywordField(fullPath(), new BytesRef(value.getKey()), fieldType)); counts[i++] = value.getValue(); } BytesStreamOutput streamOutput = new BytesStreamOutput(); streamOutput.writeVIntArray(counts); - context.doc().add(new BinaryDocValuesField(countFieldMapper.name(), streamOutput.bytes().toBytesRef())); + context.doc().add(new BinaryDocValuesField(countFieldMapper.fullPath(), streamOutput.bytes().toBytesRef())); } private void parseArray(DocumentParserContext context, SortedMap values) throws IOException { @@ -401,7 +401,7 @@ public Iterator iterator() { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName()).init(this); + return new Builder(leafName()).init(this); } @Override diff --git a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java index e356fc2756c56..eae4094fee0d0 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java +++ b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java @@ -158,7 +158,7 @@ private String parseNullValueAsString(Object o) { parseUnsignedLong(o); // confirm that null_value is a proper unsigned_long return (o instanceof BytesRef) ? 
((BytesRef) o).utf8ToString() : o.toString(); } catch (Exception e) { - throw new MapperParsingException("Error parsing [null_value] on field [" + name() + "]: " + e.getMessage(), e); + throw new MapperParsingException("Error parsing [null_value] on field [" + leafName() + "]: " + e.getMessage(), e); } } @@ -200,7 +200,7 @@ public UnsignedLongFieldMapper build(MapperBuilderContext context) { dimension.setValue(true); } UnsignedLongFieldType fieldType = new UnsignedLongFieldType( - context.buildFullName(name()), + context.buildFullName(leafName()), indexed.getValue(), stored.getValue(), hasDocValues.getValue(), @@ -211,7 +211,7 @@ public UnsignedLongFieldMapper build(MapperBuilderContext context) { indexMode ); return new UnsignedLongFieldMapper( - name(), + leafName(), fieldType, multiFieldsBuilder.build(this, context), copyTo, @@ -636,7 +636,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio context.addIgnoredField(mappedFieldType.name()); if (isSourceSynthetic) { // Save a copy of the field so synthetic source can load it - context.doc().add(IgnoreMalformedStoredValues.storedField(name(), context.parser())); + context.doc().add(IgnoreMalformedStoredValues.storedField(fullPath(), context.parser())); } return; } else { @@ -679,7 +679,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), ignoreMalformedByDefault, indexMode).dimension(dimension).metric(metricType).init(this); + return new Builder(leafName(), ignoreMalformedByDefault, indexMode).dimension(dimension).metric(metricType).init(this); } /** @@ -753,9 +753,9 @@ protected static long sortableSignedLongToUnsigned(long value) { @Override public void doValidate(MappingLookup lookup) { - if (dimension && null != lookup.nestedLookup().getNestedParent(name())) { + if (dimension && null != lookup.nestedLookup().getNestedParent(fullPath())) { throw new IllegalArgumentException( - TimeSeriesParams.TIME_SERIES_DIMENSION_PARAM + " can't be configured in nested field [" + name() + "]" + TimeSeriesParams.TIME_SERIES_DIMENSION_PARAM + " can't be configured in nested field [" + fullPath() + "]" ); } } @@ -769,15 +769,19 @@ protected SyntheticSourceMode syntheticSourceMode() { public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { if (hasDocValues == false) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it doesn't have doc values" + "field [" + + fullPath() + + "] of type [" + + typeName() + + "] doesn't support synthetic source because it doesn't have doc values" ); } if (copyTo.copyToFields().isEmpty() != true) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" + "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); } - return new SortedNumericDocValuesSyntheticFieldLoader(name(), simpleName(), ignoreMalformed()) { + return new SortedNumericDocValuesSyntheticFieldLoader(fullPath(), leafName(), ignoreMalformed()) { @Override protected void writeValue(XContentBuilder b, long value) throws IOException { b.value(DocValueFormat.UNSIGNED_LONG_SHIFTED.format(value)); diff --git a/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java 
b/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java index bab91e5d99eca..1113b9ff57a72 100644 --- a/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java +++ b/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java @@ -112,14 +112,14 @@ static class Builder extends FieldMapper.Builder { } private VersionStringFieldType buildFieldType(MapperBuilderContext context, FieldType fieldtype) { - return new VersionStringFieldType(context.buildFullName(name()), fieldtype, meta.getValue()); + return new VersionStringFieldType(context.buildFullName(leafName()), fieldtype, meta.getValue()); } @Override public VersionStringFieldMapper build(MapperBuilderContext context) { FieldType fieldtype = new FieldType(Defaults.FIELD_TYPE); return new VersionStringFieldMapper( - name(), + leafName(), fieldtype, buildFieldType(context, fieldtype), multiFieldsBuilder.build(this, context), @@ -442,7 +442,7 @@ public String toString() { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName()).init(this); + return new Builder(leafName()).init(this); } @Override @@ -454,10 +454,10 @@ protected SyntheticSourceMode syntheticSourceMode() { public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { if (copyTo.copyToFields().isEmpty() != true) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" + "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); } - return new SortedSetDocValuesSyntheticFieldLoader(name(), simpleName(), null, false) { + return new SortedSetDocValuesSyntheticFieldLoader(fullPath(), leafName(), null, false) { @Override protected BytesRef convert(BytesRef value) { return VersionEncoder.decodeVersion(value); diff --git a/x-pack/plugin/ml/qa/basic-multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlLearningToRankRescorerIT.java b/x-pack/plugin/ml/qa/basic-multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlLearningToRankRescorerIT.java index e5238c4aa44f0..6c6e1caf06584 100644 --- a/x-pack/plugin/ml/qa/basic-multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlLearningToRankRescorerIT.java +++ b/x-pack/plugin/ml/qa/basic-multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlLearningToRankRescorerIT.java @@ -31,10 +31,33 @@ public void setupModelAndData() throws IOException { putLearningToRankModel(MODEL_ID, """ { "description": "super complex model for tests", - "input": { "field_names": ["cost", "product"] }, "inference_config": { "learning_to_rank": { "feature_extractors": [ + { + "query_extractor": { + "feature_name": "cost", + "query": {"script_score": {"query": {"match_all":{}}, "script": {"source": "return doc['cost'].value;"}}} + } + }, + { + "query_extractor": { + "feature_name": "type_tv", + "query": {"constant_score": {"filter": {"term": { "product": "TV" }}, "boost": 1.0}} + } + }, + { + "query_extractor": { + "feature_name": "type_vcr", + "query": {"constant_score": {"filter": {"term": { "product": "VCR" }}, "boost": 1.0}} + } + }, + { + "query_extractor": { + "feature_name": "type_laptop", + "query": {"constant_score": {"filter": {"term": { "product": "Laptop" }}, "boost": 1.0}} + } + }, { "query_extractor": { "feature_name": "two", @@ 
-51,16 +74,6 @@ public void setupModelAndData() throws IOException { } }, "definition": { - "preprocessors" : [{ - "one_hot_encoding": { - "field": "product", - "hot_map": { - "TV": "type_tv", - "VCR": "type_vcr", - "Laptop": "type_laptop" - } - } - }], "trained_model": { "ensemble": { "feature_names": ["cost", "type_tv", "type_vcr", "type_laptop", "two", "product_bm25"], @@ -351,7 +364,6 @@ public void testModelCacheIsFlushedOnModelChange() throws IOException { deleteLearningToRankModel(MODEL_ID); putLearningToRankModel(MODEL_ID, """ { - "input": { "field_names": ["cost"] }, "inference_config": { "learning_to_rank": { "feature_extractors": [ diff --git a/x-pack/plugin/ml/qa/ml-with-security/build.gradle b/x-pack/plugin/ml/qa/ml-with-security/build.gradle index f2ec17093bb93..0869ae394d3de 100644 --- a/x-pack/plugin/ml/qa/ml-with-security/build.gradle +++ b/x-pack/plugin/ml/qa/ml-with-security/build.gradle @@ -181,6 +181,7 @@ tasks.named("yamlRestTest").configure { 'ml/inference_crud/Test put model model aliases with nlp model', 'ml/inference_processor/Test create processor with missing mandatory fields', 'ml/learning_to_rank_rescorer/Test rescore with missing model', + 'ml/learning_to_rank_rescorer/Test model input validation', 'ml/inference_stats_crud/Test get stats given missing trained model', 'ml/inference_stats_crud/Test get stats given expression without matches and allow_no_match is false', 'ml/jobs_crud/Test cannot create job with model snapshot id set', diff --git a/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/src/test/resources/rest-api-spec/test/multi_cluster/40_text_expansion.yml b/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/src/test/resources/rest-api-spec/test/multi_cluster/40_text_expansion.yml index e1ec400ce64bd..2253ecfc79f6f 100644 --- a/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/src/test/resources/rest-api-spec/test/multi_cluster/40_text_expansion.yml +++ b/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/src/test/resources/rest-api-spec/test/multi_cluster/40_text_expansion.yml @@ -131,6 +131,8 @@ teardown: --- "Test text expansion search": + - requires: + test_runner_features: [ "allowed_warnings" ] - do: search: index: index-with-sparse-vector @@ -140,13 +142,17 @@ teardown: ml.tokens: model_id: text_expansion_model model_text: "octopus comforter smells" + allowed_warnings: + - "text_expansion is deprecated. Use sparse_vector instead." + - match: { hits.total.value: 4 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } --- "Test text expansion search with pruning config": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "pruning introduced in 8.13.0" - do: @@ -161,13 +167,17 @@ teardown: pruning_config: tokens_freq_ratio_threshold: 4 tokens_weight_threshold: 0.4 + allowed_warnings: + - "text_expansion is deprecated. Use sparse_vector instead." + - match: { hits.total.value: 4 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } --- "Test named, boosted text expansion search with pruning config": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "pruning introduced in 8.13.0" - do: search: @@ -181,6 +191,9 @@ teardown: pruning_config: tokens_freq_ratio_threshold: 4 tokens_weight_threshold: 0.4 + allowed_warnings: + - "text_expansion is deprecated. Use sparse_vector instead." 
+ - match: { hits.total.value: 4 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } - match: { hits.hits.0._score: 3.0 } @@ -199,15 +212,19 @@ teardown: tokens_weight_threshold: 0.4 _name: i-like-naming-my-queries boost: 100.0 + allowed_warnings: + - "text_expansion is deprecated. Use sparse_vector instead." + - match: { hits.total.value: 4 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } - - match: { hits.hits.0.matched_queries: ["i-like-naming-my-queries"] } + - match: { hits.hits.0.matched_queries: [ "i-like-naming-my-queries" ] } - match: { hits.hits.0._score: 300.0 } --- "Test text expansion search with default pruning config": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "pruning introduced in 8.13.0" - do: @@ -219,14 +236,18 @@ teardown: ml.tokens: model_id: text_expansion_model model_text: "octopus comforter smells" - pruning_config: {} + pruning_config: { } + allowed_warnings: + - "text_expansion is deprecated. Use sparse_vector instead." + - match: { hits.total.value: 4 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } --- "Test text expansion search with weighted tokens rescoring only pruned tokens": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "pruning introduced in 8.13.0" - do: @@ -242,12 +263,16 @@ teardown: tokens_freq_ratio_threshold: 4 tokens_weight_threshold: 0.4 only_score_pruned_tokens: true + allowed_warnings: + - "text_expansion is deprecated. Use sparse_vector instead." + - match: { hits.total.value: 0 } --- "Test weighted tokens search": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "weighted token search introduced in 8.13.0" - do: @@ -257,18 +282,22 @@ teardown: query: weighted_tokens: ml.tokens: - tokens: [{"the": 1.0}, {"comforter":1.0}, {"smells":1.0}, {"bad": 1.0}] + tokens: [ { "the": 1.0 }, { "comforter": 1.0 }, { "smells": 1.0 }, { "bad": 1.0 } ] pruning_config: tokens_freq_ratio_threshold: 1 tokens_weight_threshold: 0.4 only_score_pruned_tokens: false + allowed_warnings: + - "weighted_tokens is deprecated and will be removed. Use sparse_vector instead." + - match: { hits.total.value: 5 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } --- "Test weighted tokens search with default pruning config": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "weighted token search introduced in 8.13.0" - do: @@ -278,15 +307,19 @@ teardown: query: weighted_tokens: ml.tokens: - tokens: [{"the": 1.0}, {"comforter":1.0}, {"smells":1.0}, {"bad": 1.0}] - pruning_config: {} + tokens: [ { "the": 1.0 }, { "comforter": 1.0 }, { "smells": 1.0 }, { "bad": 1.0 } ] + pruning_config: { } + allowed_warnings: + - "weighted_tokens is deprecated and will be removed. Use sparse_vector instead." 
+ - match: { hits.total.value: 5 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } --- "Test weighted tokens search only scoring pruned tokens": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "weighted token search introduced in 8.13.0" - do: @@ -296,17 +329,21 @@ teardown: query: weighted_tokens: ml.tokens: - tokens: [{"the": 1.0}, {"comforter":1.0}, {"smells":1.0}, {"bad": 1.0}] + tokens: [ { "the": 1.0 }, { "comforter": 1.0 }, { "smells": 1.0 }, { "bad": 1.0 } ] pruning_config: tokens_freq_ratio_threshold: 4 tokens_weight_threshold: 0.4 only_score_pruned_tokens: true + allowed_warnings: + - "weighted_tokens is deprecated and will be removed. Use sparse_vector instead." + - match: { hits.total.value: 0 } --- "Test weighted tokens search that prunes tokens based on frequency": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "weighted token search introduced in 8.13.0" - do: @@ -316,9 +353,12 @@ teardown: query: weighted_tokens: ml.tokens: - tokens: [{"the": 1.0}, {"octopus":1.0}, {"comforter":1.0}, {"is": 1.0}, {"the": 1.0}, {"best": 1.0}, {"of": 1.0}, {"the": 1.0}, {"bunch": 1.0}] + tokens: [ { "the": 1.0 }, { "octopus": 1.0 }, { "comforter": 1.0 }, { "is": 1.0 }, { "the": 1.0 }, { "best": 1.0 }, { "of": 1.0 }, { "the": 1.0 }, { "bunch": 1.0 } ] pruning_config: tokens_freq_ratio_threshold: 3 tokens_weight_threshold: 0.4 only_score_pruned_tokens: true + allowed_warnings: + - "weighted_tokens is deprecated and will be removed. Use sparse_vector instead." + - match: { hits.total.value: 0 } diff --git a/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/src/test/resources/rest-api-spec/test/remote_cluster/40_text_expansion.yml b/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/src/test/resources/rest-api-spec/test/remote_cluster/40_text_expansion.yml index e1ec400ce64bd..2253ecfc79f6f 100644 --- a/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/src/test/resources/rest-api-spec/test/remote_cluster/40_text_expansion.yml +++ b/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/src/test/resources/rest-api-spec/test/remote_cluster/40_text_expansion.yml @@ -131,6 +131,8 @@ teardown: --- "Test text expansion search": + - requires: + test_runner_features: [ "allowed_warnings" ] - do: search: index: index-with-sparse-vector @@ -140,13 +142,17 @@ teardown: ml.tokens: model_id: text_expansion_model model_text: "octopus comforter smells" + allowed_warnings: + - "text_expansion is deprecated. Use sparse_vector instead." + - match: { hits.total.value: 4 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } --- "Test text expansion search with pruning config": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "pruning introduced in 8.13.0" - do: @@ -161,13 +167,17 @@ teardown: pruning_config: tokens_freq_ratio_threshold: 4 tokens_weight_threshold: 0.4 + allowed_warnings: + - "text_expansion is deprecated. Use sparse_vector instead." 
+ - match: { hits.total.value: 4 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } --- "Test named, boosted text expansion search with pruning config": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "pruning introduced in 8.13.0" - do: search: @@ -181,6 +191,9 @@ teardown: pruning_config: tokens_freq_ratio_threshold: 4 tokens_weight_threshold: 0.4 + allowed_warnings: + - "text_expansion is deprecated. Use sparse_vector instead." + - match: { hits.total.value: 4 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } - match: { hits.hits.0._score: 3.0 } @@ -199,15 +212,19 @@ teardown: tokens_weight_threshold: 0.4 _name: i-like-naming-my-queries boost: 100.0 + allowed_warnings: + - "text_expansion is deprecated. Use sparse_vector instead." + - match: { hits.total.value: 4 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } - - match: { hits.hits.0.matched_queries: ["i-like-naming-my-queries"] } + - match: { hits.hits.0.matched_queries: [ "i-like-naming-my-queries" ] } - match: { hits.hits.0._score: 300.0 } --- "Test text expansion search with default pruning config": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "pruning introduced in 8.13.0" - do: @@ -219,14 +236,18 @@ teardown: ml.tokens: model_id: text_expansion_model model_text: "octopus comforter smells" - pruning_config: {} + pruning_config: { } + allowed_warnings: + - "text_expansion is deprecated. Use sparse_vector instead." + - match: { hits.total.value: 4 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } --- "Test text expansion search with weighted tokens rescoring only pruned tokens": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "pruning introduced in 8.13.0" - do: @@ -242,12 +263,16 @@ teardown: tokens_freq_ratio_threshold: 4 tokens_weight_threshold: 0.4 only_score_pruned_tokens: true + allowed_warnings: + - "text_expansion is deprecated. Use sparse_vector instead." + - match: { hits.total.value: 0 } --- "Test weighted tokens search": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "weighted token search introduced in 8.13.0" - do: @@ -257,18 +282,22 @@ teardown: query: weighted_tokens: ml.tokens: - tokens: [{"the": 1.0}, {"comforter":1.0}, {"smells":1.0}, {"bad": 1.0}] + tokens: [ { "the": 1.0 }, { "comforter": 1.0 }, { "smells": 1.0 }, { "bad": 1.0 } ] pruning_config: tokens_freq_ratio_threshold: 1 tokens_weight_threshold: 0.4 only_score_pruned_tokens: false + allowed_warnings: + - "weighted_tokens is deprecated and will be removed. Use sparse_vector instead." 
+ - match: { hits.total.value: 5 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } --- "Test weighted tokens search with default pruning config": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "weighted token search introduced in 8.13.0" - do: @@ -278,15 +307,19 @@ teardown: query: weighted_tokens: ml.tokens: - tokens: [{"the": 1.0}, {"comforter":1.0}, {"smells":1.0}, {"bad": 1.0}] - pruning_config: {} + tokens: [ { "the": 1.0 }, { "comforter": 1.0 }, { "smells": 1.0 }, { "bad": 1.0 } ] + pruning_config: { } + allowed_warnings: + - "weighted_tokens is deprecated and will be removed. Use sparse_vector instead." + - match: { hits.total.value: 5 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } --- "Test weighted tokens search only scoring pruned tokens": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "weighted token search introduced in 8.13.0" - do: @@ -296,17 +329,21 @@ teardown: query: weighted_tokens: ml.tokens: - tokens: [{"the": 1.0}, {"comforter":1.0}, {"smells":1.0}, {"bad": 1.0}] + tokens: [ { "the": 1.0 }, { "comforter": 1.0 }, { "smells": 1.0 }, { "bad": 1.0 } ] pruning_config: tokens_freq_ratio_threshold: 4 tokens_weight_threshold: 0.4 only_score_pruned_tokens: true + allowed_warnings: + - "weighted_tokens is deprecated and will be removed. Use sparse_vector instead." + - match: { hits.total.value: 0 } --- "Test weighted tokens search that prunes tokens based on frequency": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "weighted token search introduced in 8.13.0" - do: @@ -316,9 +353,12 @@ teardown: query: weighted_tokens: ml.tokens: - tokens: [{"the": 1.0}, {"octopus":1.0}, {"comforter":1.0}, {"is": 1.0}, {"the": 1.0}, {"best": 1.0}, {"of": 1.0}, {"the": 1.0}, {"bunch": 1.0}] + tokens: [ { "the": 1.0 }, { "octopus": 1.0 }, { "comforter": 1.0 }, { "is": 1.0 }, { "the": 1.0 }, { "best": 1.0 }, { "of": 1.0 }, { "the": 1.0 }, { "bunch": 1.0 } ] pruning_config: tokens_freq_ratio_threshold: 3 tokens_weight_threshold: 0.4 only_score_pruned_tokens: true + allowed_warnings: + - "weighted_tokens is deprecated and will be removed. Use sparse_vector instead." 
+ - match: { hits.total.value: 0 } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java index 8cb13398a70ae..fec85730aaf2b 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java @@ -95,6 +95,9 @@ public void testCondition() throws Exception { closeJob(job.getId()); List<AnomalyRecord> records = getRecords(job.getId()); + // remove records that are not anomalies + records.removeIf(record -> record.getInitialRecordScore() < 1e-5); + assertThat(records.size(), equalTo(1)); assertThat(records.get(0).getByFieldValue(), equalTo("high")); long firstRecordTimestamp = records.get(0).getTimestamp().getTime(); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlInitializationServiceIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlInitializationServiceIT.java index 30f84a97bcfb0..1d67639f712a0 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlInitializationServiceIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlInitializationServiceIT.java @@ -21,6 +21,7 @@ import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.MlDailyMaintenanceService; import org.elasticsearch.xpack.ml.MlInitializationService; +import org.elasticsearch.xpack.ml.inference.adaptiveallocations.AdaptiveAllocationsScalerService; import org.junit.Before; import java.util.List; @@ -47,7 +48,14 @@ public void setUpMocks() { when(threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME)).thenReturn(EsExecutors.DIRECT_EXECUTOR_SERVICE); MlDailyMaintenanceService mlDailyMaintenanceService = mock(MlDailyMaintenanceService.class); ClusterService clusterService = mock(ClusterService.class); - mlInitializationService = new MlInitializationService(client(), threadPool, mlDailyMaintenanceService, clusterService); + AdaptiveAllocationsScalerService adaptiveAllocationsScalerService = mock(AdaptiveAllocationsScalerService.class); + mlInitializationService = new MlInitializationService( + client(), + threadPool, + mlDailyMaintenanceService, + adaptiveAllocationsScalerService, + clusterService + ); } public void testThatMlIndicesBecomeHiddenWhenTheNodeBecomesMaster() throws Exception { diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TextExpansionQueryIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TextExpansionQueryIT.java index 26a18bc6d1d79..f1e8c9a67df44 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TextExpansionQueryIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TextExpansionQueryIT.java @@ -8,8 +8,10 @@ package org.elasticsearch.xpack.ml.integration; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import 
org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.WarningsHandler; import org.elasticsearch.core.Strings; import org.elasticsearch.xpack.core.ml.utils.MapHelper; @@ -269,6 +271,8 @@ public void testSearchWithMissingModel() throws IOException { protected Response textExpansionSearch(String index, String modelText, String modelId, String fieldName) throws IOException { Request request = new Request("GET", index + "/_search?error_trace=true"); + // Handle REST deprecation for text_expansion query + request.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(WarningsHandler.PERMISSIVE)); request.setJsonEntity(Strings.format(""" { @@ -281,6 +285,7 @@ protected Response textExpansionSearch(String index, String modelText, String mo } } }""", fieldName, modelId, modelText)); + return client().performRequest(request); } diff --git a/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/LearningToRankRescorerIT.java b/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/LearningToRankRescorerIT.java index f17d5bf00297f..b2a0b60aed7ba 100644 --- a/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/LearningToRankRescorerIT.java +++ b/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/LearningToRankRescorerIT.java @@ -31,151 +31,166 @@ public void setupModelAndData() throws IOException { putRegressionModel(MODEL_ID, """ { "description": "super complex model for tests", - "input": {"field_names": ["cost", "product"]}, "inference_config": { "learning_to_rank": { "feature_extractors": [ { "query_extractor": { - "feature_name": "two", - "query": {"script_score": {"query": {"match_all":{}}, "script": {"source": "return 2.0;"}}} + "feature_name": "cost", + "query": {"script_score": {"query": {"match_all":{}}, "script": {"source": "return doc['cost'].value;"}}} } }, { "query_extractor": { - "feature_name": "product_bm25", - "query": {"term": {"product": "{{keyword}}"}} + "feature_name": "type_tv", + "query": {"constant_score": {"filter": {"term": { "product": "TV" }}, "boost": 1.0}} } + }, + { + "query_extractor": { + "feature_name": "type_vcr", + "query": {"constant_score": {"filter": {"term": { "product": "VCR" }}, "boost": 1.0}} + } + }, + { + "query_extractor": { + "feature_name": "type_laptop", + "query": {"constant_score": {"filter": {"term": { "product": "Laptop" }}, "boost": 1.0}} + } + }, + { + "query_extractor": { + "feature_name": "two", + "query": { "script_score": { "query": { "match_all": {} }, "script": { "source": "return 2.0;" } } } + } + }, + { + "query_extractor": { + "feature_name": "product_bm25", + "query": { "term": { "product": "{{keyword}}" } } + } } ] } }, "definition": { - "preprocessors" : [{ - "one_hot_encoding": { - "field": "product", - "hot_map": { - "TV": "type_tv", - "VCR": "type_vcr", - "Laptop": "type_laptop" - } - } - }], "trained_model": { "ensemble": { "feature_names": ["cost", "type_tv", "type_vcr", "type_laptop", "two", "product_bm25"], "target_type": "regression", "trained_models": [ - { - "tree": { - "feature_names": ["cost"], - "tree_structure": [ - { - "node_index": 0, - "split_feature": 0, - "split_gain": 12, - "threshold": 400, - "decision_type": "lte", - "default_left": true, - "left_child": 1, - "right_child": 2 - }, - { - "node_index": 1, - "leaf_value": 5.0 - }, - { - "node_index": 2, - 
"leaf_value": 2.0 - } - ], - "target_type": "regression" + { + "tree": { + "feature_names": [ + "cost" + ], + "tree_structure": [ + { + "node_index": 0, + "split_feature": 0, + "split_gain": 12, + "threshold": 400, + "decision_type": "lte", + "default_left": true, + "left_child": 1, + "right_child": 2 + }, + { + "node_index": 1, + "leaf_value": 5.0 + }, + { + "node_index": 2, + "leaf_value": 2.0 } - }, - { - "tree": { - "feature_names": [ - "type_tv" - ], - "tree_structure": [ - { - "node_index": 0, - "split_feature": 0, - "split_gain": 12, - "threshold": 1, - "decision_type": "lt", - "default_left": true, - "left_child": 1, - "right_child": 2 - }, - { - "node_index": 1, - "leaf_value": 1.0 - }, - { - "node_index": 2, - "leaf_value": 12.0 - } - ], - "target_type": "regression" + ], + "target_type": "regression" + } + }, + { + "tree": { + "feature_names": [ + "type_tv" + ], + "tree_structure": [ + { + "node_index": 0, + "split_feature": 0, + "split_gain": 12, + "threshold": 1, + "decision_type": "lt", + "default_left": true, + "left_child": 1, + "right_child": 2 + }, + { + "node_index": 1, + "leaf_value": 1.0 + }, + { + "node_index": 2, + "leaf_value": 12.0 } - }, - { - "tree": { - "feature_names": [ - "two" - ], - "tree_structure": [ - { - "node_index": 0, - "split_feature": 0, - "split_gain": 12, - "threshold": 1, - "decision_type": "lt", - "default_left": true, - "left_child": 1, - "right_child": 2 - }, - { - "node_index": 1, - "leaf_value": 1.0 - }, - { - "node_index": 2, - "leaf_value": 2.0 - } - ], - "target_type": "regression" + ], + "target_type": "regression" + } + }, + { + "tree": { + "feature_names": [ + "two" + ], + "tree_structure": [ + { + "node_index": 0, + "split_feature": 0, + "split_gain": 12, + "threshold": 1, + "decision_type": "lt", + "default_left": true, + "left_child": 1, + "right_child": 2 + }, + { + "node_index": 1, + "leaf_value": 1.0 + }, + { + "node_index": 2, + "leaf_value": 2.0 } - }, - { - "tree": { - "feature_names": [ - "product_bm25" - ], - "tree_structure": [ - { - "node_index": 0, - "split_feature": 0, - "split_gain": 12, - "threshold": 1, - "decision_type": "lt", - "default_left": true, - "left_child": 1, - "right_child": 2 - }, - { - "node_index": 1, - "leaf_value": 1.0 - }, - { - "node_index": 2, - "leaf_value": 4.0 - } - ], - "target_type": "regression" + ], + "target_type": "regression" + } + }, + { + "tree": { + "feature_names": [ + "product_bm25" + ], + "tree_structure": [ + { + "node_index": 0, + "split_feature": 0, + "split_gain": 12, + "threshold": 1, + "decision_type": "lt", + "default_left": true, + "left_child": 1, + "right_child": 2 + }, + { + "node_index": 1, + "leaf_value": 1.0 + }, + { + "node_index": 2, + "leaf_value": 4.0 } + ], + "target_type": "regression" } + } ] } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 6fdc4e73e184f..22a9c2dbcc281 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -1282,6 +1282,7 @@ public Collection createComponents(PluginServices services) { threadPool, clusterService, client, + inferenceAuditor, mlAssignmentNotifier, machineLearningExtension.get().isAnomalyDetectionEnabled(), machineLearningExtension.get().isDataFrameAnalyticsEnabled(), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java index a2d8fd1d60316..a1664b7023fc0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java @@ -32,6 +32,9 @@ import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.annotations.AnnotationIndex; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsFeatureFlag; +import org.elasticsearch.xpack.ml.inference.adaptiveallocations.AdaptiveAllocationsScalerService; +import org.elasticsearch.xpack.ml.notifications.InferenceAuditor; import java.util.Collections; import java.util.Map; @@ -55,6 +58,8 @@ public final class MlInitializationService implements ClusterStateListener { private final MlDailyMaintenanceService mlDailyMaintenanceService; + private final AdaptiveAllocationsScalerService adaptiveAllocationsScalerService; + private boolean isMaster = false; MlInitializationService( @@ -62,6 +67,7 @@ public final class MlInitializationService implements ClusterStateListener { ThreadPool threadPool, ClusterService clusterService, Client client, + InferenceAuditor inferenceAuditor, MlAssignmentNotifier mlAssignmentNotifier, boolean isAnomalyDetectionEnabled, boolean isDataFrameAnalyticsEnabled, @@ -81,6 +87,7 @@ public final class MlInitializationService implements ClusterStateListener { isDataFrameAnalyticsEnabled, isNlpEnabled ), + new AdaptiveAllocationsScalerService(threadPool, clusterService, client, inferenceAuditor, isNlpEnabled), clusterService ); } @@ -90,11 +97,13 @@ public MlInitializationService( Client client, ThreadPool threadPool, MlDailyMaintenanceService dailyMaintenanceService, + AdaptiveAllocationsScalerService adaptiveAllocationsScalerService, ClusterService clusterService ) { this.client = Objects.requireNonNull(client); this.threadPool = threadPool; this.mlDailyMaintenanceService = dailyMaintenanceService; + this.adaptiveAllocationsScalerService = adaptiveAllocationsScalerService; clusterService.addListener(this); clusterService.addLifecycleListener(new LifecycleListener() { @Override @@ -115,11 +124,17 @@ public void beforeStop() { public void onMaster() { mlDailyMaintenanceService.start(); + if (AdaptiveAllocationsFeatureFlag.isEnabled()) { + adaptiveAllocationsScalerService.start(); + } threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(this::makeMlInternalIndicesHidden); } public void offMaster() { mlDailyMaintenanceService.stop(); + if (AdaptiveAllocationsFeatureFlag.isEnabled()) { + adaptiveAllocationsScalerService.stop(); + } } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCreateTrainedModelAssignmentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCreateTrainedModelAssignmentAction.java index 348cb396f9c9f..30371fcbe115a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCreateTrainedModelAssignmentAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCreateTrainedModelAssignmentAction.java @@ -75,7 +75,7 @@ public TransportCreateTrainedModelAssignmentAction( @Override protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) throws Exception { trainedModelAssignmentClusterService.createNewModelAssignment( - 
request.getTaskParams(), + request, listener.delegateFailureAndWrap((l, trainedModelAssignment) -> l.onResponse(new Response(trainedModelAssignment))) ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java index 04b597292dad6..590aeded2b674 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java @@ -238,6 +238,7 @@ static GetDeploymentStatsAction.Response addFailedRoutes( stat.getModelId(), stat.getThreadsPerAllocation(), stat.getNumberOfAllocations(), + stat.getAdaptiveAllocationsSettings(), stat.getQueueCapacity(), stat.getCacheSize(), stat.getStartTime(), @@ -277,6 +278,7 @@ static GetDeploymentStatsAction.Response addFailedRoutes( assignment.getModelId(), assignment.getTaskParams().getThreadsPerAllocation(), assignment.getTaskParams().getNumberOfAllocations(), + assignment.getAdaptiveAllocationsSettings(), assignment.getTaskParams().getQueueCapacity(), assignment.getTaskParams().getCacheSize().orElse(null), assignment.getStartTime(), @@ -346,6 +348,7 @@ protected void taskOperation( task.getParams().getModelId(), task.getParams().getThreadsPerAllocation(), assignment == null ? task.getParams().getNumberOfAllocations() : assignment.getTaskParams().getNumberOfAllocations(), + assignment == null ? null : assignment.getAdaptiveAllocationsSettings(), task.getParams().getQueueCapacity(), task.getParams().getCacheSize().orElse(null), TrainedModelAssignmentMetadata.fromState(clusterService.state()) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java index de93a41fb7296..ae0da7dc9cc69 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java @@ -207,7 +207,7 @@ protected void masterOperation( modelIdAndSizeInBytes.v1(), request.getDeploymentId(), modelIdAndSizeInBytes.v2(), - request.getNumberOfAllocations(), + request.computeNumberOfAllocations(), request.getThreadsPerAllocation(), request.getQueueCapacity(), Optional.ofNullable(request.getCacheSize()).orElse(ByteSizeValue.ofBytes(modelIdAndSizeInBytes.v2())), @@ -219,7 +219,10 @@ protected void masterOperation( memoryTracker.refresh( persistentTasks, ActionListener.wrap( - aVoid -> trainedModelAssignmentService.createNewModelAssignment(taskParams, waitForDeploymentToStart), + aVoid -> trainedModelAssignmentService.createNewModelAssignment( + new CreateTrainedModelAssignmentAction.Request(taskParams, request.getAdaptiveAllocationsSettings()), + waitForDeploymentToStart + ), listener::onFailure ) ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateTrainedModelDeploymentAction.java index 7d4143d9e722a..fa38b30ae8b84 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateTrainedModelDeploymentAction.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateTrainedModelDeploymentAction.java @@ -81,9 +81,11 @@ protected void masterOperation( ) ); - trainedModelAssignmentClusterService.updateNumberOfAllocations( + trainedModelAssignmentClusterService.updateDeployment( request.getDeploymentId(), request.getNumberOfAllocations(), + request.getAdaptiveAllocationsSettings(), + request.isInternal(), ActionListener.wrap(updatedAssignment -> { auditor.info( request.getDeploymentId(), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/InternalCategorizationAggregation.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/InternalCategorizationAggregation.java index 83338453050e0..7ef7a8f4e6dd5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/InternalCategorizationAggregation.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/InternalCategorizationAggregation.java @@ -198,7 +198,7 @@ public String toString() { + ", docCount=" + serializableCategory.getNumMatches() + ", aggregations=" - + aggregations.asMap() + + aggregations.asList() + "}\n"; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScaler.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScaler.java new file mode 100644 index 0000000000000..15f647bc76697 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScaler.java @@ -0,0 +1,164 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.inference.adaptiveallocations; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.Strings; + +/** + * Processes measured requests counts and inference times and decides whether + * the number of allocations should be scaled up or down. + */ +public class AdaptiveAllocationsScaler { + + // visible for testing + static final double SCALE_UP_THRESHOLD = 0.9; + private static final double SCALE_DOWN_THRESHOLD = 0.85; + + /** + * If the max_number_of_allocations is not set, use this value for now to prevent scaling up + * to high numbers due to possible bugs or unexpected behaviour in the scaler. + * TODO(jan): remove this safeguard when the scaler behaves as expected in production. + */ + private static final int MAX_NUMBER_OF_ALLOCATIONS_SAFEGUARD = 32; + + private static final Logger logger = LogManager.getLogger(AdaptiveAllocationsScaler.class); + + private final String deploymentId; + private final KalmanFilter1d requestRateEstimator; + private final KalmanFilter1d inferenceTimeEstimator; + + private int numberOfAllocations; + private Integer minNumberOfAllocations; + private Integer maxNumberOfAllocations; + private boolean dynamicsChanged; + + AdaptiveAllocationsScaler(String deploymentId, int numberOfAllocations) { + this.deploymentId = deploymentId; + // A smoothing factor of 100 roughly means the last 100 measurements have an effect + // on the estimated values. The sampling time is 10 seconds, so approximately the + // last 15 minutes are taken into account. 
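+        // (The smoothing factor sets the filter's process variance: each measurement variance is
+        // divided by it, so a larger factor means slower adaptation and heavier smoothing.)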
+ // For the request rate, use auto-detection for dynamics changes, because the request + // rate maybe change due to changed user behaviour. + // For the inference time, don't use this auto-detection. The dynamics may change when + // the number of allocations changes, which is passed explicitly to the estimator. + requestRateEstimator = new KalmanFilter1d(deploymentId + ":rate", 100, true); + inferenceTimeEstimator = new KalmanFilter1d(deploymentId + ":time", 100, false); + this.numberOfAllocations = numberOfAllocations; + this.minNumberOfAllocations = null; + this.maxNumberOfAllocations = null; + this.dynamicsChanged = false; + } + + void setMinMaxNumberOfAllocations(Integer minNumberOfAllocations, Integer maxNumberOfAllocations) { + this.minNumberOfAllocations = minNumberOfAllocations; + this.maxNumberOfAllocations = maxNumberOfAllocations; + } + + void process(AdaptiveAllocationsScalerService.Stats stats, double timeIntervalSeconds, int numberOfAllocations) { + // The request rate (per second) is the request count divided by the time. + // Assuming a Poisson process for the requests, the variance in the request + // count equals the mean request count, and the variance in the request rate + // equals that variance divided by the time interval squared. + // The minimum request count is set to 1, because lower request counts can't + // be reliably measured. + // The estimated request rate should be used for the variance calculations, + // because the measured request rate gives biased estimates. + double requestRate = (double) stats.requestCount() / timeIntervalSeconds; + double requestRateEstimate = requestRateEstimator.hasValue() ? requestRateEstimator.estimate() : requestRate; + double requestRateVariance = Math.max(1.0, requestRateEstimate * timeIntervalSeconds) / Math.pow(timeIntervalSeconds, 2); + requestRateEstimator.add(requestRate, requestRateVariance, false); + + if (stats.requestCount() > 0 && Double.isNaN(stats.inferenceTime()) == false) { + // The inference time distribution is unknown. For simplicity, we assume + // a std.error equal to the mean, so that the variance equals the mean + // value squared. The variance of the mean is inversely proportional to + // the number of inference measurements it contains. + // Again, the estimated inference time should be used for the variance + // calculations to prevent biased estimates. + double inferenceTime = stats.inferenceTime(); + double inferenceTimeEstimate = inferenceTimeEstimator.hasValue() ? inferenceTimeEstimator.estimate() : inferenceTime; + double inferenceTimeVariance = Math.pow(inferenceTimeEstimate, 2) / stats.requestCount(); + inferenceTimeEstimator.add(inferenceTime, inferenceTimeVariance, dynamicsChanged); + } + + this.numberOfAllocations = numberOfAllocations; + dynamicsChanged = false; + } + + double getLoadLower() { + double requestRateLower = Math.max(0.0, requestRateEstimator.lower()); + double inferenceTimeLower = Math.max(0.0, inferenceTimeEstimator.hasValue() ? inferenceTimeEstimator.lower() : 1.0); + return requestRateLower * inferenceTimeLower; + } + + double getLoadUpper() { + double requestRateUpper = requestRateEstimator.upper(); + double inferenceTimeUpper = inferenceTimeEstimator.hasValue() ? 
inferenceTimeEstimator.upper() : 1.0; + return requestRateUpper * inferenceTimeUpper; + } + + Integer scale() { + if (requestRateEstimator.hasValue() == false) { + return null; + } + + int oldNumberOfAllocations = numberOfAllocations; + + double loadLower = getLoadLower(); + while (loadLower / numberOfAllocations > SCALE_UP_THRESHOLD) { + numberOfAllocations++; + } + + double loadUpper = getLoadUpper(); + while (numberOfAllocations > 1 && loadUpper / (numberOfAllocations - 1) < SCALE_DOWN_THRESHOLD) { + numberOfAllocations--; + } + + if (maxNumberOfAllocations == null) { + numberOfAllocations = Math.min(numberOfAllocations, MAX_NUMBER_OF_ALLOCATIONS_SAFEGUARD); + } + if (minNumberOfAllocations != null) { + numberOfAllocations = Math.max(numberOfAllocations, minNumberOfAllocations); + } + if (maxNumberOfAllocations != null) { + numberOfAllocations = Math.min(numberOfAllocations, maxNumberOfAllocations); + } + + if (numberOfAllocations != oldNumberOfAllocations) { + logger.debug( + () -> Strings.format( + "[%s] adaptive allocations scaler: load in [%.3f, %.3f], scaling from %d to %d allocations.", + deploymentId, + loadLower, + loadUpper, + oldNumberOfAllocations, + numberOfAllocations + ) + ); + } else { + logger.debug( + () -> Strings.format( + "[%s] adaptive allocations scaler: load in [%.3f, %.3f], keeping %d allocations.", + deploymentId, + loadLower, + loadUpper, + numberOfAllocations + ) + ); + } + + if (numberOfAllocations != oldNumberOfAllocations) { + this.dynamicsChanged = true; + return numberOfAllocations; + } else { + return null; + } + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerService.java new file mode 100644 index 0000000000000..30e3871ad5ad0 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerService.java @@ -0,0 +1,340 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ml.inference.adaptiveallocations; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.threadpool.Scheduler; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.ml.action.GetDeploymentStatsAction; +import org.elasticsearch.xpack.core.ml.action.UpdateTrainedModelDeploymentAction; +import org.elasticsearch.xpack.core.ml.inference.assignment.AssignmentStats; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; +import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.ml.notifications.InferenceAuditor; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * Periodically schedules adaptive allocations scaling. This process consists + * of calling the trained model stats API, processing the results, determining + * whether scaling should be applied, and potentially calling the trained + * model update API. + */ +public class AdaptiveAllocationsScalerService implements ClusterStateListener { + + record Stats(long successCount, long pendingCount, long failedCount, double inferenceTime) { + + long requestCount() { + return successCount + pendingCount + failedCount; + } + + double totalInferenceTime() { + return successCount * inferenceTime; + } + + Stats add(Stats value) { + long newSuccessCount = successCount + value.successCount; + long newPendingCount = pendingCount + value.pendingCount; + long newFailedCount = failedCount + value.failedCount; + double newInferenceTime = newSuccessCount > 0 + ? (totalInferenceTime() + value.totalInferenceTime()) / newSuccessCount + : Double.NaN; + return new Stats(newSuccessCount, newPendingCount, newFailedCount, newInferenceTime); + } + + Stats sub(Stats value) { + long newSuccessCount = Math.max(0, successCount - value.successCount); + long newPendingCount = Math.max(0, pendingCount - value.pendingCount); + long newFailedCount = Math.max(0, failedCount - value.failedCount); + double newInferenceTime = newSuccessCount > 0 + ? (totalInferenceTime() - value.totalInferenceTime()) / newSuccessCount + : Double.NaN; + return new Stats(newSuccessCount, newPendingCount, newFailedCount, newInferenceTime); + } + } + + /** + * The time interval between the adaptive allocations triggers. + */ + private static final int DEFAULT_TIME_INTERVAL_SECONDS = 10; + /** + * The time that has to pass after scaling up, before scaling down is allowed. + * Note that the ML autoscaling has its own cooldown time to release the hardware. 
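+     * Scale-ups are never delayed by this cooldown; only scale-downs that follow a recent scale-up are skipped.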
+ */ + private static final long SCALE_UP_COOLDOWN_TIME_MILLIS = TimeValue.timeValueMinutes(5).getMillis(); + + private static final Logger logger = LogManager.getLogger(AdaptiveAllocationsScalerService.class); + + private final int timeIntervalSeconds; + private final ThreadPool threadPool; + private final ClusterService clusterService; + private final Client client; + private final InferenceAuditor inferenceAuditor; + private final boolean isNlpEnabled; + private final Map> lastInferenceStatsByDeploymentAndNode; + private Long lastInferenceStatsTimestampMillis; + private final Map scalers; + private final Map lastScaleUpTimesMillis; + + private volatile Scheduler.Cancellable cancellable; + private final AtomicBoolean busy; + + public AdaptiveAllocationsScalerService( + ThreadPool threadPool, + ClusterService clusterService, + Client client, + InferenceAuditor inferenceAuditor, + boolean isNlpEnabled + ) { + this(threadPool, clusterService, client, inferenceAuditor, isNlpEnabled, DEFAULT_TIME_INTERVAL_SECONDS); + } + + // visible for testing + AdaptiveAllocationsScalerService( + ThreadPool threadPool, + ClusterService clusterService, + Client client, + InferenceAuditor inferenceAuditor, + boolean isNlpEnabled, + int timeIntervalSeconds + ) { + this.threadPool = threadPool; + this.clusterService = clusterService; + this.client = client; + this.inferenceAuditor = inferenceAuditor; + this.isNlpEnabled = isNlpEnabled; + this.timeIntervalSeconds = timeIntervalSeconds; + + lastInferenceStatsByDeploymentAndNode = new HashMap<>(); + lastInferenceStatsTimestampMillis = null; + lastScaleUpTimesMillis = new HashMap<>(); + scalers = new HashMap<>(); + busy = new AtomicBoolean(false); + } + + public synchronized void start() { + updateAutoscalers(clusterService.state()); + clusterService.addListener(this); + if (scalers.isEmpty() == false) { + startScheduling(); + } + } + + public synchronized void stop() { + stopScheduling(); + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + updateAutoscalers(event.state()); + if (scalers.isEmpty() == false) { + startScheduling(); + } else { + stopScheduling(); + } + } + + private synchronized void updateAutoscalers(ClusterState state) { + if (isNlpEnabled == false) { + return; + } + Set deploymentIds = new HashSet<>(); + TrainedModelAssignmentMetadata assignments = TrainedModelAssignmentMetadata.fromState(state); + for (TrainedModelAssignment assignment : assignments.allAssignments().values()) { + deploymentIds.add(assignment.getDeploymentId()); + if (assignment.getAdaptiveAllocationsSettings() != null && assignment.getAdaptiveAllocationsSettings().getEnabled()) { + AdaptiveAllocationsScaler adaptiveAllocationsScaler = scalers.computeIfAbsent( + assignment.getDeploymentId(), + key -> new AdaptiveAllocationsScaler(assignment.getDeploymentId(), assignment.totalTargetAllocations()) + ); + adaptiveAllocationsScaler.setMinMaxNumberOfAllocations( + assignment.getAdaptiveAllocationsSettings().getMinNumberOfAllocations(), + assignment.getAdaptiveAllocationsSettings().getMaxNumberOfAllocations() + ); + } else { + scalers.remove(assignment.getDeploymentId()); + lastInferenceStatsByDeploymentAndNode.remove(assignment.getDeploymentId()); + } + } + scalers.keySet().removeIf(key -> deploymentIds.contains(key) == false); + } + + private synchronized void startScheduling() { + if (cancellable == null) { + logger.debug("Starting ML adaptive allocations scaler"); + try { + cancellable = threadPool.scheduleWithFixedDelay( + this::trigger, + 
TimeValue.timeValueSeconds(timeIntervalSeconds), + threadPool.generic() + ); + } catch (EsRejectedExecutionException e) { + if (e.isExecutorShutdown() == false) { + throw e; + } + } + } + } + + private synchronized void stopScheduling() { + if (cancellable != null && cancellable.isCancelled() == false) { + logger.debug("Stopping ML adaptive allocations scaler"); + cancellable.cancel(); + cancellable = null; + } + } + + private void trigger() { + if (busy.getAndSet(true)) { + logger.debug("Skipping inference adaptive allocations scaling, because it's still busy."); + return; + } + ActionListener listener = ActionListener.runAfter( + ActionListener.wrap(this::processDeploymentStats, e -> logger.warn("Error in inference adaptive allocations scaling", e)), + () -> busy.set(false) + ); + getDeploymentStats(listener); + } + + private void getDeploymentStats(ActionListener processDeploymentStats) { + String deploymentIds = String.join(",", scalers.keySet()); + ClientHelper.executeAsyncWithOrigin( + client, + ClientHelper.ML_ORIGIN, + GetDeploymentStatsAction.INSTANCE, + // TODO(dave/jan): create a lightweight version of this request, because the current one + // collects too much data for the adaptive allocations scaler. + new GetDeploymentStatsAction.Request(deploymentIds), + processDeploymentStats + ); + } + + private void processDeploymentStats(GetDeploymentStatsAction.Response statsResponse) { + Double statsTimeInterval; + long now = System.currentTimeMillis(); + if (lastInferenceStatsTimestampMillis != null) { + statsTimeInterval = (now - lastInferenceStatsTimestampMillis) / 1000.0; + } else { + statsTimeInterval = null; + } + lastInferenceStatsTimestampMillis = now; + + Map recentStatsByDeployment = new HashMap<>(); + Map numberOfAllocations = new HashMap<>(); + + for (AssignmentStats assignmentStats : statsResponse.getStats().results()) { + String deploymentId = assignmentStats.getDeploymentId(); + numberOfAllocations.put(deploymentId, assignmentStats.getNumberOfAllocations()); + Map deploymentStats = lastInferenceStatsByDeploymentAndNode.computeIfAbsent( + deploymentId, + key -> new HashMap<>() + ); + for (AssignmentStats.NodeStats nodeStats : assignmentStats.getNodeStats()) { + String nodeId = nodeStats.getNode().getId(); + Stats lastStats = deploymentStats.get(nodeId); + Stats nextStats = new Stats( + nodeStats.getInferenceCount().orElse(0L), + nodeStats.getPendingCount() == null ? 0 : nodeStats.getPendingCount(), + nodeStats.getErrorCount() + nodeStats.getTimeoutCount() + nodeStats.getRejectedExecutionCount(), + nodeStats.getAvgInferenceTime().orElse(0.0) / 1000.0 + ); + deploymentStats.put(nodeId, nextStats); + if (lastStats != null) { + Stats recentStats = nextStats.sub(lastStats); + recentStatsByDeployment.compute( + assignmentStats.getDeploymentId(), + (key, value) -> value == null ? 
recentStats : value.add(recentStats) + ); + } + } + } + + if (statsTimeInterval == null) { + return; + } + + for (Map.Entry deploymentAndStats : recentStatsByDeployment.entrySet()) { + String deploymentId = deploymentAndStats.getKey(); + Stats stats = deploymentAndStats.getValue(); + AdaptiveAllocationsScaler adaptiveAllocationsScaler = scalers.get(deploymentId); + adaptiveAllocationsScaler.process(stats, statsTimeInterval, numberOfAllocations.get(deploymentId)); + Integer newNumberOfAllocations = adaptiveAllocationsScaler.scale(); + if (newNumberOfAllocations != null) { + Long lastScaleUpTimeMillis = lastScaleUpTimesMillis.get(deploymentId); + if (newNumberOfAllocations < numberOfAllocations.get(deploymentId) + && lastScaleUpTimeMillis != null + && now < lastScaleUpTimeMillis + SCALE_UP_COOLDOWN_TIME_MILLIS) { + logger.debug("adaptive allocations scaler: skipping scaling down [{}] because of recent scaleup.", deploymentId); + continue; + } + if (newNumberOfAllocations > numberOfAllocations.get(deploymentId)) { + lastScaleUpTimesMillis.put(deploymentId, now); + } + UpdateTrainedModelDeploymentAction.Request updateRequest = new UpdateTrainedModelDeploymentAction.Request(deploymentId); + updateRequest.setNumberOfAllocations(newNumberOfAllocations); + updateRequest.setIsInternal(true); + ClientHelper.executeAsyncWithOrigin( + client, + ClientHelper.ML_ORIGIN, + UpdateTrainedModelDeploymentAction.INSTANCE, + updateRequest, + ActionListener.wrap(updateResponse -> { + logger.info("adaptive allocations scaler: scaled [{}] to [{}] allocations.", deploymentId, newNumberOfAllocations); + threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME) + .execute( + () -> inferenceAuditor.info( + deploymentId, + Strings.format( + "adaptive allocations scaler: scaled [%s] to [%s] allocations.", + deploymentId, + newNumberOfAllocations + ) + ) + ); + }, e -> { + logger.atLevel(Level.WARN) + .withThrowable(e) + .log( + "adaptive allocations scaler: scaling [{}] to [{}] allocations failed.", + deploymentId, + newNumberOfAllocations + ); + threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME) + .execute( + () -> inferenceAuditor.warning( + deploymentId, + Strings.format( + "adaptive allocations scaler: scaling [%s] to [%s] allocations failed.", + deploymentId, + newNumberOfAllocations + ) + ) + ); + }) + ); + } + } + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/KalmanFilter1d.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/KalmanFilter1d.java new file mode 100644 index 0000000000000..ad3e66fc3e8e2 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/KalmanFilter1d.java @@ -0,0 +1,121 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.inference.adaptiveallocations; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.Strings; + +/** + * Estimator for the mean value and stderr of a series of measurements. + *
      + * This implements a 1d Kalman filter with manoeuvre detection. Rather than a derived + * dynamics model we simply fix how much we want to smooth in the steady state. + * See also: Wikipedia. + */ +class KalmanFilter1d { + + private static final Logger logger = LogManager.getLogger(KalmanFilter1d.class); + + private final String name; + private final double smoothingFactor; + private final boolean autodetectDynamicsChange; + + private double value; + private double variance; + private boolean dynamicsChangedLastTime; + + KalmanFilter1d(String name, double smoothingFactor, boolean autodetectDynamicsChange) { + this.name = name; + this.smoothingFactor = smoothingFactor; + this.autodetectDynamicsChange = autodetectDynamicsChange; + this.value = Double.MAX_VALUE; + this.variance = Double.MAX_VALUE; + this.dynamicsChangedLastTime = false; + } + + /** + * Adds a measurement (value, variance) to the estimator. + * dynamicChangedExternal indicates whether the underlying possibly changed before this measurement. + */ + void add(double value, double variance, boolean dynamicChangedExternal) { + boolean dynamicChanged; + if (hasValue() == false) { + dynamicChanged = true; + this.value = value; + this.variance = variance; + } else { + double processVariance = variance / smoothingFactor; + dynamicChanged = dynamicChangedExternal || detectDynamicsChange(value, variance); + if (dynamicChanged || dynamicsChangedLastTime) { + // If we know we likely had a change in the quantity we're estimating or the prediction + // is 10 stddev off, we inject extra noise in the dynamics for this step. + processVariance = Math.pow(value, 2); + } + + double gain = (this.variance + processVariance) / (this.variance + processVariance + variance); + this.value += gain * (value - this.value); + this.variance = (1 - gain) * (this.variance + processVariance); + } + dynamicsChangedLastTime = dynamicChanged; + logger.debug( + () -> Strings.format( + "[%s] measurement %.3f ± %.3f: estimate %.3f ± %.3f (dynamic changed: %s).", + name, + value, + Math.sqrt(variance), + this.value, + Math.sqrt(this.variance), + dynamicChanged + ) + ); + } + + /** + * Returns whether the estimator has received data and contains a value. + */ + boolean hasValue() { + return this.value < Double.MAX_VALUE && this.variance < Double.MAX_VALUE; + } + + /** + * Returns the estimate of the mean value. + */ + double estimate() { + return value; + } + + /** + * Returns the stderr of the estimate. + */ + double error() { + return Math.sqrt(this.variance); + } + + /** + * Returns the lowerbound of the 1 stddev confidence interval of the estimate. + */ + double lower() { + return value - error(); + } + + /** + * Returns the upperbound of the 1 stddev confidence interval of the estimate. + */ + double upper() { + return value + error(); + } + + /** + * Returns whether (value, variance) is very unlikely, indicating that + * the underlying dynamics have changed. 
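+     * The check fires when the squared distance between the measurement and the current estimate
+     * exceeds 100 times their combined variance, i.e. the measurement is roughly 10 standard deviations away.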
+ */ + private boolean detectDynamicsChange(double value, double variance) { + return hasValue() && autodetectDynamicsChange && Math.pow(Math.abs(value - this.value), 2) / (variance + this.variance) > 100.0; + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java index f468e5239fd29..e86a9cfe94045 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java @@ -14,6 +14,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -26,6 +27,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.set.Sets; @@ -38,8 +40,10 @@ import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; +import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.action.UpdateTrainedModelAssignmentRoutingInfoAction; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings; import org.elasticsearch.xpack.core.ml.inference.assignment.AssignmentState; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; @@ -68,6 +72,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction.Request.NUMBER_OF_ALLOCATIONS; import static org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentUtils.NODES_CHANGED_REASON; import static org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentUtils.createShuttingDownRoute; @@ -393,7 +398,7 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) } public void createNewModelAssignment( - StartTrainedModelDeploymentAction.TaskParams params, + CreateTrainedModelAssignmentAction.Request request, ActionListener listener ) { if (clusterService.state().getMinTransportVersion().before(DISTRIBUTED_MODEL_ALLOCATION_TRANSPORT_VERSION)) { @@ -401,8 +406,8 @@ public void createNewModelAssignment( new ElasticsearchStatusException( "cannot create new assignment [{}] for model [{}] while cluster upgrade is in progress", RestStatus.CONFLICT, - params.getDeploymentId(), - params.getModelId() + request.getTaskParams().getDeploymentId(), + request.getTaskParams().getModelId() ) ); return; @@ -413,20 +418,20 @@ public void createNewModelAssignment( new 
ElasticsearchStatusException( "cannot create new assignment [{}] for model [{}] while feature reset is in progress.", RestStatus.CONFLICT, - params.getDeploymentId(), - params.getModelId() + request.getTaskParams().getDeploymentId(), + request.getTaskParams().getModelId() ) ); return; } - rebalanceAssignments(clusterService.state(), Optional.of(params), "model deployment started", ActionListener.wrap(newMetadata -> { - TrainedModelAssignment assignment = newMetadata.getDeploymentAssignment(params.getDeploymentId()); + rebalanceAssignments(clusterService.state(), Optional.of(request), "model deployment started", ActionListener.wrap(newMetadata -> { + TrainedModelAssignment assignment = newMetadata.getDeploymentAssignment(request.getTaskParams().getDeploymentId()); if (assignment == null) { // If we could not allocate the model anywhere then it is possible the assignment // here is null. We should notify the listener of an empty assignment as the // handling of this is done elsewhere with the wait-to-start predicate. - assignment = TrainedModelAssignment.Builder.empty(params).build(); + assignment = TrainedModelAssignment.Builder.empty(request).build(); } listener.onResponse(assignment); }, listener::onFailure)); @@ -528,13 +533,13 @@ private static ClusterState forceUpdate(ClusterState currentState, TrainedModelA return ClusterState.builder(currentState).metadata(metadata).build(); } - ClusterState createModelAssignment(ClusterState currentState, StartTrainedModelDeploymentAction.TaskParams params) throws Exception { - return update(currentState, rebalanceAssignments(currentState, Optional.of(params))); + ClusterState createModelAssignment(ClusterState currentState, CreateTrainedModelAssignmentAction.Request request) throws Exception { + return update(currentState, rebalanceAssignments(currentState, Optional.of(request))); } private void rebalanceAssignments( ClusterState clusterState, - Optional modelToAdd, + Optional createAssignmentRequest, String reason, ActionListener listener ) { @@ -544,7 +549,7 @@ private void rebalanceAssignments( TrainedModelAssignmentMetadata.Builder rebalancedMetadata; try { - rebalancedMetadata = rebalanceAssignments(clusterState, modelToAdd); + rebalancedMetadata = rebalanceAssignments(clusterState, createAssignmentRequest); } catch (Exception e) { listener.onFailure(e); return; @@ -561,7 +566,7 @@ public ClusterState execute(ClusterState currentState) { currentState = stopPlatformSpecificModelsInHeterogeneousClusters( currentState, mlNodesArchitectures, - modelToAdd, + createAssignmentRequest.map(CreateTrainedModelAssignmentAction.Request::getTaskParams), clusterState ); @@ -572,7 +577,7 @@ public ClusterState execute(ClusterState currentState) { return updatedState; } - rebalanceAssignments(currentState, modelToAdd, reason, listener); + rebalanceAssignments(currentState, createAssignmentRequest, reason, listener); return currentState; } @@ -639,7 +644,7 @@ && detectNodeLoads(sourceNodes, source).equals(detectNodeLoads(targetNodes, targ private TrainedModelAssignmentMetadata.Builder rebalanceAssignments( ClusterState currentState, - Optional modelToAdd + Optional createAssignmentRequest ) throws Exception { List nodes = getAssignableNodes(currentState); logger.debug(() -> format("assignable nodes are %s", nodes.stream().map(DiscoveryNode::getId).toList())); @@ -651,7 +656,7 @@ private TrainedModelAssignmentMetadata.Builder rebalanceAssignments( currentMetadata, nodeLoads, nodeAvailabilityZoneMapper.buildMlNodesByAvailabilityZone(currentState), - modelToAdd, + 
createAssignmentRequest, allocatedProcessorsScale, useNewMemoryFields ); @@ -668,8 +673,12 @@ private TrainedModelAssignmentMetadata.Builder rebalanceAssignments( rebalancer.rebalance() ); - if (modelToAdd.isPresent()) { - checkModelIsFullyAllocatedIfScalingIsNotPossible(modelToAdd.get().getDeploymentId(), rebalanced, nodes); + if (createAssignmentRequest.isPresent()) { + checkModelIsFullyAllocatedIfScalingIsNotPossible( + createAssignmentRequest.get().getTaskParams().getDeploymentId(), + rebalanced, + nodes + ); } return rebalanced; @@ -795,14 +804,22 @@ private boolean isScalingPossible(List nodes) { || (smallestMLNode.isPresent() && smallestMLNode.getAsLong() < maxMLNodeSize); } - public void updateNumberOfAllocations(String deploymentId, int numberOfAllocations, ActionListener listener) { - updateNumberOfAllocations(clusterService.state(), deploymentId, numberOfAllocations, listener); + public void updateDeployment( + String deploymentId, + Integer numberOfAllocations, + AdaptiveAllocationsSettings adaptiveAllocationsSettings, + boolean isInternal, + ActionListener listener + ) { + updateDeployment(clusterService.state(), deploymentId, numberOfAllocations, adaptiveAllocationsSettings, isInternal, listener); } - private void updateNumberOfAllocations( + private void updateDeployment( ClusterState clusterState, String deploymentId, - int numberOfAllocations, + Integer numberOfAllocations, + AdaptiveAllocationsSettings adaptiveAllocationsSettingsUpdates, + boolean isInternal, ActionListener listener ) { TrainedModelAssignmentMetadata metadata = TrainedModelAssignmentMetadata.fromState(clusterState); @@ -811,7 +828,27 @@ private void updateNumberOfAllocations( listener.onFailure(ExceptionsHelper.missingModelDeployment(deploymentId)); return; } - if (existingAssignment.getTaskParams().getNumberOfAllocations() == numberOfAllocations) { + AdaptiveAllocationsSettings adaptiveAllocationsSettings = getAdaptiveAllocationsSettings( + existingAssignment.getAdaptiveAllocationsSettings(), + adaptiveAllocationsSettingsUpdates + ); + if (adaptiveAllocationsSettings != null) { + if (isInternal == false && adaptiveAllocationsSettings.getEnabled() && numberOfAllocations != null) { + ValidationException validationException = new ValidationException(); + validationException.addValidationError("[" + NUMBER_OF_ALLOCATIONS + "] cannot be set if adaptive allocations is enabled"); + listener.onFailure(validationException); + return; + } + ActionRequestValidationException validationException = adaptiveAllocationsSettings.validate(); + if (validationException != null) { + listener.onFailure(validationException); + return; + } + } + boolean hasUpdates = (numberOfAllocations != null + && Objects.equals(numberOfAllocations, existingAssignment.getTaskParams().getNumberOfAllocations()) == false) + || Objects.equals(adaptiveAllocationsSettings, existingAssignment.getAdaptiveAllocationsSettings()) == false; + if (hasUpdates == false) { listener.onResponse(existingAssignment); return; } @@ -828,7 +865,7 @@ private void updateNumberOfAllocations( if (clusterState.getMinTransportVersion().before(DISTRIBUTED_MODEL_ALLOCATION_TRANSPORT_VERSION)) { listener.onFailure( new ElasticsearchStatusException( - "cannot update number_of_allocations for deployment with model id [{}] while cluster upgrade is in progress.", + "cannot update deployment with model id [{}] while cluster upgrade is in progress.", RestStatus.CONFLICT, deploymentId ) @@ -837,7 +874,7 @@ private void updateNumberOfAllocations( } ActionListener 
updatedStateListener = ActionListener.wrap( - updatedState -> submitUnbatchedTask("update model deployment number_of_allocations", new ClusterStateUpdateTask() { + updatedState -> submitUnbatchedTask("update model deployment", new ClusterStateUpdateTask() { private volatile boolean isUpdated; @@ -848,7 +885,7 @@ public ClusterState execute(ClusterState currentState) { return updatedState; } logger.debug(() -> format("[%s] Retrying update as cluster state has been modified", deploymentId)); - updateNumberOfAllocations(currentState, deploymentId, numberOfAllocations, listener); + updateDeployment(currentState, deploymentId, numberOfAllocations, adaptiveAllocationsSettings, isInternal, listener); return currentState; } @@ -877,38 +914,69 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) listener::onFailure ); - adjustNumberOfAllocations(clusterState, existingAssignment, numberOfAllocations, updatedStateListener); + updateAssignment(clusterState, existingAssignment, numberOfAllocations, adaptiveAllocationsSettings, updatedStateListener); + } + + private AdaptiveAllocationsSettings getAdaptiveAllocationsSettings( + AdaptiveAllocationsSettings original, + AdaptiveAllocationsSettings updates + ) { + if (updates == null) { + return original; + } else if (updates == AdaptiveAllocationsSettings.RESET_PLACEHOLDER) { + return null; + } else if (original == null) { + return updates; + } else { + return original.merge(updates); + } } - private void adjustNumberOfAllocations( + private void updateAssignment( ClusterState clusterState, TrainedModelAssignment assignment, - int numberOfAllocations, + Integer numberOfAllocations, + AdaptiveAllocationsSettings adaptiveAllocationsSettings, ActionListener listener ) { threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(() -> { - if (numberOfAllocations > assignment.getTaskParams().getNumberOfAllocations()) { - increaseNumberOfAllocations(clusterState, assignment, numberOfAllocations, listener); + if (numberOfAllocations == null || numberOfAllocations == assignment.getTaskParams().getNumberOfAllocations()) { + updateAndKeepNumberOfAllocations(clusterState, assignment, adaptiveAllocationsSettings, listener); + } else if (numberOfAllocations > assignment.getTaskParams().getNumberOfAllocations()) { + increaseNumberOfAllocations(clusterState, assignment, numberOfAllocations, adaptiveAllocationsSettings, listener); } else { - decreaseNumberOfAllocations(clusterState, assignment, numberOfAllocations, listener); + decreaseNumberOfAllocations(clusterState, assignment, numberOfAllocations, adaptiveAllocationsSettings, listener); } }); } + private void updateAndKeepNumberOfAllocations( + ClusterState clusterState, + TrainedModelAssignment assignment, + AdaptiveAllocationsSettings adaptiveAllocationsSettings, + ActionListener listener + ) { + TrainedModelAssignment.Builder updatedAssignment = TrainedModelAssignment.Builder.fromAssignment(assignment) + .setAdaptiveAllocationsSettings(adaptiveAllocationsSettings); + TrainedModelAssignmentMetadata.Builder builder = TrainedModelAssignmentMetadata.builder(clusterState); + builder.updateAssignment(assignment.getDeploymentId(), updatedAssignment); + listener.onResponse(update(clusterState, builder)); + } + private void increaseNumberOfAllocations( ClusterState clusterState, TrainedModelAssignment assignment, int numberOfAllocations, + AdaptiveAllocationsSettings adaptiveAllocationsSettings, ActionListener listener ) { try { + TrainedModelAssignment.Builder updatedAssignment = 
TrainedModelAssignment.Builder.fromAssignment(assignment) + .setNumberOfAllocations(numberOfAllocations) + .setAdaptiveAllocationsSettings(adaptiveAllocationsSettings); final ClusterState updatedClusterState = update( clusterState, - TrainedModelAssignmentMetadata.builder(clusterState) - .updateAssignment( - assignment.getDeploymentId(), - TrainedModelAssignment.Builder.fromAssignment(assignment).setNumberOfAllocations(numberOfAllocations) - ) + TrainedModelAssignmentMetadata.builder(clusterState).updateAssignment(assignment.getDeploymentId(), updatedAssignment) ); TrainedModelAssignmentMetadata.Builder rebalancedMetadata = rebalanceAssignments(updatedClusterState, Optional.empty()); if (isScalingPossible(getAssignableNodes(clusterState)) == false @@ -931,6 +999,7 @@ private void decreaseNumberOfAllocations( ClusterState clusterState, TrainedModelAssignment assignment, int numberOfAllocations, + AdaptiveAllocationsSettings adaptiveAllocationsSettings, ActionListener listener ) { TrainedModelAssignment.Builder updatedAssignment = numberOfAllocations < assignment.totalTargetAllocations() @@ -938,7 +1007,7 @@ private void decreaseNumberOfAllocations( numberOfAllocations ) : TrainedModelAssignment.Builder.fromAssignment(assignment).setNumberOfAllocations(numberOfAllocations); - + updatedAssignment.setAdaptiveAllocationsSettings(adaptiveAllocationsSettings); // We have now reduced allocations to a number we can be sure it is satisfied // and thus we should clear the assignment reason. if (numberOfAllocations <= assignment.totalTargetAllocations()) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java index 7052e6f147b36..afd17b803cdcb 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java @@ -12,8 +12,7 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.action.support.UnsafePlainActionFuture; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; @@ -53,7 +52,6 @@ import org.elasticsearch.xpack.ml.inference.deployment.TrainedModelDeploymentTask; import org.elasticsearch.xpack.ml.task.AbstractJobPersistentTasksExecutor; -import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Collections; import java.util.Deque; @@ -154,16 +152,29 @@ public void beforeStop() { this.expressionResolver = expressionResolver; } - public void start() { + void start() { stopped = false; - scheduledFuture = threadPool.scheduleWithFixedDelay( - this::loadQueuedModels, - MODEL_LOADING_CHECK_INTERVAL, - threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME) - ); + schedule(false); } - public void stop() { + private void schedule(boolean runImmediately) { + if (stopped) { + // do not schedule when stopped + return; + } + + var rescheduleListener = ActionListener.wrap(this::schedule, e -> this.schedule(false)); + Runnable 
loadQueuedModels = () -> loadQueuedModels(rescheduleListener); + var executor = threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME); + + if (runImmediately) { + executor.execute(loadQueuedModels); + } else { + scheduledFuture = threadPool.schedule(loadQueuedModels, MODEL_LOADING_CHECK_INTERVAL, executor); + } + } + + void stop() { stopped = true; ThreadPool.Cancellable cancellable = this.scheduledFuture; if (cancellable != null) { @@ -171,9 +182,9 @@ public void stop() { } } - void loadQueuedModels() { - TrainedModelDeploymentTask loadingTask; - if (loadingModels.isEmpty()) { + void loadQueuedModels(ActionListener rescheduleImmediately) { + if (stopped) { + rescheduleImmediately.onResponse(false); return; } if (latestState != null) { @@ -188,39 +199,49 @@ void loadQueuedModels() { ); if (unassignedIndices.size() > 0) { logger.trace("not loading models as indices {} primary shards are unassigned", unassignedIndices); + rescheduleImmediately.onResponse(false); return; } } - logger.trace("attempting to load all currently queued models"); - // NOTE: As soon as this method exits, the timer for the scheduler starts ticking - Deque loadingToRetry = new ArrayDeque<>(); - while ((loadingTask = loadingModels.poll()) != null) { - final String deploymentId = loadingTask.getDeploymentId(); - if (loadingTask.isStopped()) { - if (logger.isTraceEnabled()) { - String reason = loadingTask.stoppedReason().orElse("_unknown_"); - logger.trace("[{}] attempted to load stopped task with reason [{}]", deploymentId, reason); - } - continue; + + var loadingTask = loadingModels.poll(); + if (loadingTask == null) { + rescheduleImmediately.onResponse(false); + return; + } + + loadModel(loadingTask, ActionListener.wrap(retry -> { + if (retry != null && retry) { + loadingModels.offer(loadingTask); + // don't reschedule immediately if the next task is the one we just queued, instead wait a bit to retry + rescheduleImmediately.onResponse(loadingModels.peek() != loadingTask); + } else { + rescheduleImmediately.onResponse(loadingModels.isEmpty() == false); } - if (stopped) { - return; + }, e -> rescheduleImmediately.onResponse(loadingModels.isEmpty() == false))); + } + + void loadModel(TrainedModelDeploymentTask loadingTask, ActionListener retryListener) { + if (loadingTask.isStopped()) { + if (logger.isTraceEnabled()) { + logger.trace( + "[{}] attempted to load stopped task with reason [{}]", + loadingTask.getDeploymentId(), + loadingTask.stoppedReason().orElse("_unknown_") + ); } - final PlainActionFuture listener = new UnsafePlainActionFuture<>( - MachineLearning.UTILITY_THREAD_POOL_NAME - ); - try { - deploymentManager.startDeployment(loadingTask, listener); - // This needs to be synchronous here in the utility thread to keep queueing order - TrainedModelDeploymentTask deployedTask = listener.actionGet(); - // kicks off asynchronous cluster state update - handleLoadSuccess(deployedTask); - } catch (Exception ex) { + retryListener.onResponse(false); + return; + } + SubscribableListener.newForked(l -> deploymentManager.startDeployment(loadingTask, l)) + .andThen(threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME), threadPool.getThreadContext(), this::handleLoadSuccess) + .addListener(retryListener.delegateResponse((retryL, ex) -> { + var deploymentId = loadingTask.getDeploymentId(); logger.warn(() -> "[" + deploymentId + "] Start deployment failed", ex); if (ExceptionsHelper.unwrapCause(ex) instanceof ResourceNotFoundException) { - String modelId = loadingTask.getParams().getModelId(); + var modelId = 
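// ---- Editor's note (not part of the patch) ----
// The refactored loader above replaces the fixed-delay scheduler with a loop that
// processes one queued model per run and tells the scheduler, via ActionListener<Boolean>,
// whether to run again immediately (more work queued) or after MODEL_LOADING_CHECK_INTERVAL.
// A self-contained sketch of that pattern with a plain queue and ScheduledExecutorService;
// names and the one-second interval are placeholders, not the Elasticsearch API.
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;

class LoaderLoopSketch {
    private final Queue<Runnable> queuedModels = new ConcurrentLinkedQueue<>();
    private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    private volatile boolean stopped;

    void schedule(boolean runImmediately) {
        if (stopped) {
            return;                                            // do not reschedule once stopped
        }
        Runnable loadOne = () -> loadNext(this::schedule);
        if (runImmediately) {
            executor.execute(loadOne);                         // drain faster while work remains
        } else {
            executor.schedule(loadOne, 1, TimeUnit.SECONDS);   // stand-in for MODEL_LOADING_CHECK_INTERVAL
        }
    }

    private void loadNext(Consumer<Boolean> reschedule) {
        Runnable task = queuedModels.poll();
        if (task == null) {
            reschedule.accept(false);                          // nothing to do, wait for the next tick
            return;
        }
        try {
            task.run();
        } finally {
            reschedule.accept(queuedModels.isEmpty() == false); // run again right away if more is queued
        }
    }
}
// ---- end editor's note ----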
loadingTask.getParams().getModelId(); logger.debug(() -> "[" + deploymentId + "] Start deployment failed as model [" + modelId + "] was not found", ex); - handleLoadFailure(loadingTask, ExceptionsHelper.missingTrainedModel(modelId, ex)); + handleLoadFailure(loadingTask, ExceptionsHelper.missingTrainedModel(modelId, ex), retryL); } else if (ExceptionsHelper.unwrapCause(ex) instanceof SearchPhaseExecutionException) { /* * This case will not catch the ElasticsearchException generated from the ChunkedTrainedModelRestorer in a scenario @@ -232,13 +253,11 @@ void loadQueuedModels() { // A search phase execution failure should be retried, push task back to the queue // This will cause the entire model to be reloaded (all the chunks) - loadingToRetry.add(loadingTask); + retryL.onResponse(true); } else { - handleLoadFailure(loadingTask, ex); + handleLoadFailure(loadingTask, ex, retryL); } - } - } - loadingModels.addAll(loadingToRetry); + }), threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME), threadPool.getThreadContext()); } public void gracefullyStopDeploymentAndNotify( @@ -680,14 +699,14 @@ void prepareModelToLoad(StartTrainedModelDeploymentAction.TaskParams taskParams) ); // threadsafe check to verify we are not loading/loaded the model if (deploymentIdToTask.putIfAbsent(taskParams.getDeploymentId(), task) == null) { - loadingModels.add(task); + loadingModels.offer(task); } else { // If there is already a task for the deployment, unregister the new task taskManager.unregister(task); } } - private void handleLoadSuccess(TrainedModelDeploymentTask task) { + private void handleLoadSuccess(ActionListener retryListener, TrainedModelDeploymentTask task) { logger.debug( () -> "[" + task.getParams().getDeploymentId() @@ -704,13 +723,16 @@ private void handleLoadSuccess(TrainedModelDeploymentTask task) { task.stoppedReason().orElse("_unknown_") ) ); + retryListener.onResponse(false); return; } updateStoredState( task.getDeploymentId(), RoutingInfoUpdate.updateStateAndReason(new RoutingStateAndReason(RoutingState.STARTED, "")), - ActionListener.wrap(r -> logger.debug(() -> "[" + task.getDeploymentId() + "] model loaded and accepting routes"), e -> { + ActionListener.runAfter(ActionListener.wrap(r -> { + logger.debug(() -> "[" + task.getDeploymentId() + "] model loaded and accepting routes"); + }, e -> { // This means that either the assignment has been deleted, or this node's particular route has been removed if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) { logger.debug( @@ -732,7 +754,7 @@ private void handleLoadSuccess(TrainedModelDeploymentTask task) { e ); } - }) + }), () -> retryListener.onResponse(false)) ); } @@ -752,7 +774,7 @@ private void updateStoredState(String deploymentId, RoutingInfoUpdate update, Ac ); } - private void handleLoadFailure(TrainedModelDeploymentTask task, Exception ex) { + private void handleLoadFailure(TrainedModelDeploymentTask task, Exception ex, ActionListener retryListener) { logger.error(() -> "[" + task.getDeploymentId() + "] model [" + task.getParams().getModelId() + "] failed to load", ex); if (task.isStopped()) { logger.debug( @@ -769,14 +791,14 @@ private void handleLoadFailure(TrainedModelDeploymentTask task, Exception ex) { Runnable stopTask = () -> stopDeploymentAsync( task, "model failed to load; reason [" + ex.getMessage() + "]", - ActionListener.noop() + ActionListener.running(() -> retryListener.onResponse(false)) ); updateStoredState( task.getDeploymentId(), RoutingInfoUpdate.updateStateAndReason( new 
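// ---- Editor's note (not part of the patch) ----
// The failure path above now reports, through the retry listener, whether the model should
// be requeued: a SearchPhaseExecutionException (e.g. a failed model-chunk search) requeues
// the whole model, a ResourceNotFoundException becomes a missing-model load failure, and
// anything else fails the load outright. A sketch of that decision as a pure function; this
// is a fragment that assumes the surrounding class's imports (org.elasticsearch.ExceptionsHelper
// and the exception types named in the patch).
static boolean shouldRetryLoad(Exception failure) {
    Throwable cause = ExceptionsHelper.unwrapCause(failure);
    if (cause instanceof ResourceNotFoundException) {
        return false;   // the model definition is gone; fail the deployment
    }
    if (cause instanceof SearchPhaseExecutionException) {
        return true;    // transient search failure; requeue and reload all chunks
    }
    return false;       // any other failure is terminal for this load attempt
}
// ---- end editor's note ----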
RoutingStateAndReason(RoutingState.FAILED, ExceptionsHelper.unwrapCause(ex).getMessage()) ), - ActionListener.wrap(r -> stopTask.run(), e -> stopTask.run()) + ActionListener.running(stopTask) ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java index ef8af6af445fb..624ef5434e2a0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.inference.assignment.Priority; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; @@ -50,7 +51,7 @@ class TrainedModelAssignmentRebalancer { private final TrainedModelAssignmentMetadata currentMetadata; private final Map nodeLoads; private final Map, Collection> mlNodesByZone; - private final Optional deploymentToAdd; + private final Optional createAssignmentRequest; private final int allocatedProcessorsScale; private final boolean useNewMemoryFields; @@ -59,28 +60,29 @@ class TrainedModelAssignmentRebalancer { TrainedModelAssignmentMetadata currentMetadata, Map nodeLoads, Map, Collection> mlNodesByZone, - Optional deploymentToAdd, + Optional createAssignmentRequest, int allocatedProcessorsScale, boolean useNewMemoryFields ) { this.currentMetadata = Objects.requireNonNull(currentMetadata); this.nodeLoads = Objects.requireNonNull(nodeLoads); this.mlNodesByZone = Objects.requireNonNull(mlNodesByZone); - this.deploymentToAdd = Objects.requireNonNull(deploymentToAdd); + this.createAssignmentRequest = Objects.requireNonNull(createAssignmentRequest); this.allocatedProcessorsScale = allocatedProcessorsScale; this.useNewMemoryFields = useNewMemoryFields; } TrainedModelAssignmentMetadata.Builder rebalance() { - if (deploymentToAdd.isPresent() && currentMetadata.hasDeployment(deploymentToAdd.get().getDeploymentId())) { + if (createAssignmentRequest.isPresent() + && currentMetadata.hasDeployment(createAssignmentRequest.get().getTaskParams().getDeploymentId())) { throw new ResourceAlreadyExistsException( "[{}] assignment for deployment with model [{}] already exists", - deploymentToAdd.get().getDeploymentId(), - deploymentToAdd.get().getModelId() + createAssignmentRequest.get().getTaskParams().getDeploymentId(), + createAssignmentRequest.get().getTaskParams().getModelId() ); } - if (deploymentToAdd.isEmpty() && areAllModelsSatisfiedAndNoOutdatedRoutingEntries()) { + if (createAssignmentRequest.isEmpty() && areAllModelsSatisfiedAndNoOutdatedRoutingEntries()) { logger.trace(() -> "No need to rebalance as all model deployments are satisfied"); return TrainedModelAssignmentMetadata.Builder.fromMetadata(currentMetadata); } @@ -176,14 +178,15 @@ private AssignmentPlan computePlanForNormalPriorityModels( assignment.getTaskParams().getThreadsPerAllocation(), currentAssignments, assignment.getMaxAssignedAllocations(), + assignment.getAdaptiveAllocationsSettings(), // in the mixed cluster state use old memory 
fields to avoid unstable assignment plans useNewMemoryFields ? assignment.getTaskParams().getPerDeploymentMemoryBytes() : 0, useNewMemoryFields ? assignment.getTaskParams().getPerAllocationMemoryBytes() : 0 ); }) .forEach(planDeployments::add); - if (deploymentToAdd.isPresent() && deploymentToAdd.get().getPriority() != Priority.LOW) { - StartTrainedModelDeploymentAction.TaskParams taskParams = deploymentToAdd.get(); + if (createAssignmentRequest.isPresent() && createAssignmentRequest.get().getTaskParams().getPriority() != Priority.LOW) { + StartTrainedModelDeploymentAction.TaskParams taskParams = createAssignmentRequest.get().getTaskParams(); planDeployments.add( new AssignmentPlan.Deployment( taskParams.getDeploymentId(), @@ -192,6 +195,7 @@ private AssignmentPlan computePlanForNormalPriorityModels( taskParams.getThreadsPerAllocation(), Map.of(), 0, + createAssignmentRequest.get().getAdaptiveAllocationsSettings(), // in the mixed cluster state use old memory fields to avoid unstable assignment plans useNewMemoryFields ? taskParams.getPerDeploymentMemoryBytes() : 0, useNewMemoryFields ? taskParams.getPerAllocationMemoryBytes() : 0 @@ -231,14 +235,15 @@ private AssignmentPlan computePlanForLowPriorityModels(Set assignableNod assignment.getTaskParams().getThreadsPerAllocation(), findFittingAssignments(assignment, assignableNodeIds, remainingNodeMemory), assignment.getMaxAssignedAllocations(), + assignment.getAdaptiveAllocationsSettings(), Priority.LOW, (useNewMemoryFields == false) ? assignment.getTaskParams().getPerDeploymentMemoryBytes() : 0, (useNewMemoryFields == false) ? assignment.getTaskParams().getPerAllocationMemoryBytes() : 0 ) ) .forEach(planDeployments::add); - if (deploymentToAdd.isPresent() && deploymentToAdd.get().getPriority() == Priority.LOW) { - StartTrainedModelDeploymentAction.TaskParams taskParams = deploymentToAdd.get(); + if (createAssignmentRequest.isPresent() && createAssignmentRequest.get().getTaskParams().getPriority() == Priority.LOW) { + StartTrainedModelDeploymentAction.TaskParams taskParams = createAssignmentRequest.get().getTaskParams(); planDeployments.add( new AssignmentPlan.Deployment( taskParams.getDeploymentId(), @@ -247,6 +252,7 @@ private AssignmentPlan computePlanForLowPriorityModels(Set assignableNod taskParams.getThreadsPerAllocation(), Map.of(), 0, + createAssignmentRequest.get().getAdaptiveAllocationsSettings(), Priority.LOW, (useNewMemoryFields == false) ? taskParams.getPerDeploymentMemoryBytes() : 0, (useNewMemoryFields == false) ? taskParams.getPerAllocationMemoryBytes() : 0 @@ -325,11 +331,12 @@ private TrainedModelAssignmentMetadata.Builder buildAssignmentsFromPlan(Assignme for (AssignmentPlan.Deployment deployment : assignmentPlan.models()) { TrainedModelAssignment existingAssignment = currentMetadata.getDeploymentAssignment(deployment.id()); - TrainedModelAssignment.Builder assignmentBuilder = TrainedModelAssignment.Builder.empty( - existingAssignment == null && deploymentToAdd.isPresent() - ? deploymentToAdd.get() - : currentMetadata.getDeploymentAssignment(deployment.id()).getTaskParams() - ); + TrainedModelAssignment.Builder assignmentBuilder = existingAssignment == null && createAssignmentRequest.isPresent() + ? 
TrainedModelAssignment.Builder.empty(createAssignmentRequest.get()) + : TrainedModelAssignment.Builder.empty( + currentMetadata.getDeploymentAssignment(deployment.id()).getTaskParams(), + currentMetadata.getDeploymentAssignment(deployment.id()).getAdaptiveAllocationsSettings() + ); if (existingAssignment != null) { assignmentBuilder.setStartTime(existingAssignment.getStartTime()); assignmentBuilder.setMaxAssignedAllocations(existingAssignment.getMaxAssignedAllocations()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentService.java index 0609e0e6ff916..bf19b505e5cfe 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentService.java @@ -30,7 +30,6 @@ import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; import org.elasticsearch.xpack.core.ml.action.DeleteTrainedModelAssignmentAction; -import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.action.UpdateTrainedModelAssignmentRoutingInfoAction; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; @@ -85,10 +84,10 @@ public void updateModelAssignmentState( } public void createNewModelAssignment( - StartTrainedModelDeploymentAction.TaskParams taskParams, + CreateTrainedModelAssignmentAction.Request request, ActionListener listener ) { - client.execute(CreateTrainedModelAssignmentAction.INSTANCE, new CreateTrainedModelAssignmentAction.Request(taskParams), listener); + client.execute(CreateTrainedModelAssignmentAction.INSTANCE, request, listener); } public void deleteModelAssignment(String modelId, ActionListener listener) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java index 98988ffa11055..0151c8f5ee9c8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java @@ -60,6 +60,7 @@ Deployment modifyModelPreservingPreviousAssignments(Deployment m) { m.threadsPerAllocation(), calculateAllocationsPerNodeToPreserve(m), m.maxAssignedAllocations(), + m.getAdaptiveAllocationsSettings(), m.perDeploymentMemoryBytes(), m.perAllocationMemoryBytes() ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java index 123c728587604..7fc16394ed85c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.util.Maps; import 
org.elasticsearch.core.Tuple; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings; import org.elasticsearch.xpack.core.ml.inference.assignment.Priority; import java.util.ArrayList; @@ -37,11 +38,11 @@ public record Deployment( int threadsPerAllocation, Map currentAllocationsByNodeId, int maxAssignedAllocations, + AdaptiveAllocationsSettings adaptiveAllocationsSettings, Priority priority, long perDeploymentMemoryBytes, long perAllocationMemoryBytes ) { - public Deployment( String id, long modelBytes, @@ -49,6 +50,7 @@ public Deployment( int threadsPerAllocation, Map currentAllocationsByNodeId, int maxAssignedAllocations, + AdaptiveAllocationsSettings adaptiveAllocationsSettings, long perDeploymentMemoryBytes, long perAllocationMemoryBytes ) { @@ -59,12 +61,17 @@ public Deployment( threadsPerAllocation, currentAllocationsByNodeId, maxAssignedAllocations, + adaptiveAllocationsSettings, Priority.NORMAL, perDeploymentMemoryBytes, perAllocationMemoryBytes ); } + public AdaptiveAllocationsSettings getAdaptiveAllocationsSettings() { + return adaptiveAllocationsSettings; + } + int getCurrentAssignedAllocations() { return currentAllocationsByNodeId.values().stream().mapToInt(Integer::intValue).sum(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java index b1c017b1a784c..38279a2fd6c03 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java @@ -118,6 +118,7 @@ private AssignmentPlan solveAllocatingAtLeastOnceModelsThatWerePreviouslyAllocat // don't rely on the current allocation new HashMap<>(), m.maxAssignedAllocations(), + m.getAdaptiveAllocationsSettings(), m.perDeploymentMemoryBytes(), m.perAllocationMemoryBytes() ) @@ -149,6 +150,7 @@ private AssignmentPlan solveAllocatingAtLeastOnceModelsThatWerePreviouslyAllocat m.threadsPerAllocation(), currentAllocationsByNodeId, m.maxAssignedAllocations(), + m.getAdaptiveAllocationsSettings(), m.perDeploymentMemoryBytes(), m.perAllocationMemoryBytes() ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlanner.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlanner.java index 9af2e4cd49b17..1f0857391598f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlanner.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlanner.java @@ -129,6 +129,7 @@ private AssignmentPlan computeZonePlan( (tryAssigningPreviouslyAssignedModels && modelIdToRemainingAllocations.get(m.id()) == m.allocations()) ? 
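// ---- Editor's note (not part of the patch) ----
// AssignmentPlan.Deployment above gains an adaptiveAllocationsSettings component (the
// shorter constructor still defaults priority to NORMAL), so the planners can carry the
// settings through unchanged. A minimal construction example; all values are placeholders,
// the allocations argument not visible in this hunk is assumed, and AdaptiveAllocationsSettings
// is assumed to take (enabled, min, max) as in the tests later in this patch.
AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment(
    "my-deployment",                              // id
    ByteSizeValue.ofMb(512).getBytes(),           // model bytes
    2,                                            // allocations
    1,                                            // threadsPerAllocation
    Map.of(),                                     // currentAllocationsByNodeId
    0,                                            // maxAssignedAllocations
    new AdaptiveAllocationsSettings(true, 1, 4),  // carried through the planners unchanged
    0L,                                           // perDeploymentMemoryBytes
    0L                                            // perAllocationMemoryBytes
);
// ---- end editor's note ----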
m.maxAssignedAllocations() : 0, + m.getAdaptiveAllocationsSettings(), // Only force assigning at least once previously assigned models that have not had any allocation yet m.perDeploymentMemoryBytes(), m.perAllocationMemoryBytes() @@ -154,6 +155,7 @@ private AssignmentPlan computePlanAcrossAllNodes(List plans) { m.threadsPerAllocation(), allocationsByNodeIdByModelId.get(m.id()), m.maxAssignedAllocations(), + m.getAdaptiveAllocationsSettings(), m.perDeploymentMemoryBytes(), m.perAllocationMemoryBytes() ) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java index f8f9caf365918..d2179a69ebc24 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java @@ -11,12 +11,12 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.rescore.RescorerBuilder; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; @@ -134,10 +134,10 @@ public RescorerBuilder rewrite(QueryRewriteContex } @Override - public ActionRequestValidationException validate(SearchRequest searchRequest, ActionRequestValidationException validationException) { - validationException = super.validate(searchRequest, validationException); + public ActionRequestValidationException validate(SearchSourceBuilder source, ActionRequestValidationException validationException) { + validationException = super.validate(source, validationException); - int searchRequestPaginationSize = searchRequest.source().from() + searchRequest.source().size(); + int searchRequestPaginationSize = source.from() + source.size(); if (windowSize() < searchRequestPaginationSize) { return addValidationError( @@ -151,7 +151,7 @@ public ActionRequestValidationException validate(SearchRequest searchRequest, Ac } @SuppressWarnings("rawtypes") - List rescorers = searchRequest.source().rescores(); + List rescorers = source.rescores(); assert rescorers != null && rescorers.contains(this); for (int i = rescorers.indexOf(this) + 1; i < rescorers.size(); i++) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerContext.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerContext.java index b1df3a2da7c42..e03370b415417 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerContext.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerContext.java @@ -24,6 +24,8 @@ import java.util.ArrayList; import java.util.List; +import static java.util.function.Predicate.not; + public class LearningToRankRescorerContext extends RescoreContext { final SearchExecutionContext 
executionContext; @@ -52,12 +54,9 @@ public LearningToRankRescorerContext( List buildFeatureExtractors(IndexSearcher searcher) throws IOException { assert this.regressionModelDefinition != null && this.learningToRankConfig != null; + List featureExtractors = new ArrayList<>(); - if (this.regressionModelDefinition.inputFields().isEmpty() == false) { - featureExtractors.add( - new FieldValueFeatureExtractor(new ArrayList<>(this.regressionModelDefinition.inputFields()), this.executionContext) - ); - } + List weights = new ArrayList<>(); List queryFeatureNames = new ArrayList<>(); for (LearningToRankFeatureExtractorBuilder featureExtractorBuilder : learningToRankConfig.getFeatureExtractorBuilders()) { @@ -72,6 +71,14 @@ List buildFeatureExtractors(IndexSearcher searcher) throws IOE featureExtractors.add(new QueryFeatureExtractor(queryFeatureNames, weights)); } + List fieldValueExtractorFields = this.regressionModelDefinition.inputFields() + .stream() + .filter(not(queryFeatureNames::contains)) + .toList(); + if (fieldValueExtractorFields.isEmpty() == false) { + featureExtractors.add(new FieldValueFeatureExtractor(fieldValueExtractorFields, this.executionContext)); + } + return featureExtractors; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TextEmbeddingProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TextEmbeddingProcessor.java index 6b6ab43e10c58..c28fc8f44c3fa 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TextEmbeddingProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TextEmbeddingProcessor.java @@ -62,10 +62,18 @@ static InferenceResults processResult( if (chunkResults) { var embeddings = new ArrayList(); for (int i = 0; i < pyTorchResult.getInferenceResult()[0].length; i++) { - int startOffset = tokenization.getTokenization(i).tokens().get(0).get(0).startOffset(); - int lastIndex = tokenization.getTokenization(i).tokens().get(0).size() - 1; - int endOffset = tokenization.getTokenization(i).tokens().get(0).get(lastIndex).endOffset(); - String matchedText = tokenization.getTokenization(i).input().get(0).substring(startOffset, endOffset); + String matchedText; + if (tokenization.getTokenization(i).tokens().get(0).isEmpty() == false) { + int startOffset = tokenization.getTokenization(i).tokens().get(0).get(0).startOffset(); + int lastIndex = tokenization.getTokenization(i).tokens().get(0).size() - 1; + int endOffset = tokenization.getTokenization(i).tokens().get(0).get(lastIndex).endOffset(); + matchedText = tokenization.getTokenization(i).input().get(0).substring(startOffset, endOffset); + + } else { + // No tokens in the input, this should only happen with and empty string + assert tokenization.getTokenization(i).input().get(0).isEmpty(); + matchedText = ""; + } embeddings.add( new MlChunkedTextEmbeddingFloatResults.EmbeddingChunk(matchedText, pyTorchResult.getInferenceResult()[0][i]) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TextExpansionProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TextExpansionProcessor.java index 3939bbef4052a..2efeb7e6564f3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TextExpansionProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TextExpansionProcessor.java @@ -75,10 +75,17 @@ static InferenceResults processResult( var chunkedResults = new 
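// ---- Editor's note (not part of the patch) ----
// buildFeatureExtractors above now collects the query feature names first and only feeds
// the remaining model input fields to the FieldValueFeatureExtractor, so a feature that a
// query extractor already produces is not also fetched from the document source. The
// filtering step in isolation (field names are placeholders):
import java.util.List;
import static java.util.function.Predicate.not;

class FeatureFieldFilterSketch {
    static List<String> fieldValueExtractorFields(List<String> modelInputFields, List<String> queryFeatureNames) {
        // keep only the inputs that are not already produced by a query feature extractor
        return modelInputFields.stream().filter(not(queryFeatureNames::contains)).toList();
    }

    public static void main(String[] args) {
        System.out.println(fieldValueExtractorFields(List.of("price", "popularity", "bm25"), List.of("bm25")));
        // prints [price, popularity]
    }
}
// ---- end editor's note ----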
ArrayList(); for (int i = 0; i < pyTorchResult.getInferenceResult()[0].length; i++) { - int startOffset = tokenization.getTokenization(i).tokens().get(0).get(0).startOffset(); - int lastIndex = tokenization.getTokenization(i).tokens().get(0).size() - 1; - int endOffset = tokenization.getTokenization(i).tokens().get(0).get(lastIndex).endOffset(); - String matchedText = tokenization.getTokenization(i).input().get(0).substring(startOffset, endOffset); + String matchedText; + if (tokenization.getTokenization(i).tokens().get(0).isEmpty() == false) { + int startOffset = tokenization.getTokenization(i).tokens().get(0).get(0).startOffset(); + int lastIndex = tokenization.getTokenization(i).tokens().get(0).size() - 1; + int endOffset = tokenization.getTokenization(i).tokens().get(0).get(lastIndex).endOffset(); + matchedText = tokenization.getTokenization(i).input().get(0).substring(startOffset, endOffset); + } else { + // No tokens in the input, this should only happen with and empty string + assert tokenization.getTokenization(i).input().get(0).isEmpty(); + matchedText = ""; + } var weightedTokens = sparseVectorToTokenWeights(pyTorchResult.getInferenceResult()[0][i], tokenization, replacementVocab); weightedTokens.sort((t1, t2) -> Float.compare(t2.weight(), t1.weight())); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessor.java index 87fad19ab87fc..1bb2f1006822e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessor.java @@ -160,11 +160,11 @@ void processInferenceResult(PyTorchResult result) { } logger.debug(() -> format("[%s] Parsed inference result with id [%s]", modelId, result.requestId())); - updateStats(timeMs, Boolean.TRUE.equals(result.isCacheHit())); PendingResult pendingResult = pendingResults.remove(result.requestId()); if (pendingResult == null) { logger.debug(() -> format("[%s] no pending result for inference [%s]", modelId, result.requestId())); } else { + updateStats(timeMs, Boolean.TRUE.equals(result.isCacheHit())); pendingResult.listener.onResponse(result); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java index 50342a7bf99e0..f9e4e62e4e3bc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java @@ -74,7 +74,6 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.filter.Filters; import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregator; -import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; import org.elasticsearch.search.aggregations.metrics.ExtendedStats; import org.elasticsearch.search.aggregations.metrics.Stats; import org.elasticsearch.search.aggregations.metrics.TopHits; @@ -1816,20 +1815,13 @@ public void getForecastStats( handler.accept(new ForecastStats()); return; } - Map aggregationsAsMap = aggregations.asMap(); - StatsAccumulator memoryStats = StatsAccumulator.fromStatsAggregation( - (Stats) 
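// ---- Editor's note (not part of the patch) ----
// Both chunked-result paths above now guard against a chunk whose tokenization produced no
// tokens (expected only for an empty input string): instead of indexing into an empty token
// list they emit an empty matched-text string. The guard in isolation, with a hypothetical
// Token(startOffset, endOffset) record standing in for the real tokenization types:
import java.util.List;

class MatchedTextSketch {
    record Token(int startOffset, int endOffset) {}

    static String matchedText(String input, List<Token> tokens) {
        if (tokens.isEmpty()) {
            assert input.isEmpty();   // no tokens should only happen for an empty input
            return "";
        }
        int start = tokens.get(0).startOffset();
        int end = tokens.get(tokens.size() - 1).endOffset();
        return input.substring(start, end);
    }
}
// ---- end editor's note ----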
aggregationsAsMap.get(ForecastStats.Fields.MEMORY) - ); - Stats aggRecordsStats = (Stats) aggregationsAsMap.get(ForecastStats.Fields.RECORDS); + StatsAccumulator memoryStats = StatsAccumulator.fromStatsAggregation(aggregations.get(ForecastStats.Fields.MEMORY)); + Stats aggRecordsStats = aggregations.get(ForecastStats.Fields.RECORDS); // Stats already gives us all the counts and every doc as a "records" field. long totalHits = aggRecordsStats.getCount(); StatsAccumulator recordStats = StatsAccumulator.fromStatsAggregation(aggRecordsStats); - StatsAccumulator runtimeStats = StatsAccumulator.fromStatsAggregation( - (Stats) aggregationsAsMap.get(ForecastStats.Fields.RUNTIME) - ); - CountAccumulator statusCount = CountAccumulator.fromTermsAggregation( - (StringTerms) aggregationsAsMap.get(ForecastStats.Fields.STATUSES) - ); + StatsAccumulator runtimeStats = StatsAccumulator.fromStatsAggregation(aggregations.get(ForecastStats.Fields.RUNTIME)); + CountAccumulator statusCount = CountAccumulator.fromTermsAggregation(aggregations.get(ForecastStats.Fields.STATUSES)); ForecastStats forecastStats = new ForecastStats(totalHits, memoryStats, recordStats, runtimeStats, statusCount); handler.accept(forecastStats); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilder.java index 587638e9ef7c9..6d972bcf5863a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilder.java @@ -15,6 +15,8 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -41,6 +43,10 @@ import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; import static org.elasticsearch.xpack.core.ml.search.WeightedTokensQueryBuilder.PRUNING_CONFIG; +/** + * @deprecated Replaced by sparse_vector query + */ +@Deprecated public class TextExpansionQueryBuilder extends AbstractQueryBuilder { public static final String NAME = "text_expansion"; @@ -53,6 +59,9 @@ public class TextExpansionQueryBuilder extends AbstractQueryBuilder weightedTokensSupplier; private final TokenPruningConfig tokenPruningConfig; + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(ParseField.class); + public static final String TEXT_EXPANSION_DEPRECATION_MESSAGE = NAME + " is deprecated. 
Use sparse_vector instead."; + public TextExpansionQueryBuilder(String fieldName, String modelText, String modelId) { this(fieldName, modelText, modelId, null); } @@ -242,6 +251,9 @@ protected int doHashCode() { } public static TextExpansionQueryBuilder fromXContent(XContentParser parser) throws IOException { + + deprecationLogger.warn(DeprecationCategory.API, NAME, TEXT_EXPANSION_DEPRECATION_MESSAGE); + String fieldName = null; String modelText = null; String modelId = null; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestStartTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestStartTrainedModelDeploymentAction.java index 40cf7d531d5ee..e308eb6007973 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestStartTrainedModelDeploymentAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestStartTrainedModelDeploymentAction.java @@ -7,15 +7,11 @@ package org.elasticsearch.xpack.ml.rest.inference; -import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RestApiVersion; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; @@ -27,7 +23,6 @@ import java.io.IOException; import java.util.Collections; import java.util.List; -import java.util.Objects; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction.Request.CACHE_SIZE; @@ -87,22 +82,11 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient } if (restRequest.hasParam(TIMEOUT.getPreferredName())) { - TimeValue openTimeout = validateParameters( - request.getTimeout(), - restRequest.paramAsTime(TIMEOUT.getPreferredName(), StartTrainedModelDeploymentAction.DEFAULT_TIMEOUT), - StartTrainedModelDeploymentAction.DEFAULT_TIMEOUT - ); // hasParam, so never default - request.setTimeout(openTimeout); + request.setTimeout(restRequest.paramAsTime(TIMEOUT.getPreferredName(), request.getTimeout())); } request.setWaitForState( - validateParameters( - request.getWaitForState(), - AllocationStatus.State.fromString( - restRequest.param(WAIT_FOR.getPreferredName(), StartTrainedModelDeploymentAction.DEFAULT_WAITFOR_STATE.toString()) - ), - StartTrainedModelDeploymentAction.DEFAULT_WAITFOR_STATE - ) + AllocationStatus.State.fromString(restRequest.param(WAIT_FOR.getPreferredName(), request.getWaitForState().toString())) ); RestCompatibilityChecker.checkAndSetDeprecatedParam( @@ -110,84 +94,34 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient NUMBER_OF_ALLOCATIONS.getPreferredName(), RestApiVersion.V_8, restRequest, - (r, s) -> validateParameters( - request.getNumberOfAllocations(), - r.paramAsInt(s, StartTrainedModelDeploymentAction.DEFAULT_NUM_ALLOCATIONS), - StartTrainedModelDeploymentAction.DEFAULT_NUM_ALLOCATIONS - ), + // This is to propagate a null value, which paramAsInt does not support. + (r, s) -> r.hasParam(s) ? 
(Integer) r.paramAsInt(s, 0) : request.getNumberOfAllocations(), request::setNumberOfAllocations ); + RestCompatibilityChecker.checkAndSetDeprecatedParam( THREADS_PER_ALLOCATION.getDeprecatedNames()[0], THREADS_PER_ALLOCATION.getPreferredName(), RestApiVersion.V_8, restRequest, - (r, s) -> validateParameters( - request.getThreadsPerAllocation(), - r.paramAsInt(s, StartTrainedModelDeploymentAction.DEFAULT_NUM_THREADS), - StartTrainedModelDeploymentAction.DEFAULT_NUM_THREADS - ), + (r, s) -> r.paramAsInt(s, request.getThreadsPerAllocation()), request::setThreadsPerAllocation ); - request.setQueueCapacity( - validateParameters( - request.getQueueCapacity(), - restRequest.paramAsInt(QUEUE_CAPACITY.getPreferredName(), StartTrainedModelDeploymentAction.DEFAULT_QUEUE_CAPACITY), - StartTrainedModelDeploymentAction.DEFAULT_QUEUE_CAPACITY - ) - ); + + request.setQueueCapacity(restRequest.paramAsInt(QUEUE_CAPACITY.getPreferredName(), request.getQueueCapacity())); if (restRequest.hasParam(CACHE_SIZE.getPreferredName())) { request.setCacheSize( - validateParameters( - request.getCacheSize(), - ByteSizeValue.parseBytesSizeValue(restRequest.param(CACHE_SIZE.getPreferredName()), CACHE_SIZE.getPreferredName()), - null - ) + ByteSizeValue.parseBytesSizeValue(restRequest.param(CACHE_SIZE.getPreferredName()), CACHE_SIZE.getPreferredName()) ); } else if (defaultCacheSize != null && request.getCacheSize() == null) { request.setCacheSize(defaultCacheSize); } request.setPriority( - validateParameters( - request.getPriority().toString(), - restRequest.param(StartTrainedModelDeploymentAction.TaskParams.PRIORITY.getPreferredName()), - StartTrainedModelDeploymentAction.DEFAULT_PRIORITY.toString() - ) + restRequest.param(StartTrainedModelDeploymentAction.TaskParams.PRIORITY.getPreferredName(), request.getPriority().toString()) ); return channel -> client.execute(StartTrainedModelDeploymentAction.INSTANCE, request, new RestToXContentListener<>(channel)); } - - /** - * This function validates that the body and query parameters don't conflict, and returns the value that should be used. - * When using this function, the body parameter should already have been set to the default value in - * {@link StartTrainedModelDeploymentAction}, or, set to a different value from the rest request. - * - * @param paramDefault (from {@link StartTrainedModelDeploymentAction}) - * @return the parameter to use - * @throws ElasticsearchStatusException if the parameters don't match - */ - private static T validateParameters(@Nullable T bodyParam, @Nullable T queryParam, @Nullable T paramDefault) - throws ElasticsearchStatusException { - if (Objects.equals(bodyParam, paramDefault) && queryParam != null) { - // the body param is the same as the default for this value. 
We cannot tell if this was set intentionally, or if it was just the - // default, thus we will assume it was the default - return queryParam; - } - - if (Objects.equals(bodyParam, queryParam)) { - return bodyParam; - } else if (bodyParam == null) { - return queryParam; - } else if (queryParam == null) { - return bodyParam; - } else { - throw new ElasticsearchStatusException( - "The parameter " + bodyParam + " in the body is different from the parameter " + queryParam + " in the query", - RestStatus.BAD_REQUEST - ); - } - } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningInfoTransportActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningInfoTransportActionTests.java index 084a9d95939c5..afa372fb94527 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningInfoTransportActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningInfoTransportActionTests.java @@ -1015,6 +1015,7 @@ private Map setupComplexMocks() { null, null, null, + null, Instant.now(), List.of( AssignmentStats.NodeStats.forStartedState( @@ -1064,6 +1065,7 @@ private Map setupComplexMocks() { "model_4", 2, 2, + null, 1000, ByteSizeValue.ofBytes(1000), Instant.now(), diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java index 2f30d131021b4..2f251e3b0aee6 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java @@ -13,11 +13,14 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.IndicesAdminClient; import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.ml.inference.adaptiveallocations.AdaptiveAllocationsScalerService; +import org.elasticsearch.xpack.ml.notifications.InferenceAuditor; import org.junit.Before; import java.util.Map; @@ -36,6 +39,7 @@ public class MlInitializationServiceTests extends ESTestCase { private ThreadPool threadPool; private ClusterService clusterService; private Client client; + private InferenceAuditor inferenceAuditor; private MlAssignmentNotifier mlAssignmentNotifier; @Before @@ -44,9 +48,11 @@ public void setUpMocks() { threadPool = deterministicTaskQueue.getThreadPool(); clusterService = mock(ClusterService.class); client = mock(Client.class); + inferenceAuditor = mock(InferenceAuditor.class); mlAssignmentNotifier = mock(MlAssignmentNotifier.class); when(clusterService.getClusterName()).thenReturn(CLUSTER_NAME); + when(clusterService.state()).thenReturn(ClusterState.EMPTY_STATE); @SuppressWarnings("unchecked") ActionFuture getSettingsResponseActionFuture = mock(ActionFuture.class); @@ -68,6 +74,7 @@ public void testInitialize() { threadPool, clusterService, client, + inferenceAuditor, mlAssignmentNotifier, true, true, @@ -83,6 +90,7 @@ public void testInitialize_noMasterNode() { threadPool, clusterService, client, + inferenceAuditor, mlAssignmentNotifier, true, true, @@ -94,11 +102,13 @@ public 
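// ---- Editor's note (not part of the patch) ----
// The REST handler above drops the old validateParameters conflict check: a query parameter,
// when present, now simply overrides whatever the body (or the action default) set, because
// the current request value is passed as the paramAs* default. The precedence rule in
// isolation, with hypothetical types standing in for RestRequest and the action request:
import java.util.Map;

class QueryOverridesBodySketch {
    static int resolveQueueCapacity(Map<String, String> queryParams, int valueFromBody) {
        // mirrors paramAsInt(name, default): fall back to the value already on the request
        String raw = queryParams.get("queue_capacity");
        return raw != null ? Integer.parseInt(raw) : valueFromBody;
    }

    public static void main(String[] args) {
        System.out.println(resolveQueueCapacity(Map.of("queue_capacity", "2048"), 1024)); // query wins: 2048
        System.out.println(resolveQueueCapacity(Map.of(), 1024));                         // body value kept: 1024
    }
}
// ---- end editor's note ----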
void testInitialize_noMasterNode() { public void testNodeGoesFromMasterToNonMasterAndBack() { MlDailyMaintenanceService initialDailyMaintenanceService = mock(MlDailyMaintenanceService.class); + AdaptiveAllocationsScalerService adaptiveAllocationsScalerService = mock(AdaptiveAllocationsScalerService.class); MlInitializationService initializationService = new MlInitializationService( client, threadPool, initialDailyMaintenanceService, + adaptiveAllocationsScalerService, clusterService ); initializationService.offMaster(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java index 2b206de4cf42f..bdabb42ecd467 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java @@ -191,7 +191,7 @@ public void testIsNodeSafeToShutdownReturnsFalseWhenStartingDeploymentExists() { TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( "1", - TrainedModelAssignment.Builder.empty(StartTrainedModelDeploymentTaskParamsTests.createRandom()) + TrainedModelAssignment.Builder.empty(StartTrainedModelDeploymentTaskParamsTests.createRandom(), null) .addRoutingEntry(nodeId, new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) .build() @@ -215,12 +215,12 @@ public void testIsNodeSafeToShutdownReturnsFalseWhenStoppingAndStoppedDeployment TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( "1", - TrainedModelAssignment.Builder.empty(StartTrainedModelDeploymentTaskParamsTests.createRandom()) + TrainedModelAssignment.Builder.empty(StartTrainedModelDeploymentTaskParamsTests.createRandom(), null) .addRoutingEntry(nodeId, new RoutingInfo(1, 1, RoutingState.STOPPED, "")) ) .addNewAssignment( "2", - TrainedModelAssignment.Builder.empty(StartTrainedModelDeploymentTaskParamsTests.createRandom()) + TrainedModelAssignment.Builder.empty(StartTrainedModelDeploymentTaskParamsTests.createRandom(), null) .addRoutingEntry(nodeId, new RoutingInfo(1, 1, RoutingState.STOPPING, "")) ) .build() @@ -244,12 +244,12 @@ public void testIsNodeSafeToShutdownReturnsTrueWhenStoppedDeploymentsExist() { TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( "1", - TrainedModelAssignment.Builder.empty(StartTrainedModelDeploymentTaskParamsTests.createRandom()) + TrainedModelAssignment.Builder.empty(StartTrainedModelDeploymentTaskParamsTests.createRandom(), null) .addRoutingEntry(nodeId, new RoutingInfo(1, 1, RoutingState.STOPPED, "")) ) .addNewAssignment( "2", - TrainedModelAssignment.Builder.empty(StartTrainedModelDeploymentTaskParamsTests.createRandom()) + TrainedModelAssignment.Builder.empty(StartTrainedModelDeploymentTaskParamsTests.createRandom(), null) .addRoutingEntry(nodeId, new RoutingInfo(1, 1, RoutingState.STOPPED, "")) ) .build() diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetricsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetricsTests.java index 2262c21070e75..5fb1381b881ea 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetricsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetricsTests.java @@ -132,18 +132,18 @@ public void testFindTrainedModelAllocationCounts() { TrainedModelAssignmentMetadata.Builder metadataBuilder = TrainedModelAssignmentMetadata.Builder.empty(); metadataBuilder.addNewAssignment( "model1", - 
TrainedModelAssignment.Builder.empty(mock(StartTrainedModelDeploymentAction.TaskParams.class)) + TrainedModelAssignment.Builder.empty(mock(StartTrainedModelDeploymentAction.TaskParams.class), null) .addRoutingEntry("node1", new RoutingInfo(1, 1, RoutingState.STARTED, "")) .addRoutingEntry("node2", new RoutingInfo(0, 1, RoutingState.FAILED, "")) ); metadataBuilder.addNewAssignment( "model2", - TrainedModelAssignment.Builder.empty(mock(StartTrainedModelDeploymentAction.TaskParams.class)) + TrainedModelAssignment.Builder.empty(mock(StartTrainedModelDeploymentAction.TaskParams.class), null) .addRoutingEntry("node1", new RoutingInfo(2, 2, RoutingState.STARTED, "")) ); metadataBuilder.addNewAssignment( "model3", - TrainedModelAssignment.Builder.empty(mock(StartTrainedModelDeploymentAction.TaskParams.class)) + TrainedModelAssignment.Builder.empty(mock(StartTrainedModelDeploymentAction.TaskParams.class), null) .addRoutingEntry("node2", new RoutingInfo(0, 1, RoutingState.STARTING, "")) ); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsActionTests.java index b8dd3559253ee..4a66be4a773f5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsActionTests.java @@ -83,6 +83,7 @@ public void testAddFailedRoutes_GivenMixedResponses() throws UnknownHostExceptio "deployment1", randomBoolean() ? null : randomIntBetween(1, 8), randomBoolean() ? null : randomIntBetween(1, 8), + null, randomBoolean() ? null : randomIntBetween(1, 10000), randomBoolean() ? null : ByteSizeValue.ofBytes(randomLongBetween(1, 1000000)), Instant.now(), @@ -121,6 +122,7 @@ public void testAddFailedRoutes_TaskResultIsOverwritten() throws UnknownHostExce "deployment1", randomBoolean() ? null : randomIntBetween(1, 8), randomBoolean() ? null : randomIntBetween(1, 8), + null, randomBoolean() ? null : randomIntBetween(1, 10000), randomBoolean() ? 
null : ByteSizeValue.ofBytes(randomLongBetween(1, 1000000)), Instant.now(), @@ -169,7 +171,8 @@ private static TrainedModelAssignment createAssignment(String modelId) { Priority.NORMAL, 0L, 0L - ) + ), + null ).build(); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingResourceTrackerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingResourceTrackerTests.java index 0d91ce45c46ba..41a86e436f468 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingResourceTrackerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingResourceTrackerTests.java @@ -1143,7 +1143,8 @@ public void testGetMemoryAndProcessorsScaleDown() throws InterruptedException { Priority.NORMAL, 0L, 0L - ) + ), + null ).addRoutingEntry("ml-node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")).build(), "model-2", TrainedModelAssignment.Builder.empty( @@ -1158,7 +1159,8 @@ public void testGetMemoryAndProcessorsScaleDown() throws InterruptedException { Priority.NORMAL, 0L, 0L - ) + ), + null ).addRoutingEntry("ml-node-3", new RoutingInfo(1, 1, RoutingState.STARTED, "")).build() ), List.of( @@ -1242,7 +1244,8 @@ public void testGetMemoryAndProcessorsScaleDownPreventedByMinNodes() throws Inte Priority.NORMAL, 0L, 0L - ) + ), + null ) .addRoutingEntry("ml-node-1", new RoutingInfo(2, 2, RoutingState.STARTED, "")) .addRoutingEntry("ml-node-2", new RoutingInfo(2, 2, RoutingState.STARTED, "")) @@ -1260,7 +1263,8 @@ public void testGetMemoryAndProcessorsScaleDownPreventedByMinNodes() throws Inte Priority.NORMAL, 0L, 0L - ) + ), + null ).addRoutingEntry("ml-node-3", new RoutingInfo(1, 1, RoutingState.STARTED, "")).build() ), List.of( @@ -1334,7 +1338,8 @@ public void testGetMemoryAndProcessorsScaleDownPreventedByDummyEntityMemory() th Priority.NORMAL, 0L, 0L - ) + ), + null ).addRoutingEntry("ml-node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")).build(), "model-2", TrainedModelAssignment.Builder.empty( @@ -1349,7 +1354,8 @@ public void testGetMemoryAndProcessorsScaleDownPreventedByDummyEntityMemory() th Priority.NORMAL, 0L, 0L - ) + ), + null ).addRoutingEntry("ml-node-3", new RoutingInfo(1, 1, RoutingState.STARTED, "")).build() ), List.of( @@ -1432,7 +1438,8 @@ public void testGetMemoryAndProcessorsScaleDownNotPreventedByDummyEntityProcesso Priority.NORMAL, 0L, 0L - ) + ), + null ).addRoutingEntry("ml-node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")).build(), "model-2", TrainedModelAssignment.Builder.empty( @@ -1447,7 +1454,8 @@ public void testGetMemoryAndProcessorsScaleDownNotPreventedByDummyEntityProcesso Priority.NORMAL, 0L, 0L - ) + ), + null ).addRoutingEntry("ml-node-3", new RoutingInfo(1, 1, RoutingState.STARTED, "")).build() ), List.of( @@ -1525,7 +1533,8 @@ public void testGetMemoryAndProcessorsScaleDownNotPreventedByDummyEntityAsMemory Priority.NORMAL, 0L, 0L - ) + ), + null ).addRoutingEntry("ml-node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")).build(), "model-2", TrainedModelAssignment.Builder.empty( @@ -1540,7 +1549,8 @@ public void testGetMemoryAndProcessorsScaleDownNotPreventedByDummyEntityAsMemory Priority.NORMAL, 0L, 0L - ) + ), + null ).addRoutingEntry("ml-node-3", new RoutingInfo(1, 1, RoutingState.STARTED, "")).build() ), List.of( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDeciderTests.java 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDeciderTests.java index a916900b199ce..970044c188849 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDeciderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDeciderTests.java @@ -1069,7 +1069,8 @@ public void testCpuModelAssignmentRequirements() { Priority.NORMAL, 0L, 0L - ) + ), + null ).build(), TrainedModelAssignment.Builder.empty( new StartTrainedModelDeploymentAction.TaskParams( @@ -1083,7 +1084,8 @@ public void testCpuModelAssignmentRequirements() { Priority.NORMAL, 0L, 0L - ) + ), + null ).build() ), withMlNodes("ml_node_1", "ml_node_2"), @@ -1105,7 +1107,8 @@ public void testCpuModelAssignmentRequirements() { Priority.NORMAL, 0L, 0L - ) + ), + null ).build(), TrainedModelAssignment.Builder.empty( new StartTrainedModelDeploymentAction.TaskParams( @@ -1119,7 +1122,8 @@ public void testCpuModelAssignmentRequirements() { Priority.NORMAL, 0L, 0L - ) + ), + null ).build() ), withMlNodes("ml_node_1", "ml_node_2"), @@ -1141,7 +1145,8 @@ public void testCpuModelAssignmentRequirements() { Priority.NORMAL, 0L, 0L - ) + ), + null ).build(), TrainedModelAssignment.Builder.empty( new StartTrainedModelDeploymentAction.TaskParams( @@ -1155,7 +1160,8 @@ public void testCpuModelAssignmentRequirements() { Priority.NORMAL, 0L, 0L - ) + ), + null ).build() ), withMlNodes("ml_node_1", "ml_node_2", "ml_node_3", "ml_node_4"), diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDeciderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDeciderTests.java index 97fd66e284010..ba40dc0bfdda7 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDeciderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDeciderTests.java @@ -79,7 +79,8 @@ public void testScale_GivenCurrentCapacityIsUsedExactly() { Priority.NORMAL, 0L, 0L - ) + ), + null ).addRoutingEntry(mlNodeId1, new RoutingInfo(2, 2, RoutingState.STARTED, "")) ) .addNewAssignment( @@ -96,7 +97,8 @@ public void testScale_GivenCurrentCapacityIsUsedExactly() { Priority.NORMAL, 0L, 0L - ) + ), + null ) .addRoutingEntry(mlNodeId1, new RoutingInfo(2, 2, RoutingState.STARTED, "")) .addRoutingEntry(mlNodeId2, new RoutingInfo(8, 8, RoutingState.STARTED, "")) @@ -153,7 +155,8 @@ public void testScale_GivenUnsatisfiedDeployments() { Priority.NORMAL, 0L, 0L - ) + ), + null ) ) .addNewAssignment( @@ -170,7 +173,8 @@ public void testScale_GivenUnsatisfiedDeployments() { Priority.NORMAL, 0L, 0L - ) + ), + null ) .addRoutingEntry(mlNodeId1, new RoutingInfo(1, 1, RoutingState.STARTED, "")) .addRoutingEntry(mlNodeId2, new RoutingInfo(1, 1, RoutingState.STARTED, "")) @@ -227,7 +231,8 @@ public void testScale_GivenUnsatisfiedDeploymentIsLowPriority_ShouldNotScaleUp() Priority.LOW, 0L, 0L - ) + ), + null ) ) .addNewAssignment( @@ -244,7 +249,8 @@ public void testScale_GivenUnsatisfiedDeploymentIsLowPriority_ShouldNotScaleUp() Priority.NORMAL, 0L, 0L - ) + ), + null ) .addRoutingEntry(mlNodeId1, new RoutingInfo(1, 1, RoutingState.STARTED, "")) .addRoutingEntry(mlNodeId2, new RoutingInfo(1, 1, RoutingState.STARTED, "")) @@ -301,7 +307,8 @@ public void testScale_GivenMoreThanHalfProcessorsAreUsed() { Priority.NORMAL, 0L, 0L - ) + ), + null ).addRoutingEntry(mlNodeId1, new 
RoutingInfo(2, 2, RoutingState.STARTED, "")) ) .addNewAssignment( @@ -318,7 +325,8 @@ public void testScale_GivenMoreThanHalfProcessorsAreUsed() { Priority.NORMAL, 0L, 0L - ) + ), + null ).addRoutingEntry(mlNodeId2, new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .build() @@ -386,7 +394,8 @@ public void testScale_GivenDownScalePossible_DelayNotSatisfied() { Priority.NORMAL, 0L, 0L - ) + ), + null ).addRoutingEntry(mlNodeId1, new RoutingInfo(2, 2, RoutingState.STARTED, "")) ) .addNewAssignment( @@ -403,7 +412,8 @@ public void testScale_GivenDownScalePossible_DelayNotSatisfied() { Priority.NORMAL, 0L, 0L - ) + ), + null ).addRoutingEntry(mlNodeId2, new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .build() @@ -459,7 +469,8 @@ public void testScale_GivenDownScalePossible_DelaySatisfied() { Priority.NORMAL, 0L, 0L - ) + ), + null ).addRoutingEntry(mlNodeId1, new RoutingInfo(2, 2, RoutingState.STARTED, "")) ) .addNewAssignment( @@ -476,7 +487,8 @@ public void testScale_GivenDownScalePossible_DelaySatisfied() { Priority.NORMAL, 0L, 0L - ) + ), + null ).addRoutingEntry(mlNodeId2, new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .build() @@ -536,7 +548,8 @@ public void testScale_GivenLowPriorityDeploymentsOnly() { Priority.LOW, 0L, 0L - ) + ), + null ).addRoutingEntry(mlNodeId1, new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .addNewAssignment( @@ -553,7 +566,8 @@ public void testScale_GivenLowPriorityDeploymentsOnly() { Priority.LOW, 0L, 0L - ) + ), + null ).addRoutingEntry(mlNodeId1, new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .build() diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationTestUtils.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationTestUtils.java index afd3c3534f7c3..561076c302eda 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationTestUtils.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationTestUtils.java @@ -36,12 +36,7 @@ public final class AggregationTestUtils { private AggregationTestUtils() {} static InternalHistogram.Bucket createHistogramBucket(long timestamp, long docCount, List subAggregations) { - InternalHistogram.Bucket bucket = mock(InternalHistogram.Bucket.class); - when(bucket.getKey()).thenReturn(timestamp); - when(bucket.getDocCount()).thenReturn(docCount); - InternalAggregations aggs = createAggs(subAggregations); - when(bucket.getAggregations()).thenReturn(aggs); - return bucket; + return new InternalHistogram.Bucket(timestamp, docCount, false, DocValueFormat.RAW, createAggs(subAggregations)); } static InternalComposite.InternalBucket createCompositeBucket( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessorTests.java index a3edd63295cea..fc774a4ee3e48 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessorTests.java @@ -414,11 +414,13 @@ public void testProcessGivenMultipleSingleMetricPerSingleTermsPerHistogram() thr } public void testProcessGivenUnsupportedAggregationUnderHistogram() { - 
InternalHistogram.Bucket histogramBucket = createHistogramBucket(1000L, 2); InternalAggregation anotherHistogram = mock(InternalAggregation.class); when(anotherHistogram.getName()).thenReturn("nested-agg"); - InternalAggregations subAggs = createAggs(Arrays.asList(createMax("time", 1000), anotherHistogram)); - when(histogramBucket.getAggregations()).thenReturn(subAggs); + InternalHistogram.Bucket histogramBucket = createHistogramBucket( + 1000L, + 2, + Arrays.asList(createMax("time", 1000), anotherHistogram) + ); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, @@ -428,13 +430,11 @@ public void testProcessGivenUnsupportedAggregationUnderHistogram() { } public void testProcessGivenMultipleBucketAggregations() { - InternalHistogram.Bucket histogramBucket = createHistogramBucket(1000L, 2); StringTerms terms1 = mock(StringTerms.class); when(terms1.getName()).thenReturn("terms_1"); StringTerms terms2 = mock(StringTerms.class); when(terms2.getName()).thenReturn("terms_2"); - InternalAggregations subAggs = createAggs(Arrays.asList(createMax("time", 1000), terms1, terms2)); - when(histogramBucket.getAggregations()).thenReturn(subAggs); + InternalHistogram.Bucket histogramBucket = createHistogramBucket(1000L, 2, Arrays.asList(createMax("time", 1000), terms1, terms2)); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerServiceTests.java new file mode 100644 index 0000000000000..3ad44f256dc66 --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerServiceTests.java @@ -0,0 +1,239 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ml.inference.adaptiveallocations; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ScalingExecutorBuilder; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; +import org.elasticsearch.xpack.core.ml.action.GetDeploymentStatsAction; +import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; +import org.elasticsearch.xpack.core.ml.action.UpdateTrainedModelDeploymentAction; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings; +import org.elasticsearch.xpack.core.ml.inference.assignment.AssignmentStats; +import org.elasticsearch.xpack.core.ml.inference.assignment.Priority; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; +import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.ml.notifications.InferenceAuditor; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.time.Instant; +import java.util.List; +import java.util.Map; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.same; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +public class AdaptiveAllocationsScalerServiceTests extends ESTestCase { + + private TestThreadPool threadPool; + private ClusterService clusterService; + private Client client; + private InferenceAuditor inferenceAuditor; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + threadPool = createThreadPool( + new ScalingExecutorBuilder(MachineLearning.UTILITY_THREAD_POOL_NAME, 0, 1, TimeValue.timeValueMinutes(10), false) + ); + clusterService = mock(ClusterService.class); + client = mock(Client.class); + inferenceAuditor = mock(InferenceAuditor.class); + } + + @Override + @After + public void tearDown() throws Exception { + this.threadPool.close(); + super.tearDown(); + } + + private ClusterState getClusterState(int numAllocations) { + ClusterState clusterState = mock(ClusterState.class); + Metadata metadata = mock(Metadata.class); + when(clusterState.getMetadata()).thenReturn(metadata); + when(metadata.custom("trained_model_assignment")).thenReturn( + new TrainedModelAssignmentMetadata( + Map.of( + "test-deployment", + TrainedModelAssignment.Builder.empty( + new StartTrainedModelDeploymentAction.TaskParams( + "model-id", + "test-deployment", + 100_000_000, + numAllocations, + 1, + 1024, + ByteSizeValue.ZERO, + Priority.NORMAL, + 100_000_000, + 100_000_000 + ), + new AdaptiveAllocationsSettings(true, null, null) + ).build() 
+ ) + ) + ); + return clusterState; + } + + private GetDeploymentStatsAction.Response getDeploymentStatsResponse(int numAllocations, int inferenceCount, double latency) { + return new GetDeploymentStatsAction.Response( + List.of(), + List.of(), + List.of( + new AssignmentStats( + "test-deployment", + "model-id", + 1, + numAllocations, + new AdaptiveAllocationsSettings(true, null, null), + 1024, + ByteSizeValue.ZERO, + Instant.now(), + List.of( + AssignmentStats.NodeStats.forStartedState( + DiscoveryNodeUtils.create("node_1"), + inferenceCount, + latency, + latency, + 0, + 0, + 0, + 0, + 0, + Instant.now(), + Instant.now(), + 1, + numAllocations, + inferenceCount, + inferenceCount, + latency, + 0 + ) + ), + Priority.NORMAL + ) + ), + 0 + ); + } + + public void test() throws IOException { + // Initialize the cluster with a deployment with 1 allocation. + ClusterState clusterState = getClusterState(1); + when(clusterService.state()).thenReturn(clusterState); + + AdaptiveAllocationsScalerService service = new AdaptiveAllocationsScalerService( + threadPool, + clusterService, + client, + inferenceAuditor, + true, + 1 + ); + service.start(); + + verify(clusterService).state(); + verify(clusterService).addListener(same(service)); + verifyNoMoreInteractions(client, clusterService); + reset(client, clusterService); + + // First cycle: 1 inference request, so no need for scaling. + when(client.threadPool()).thenReturn(threadPool); + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + var listener = (ActionListener) invocationOnMock.getArguments()[2]; + listener.onResponse(getDeploymentStatsResponse(1, 1, 11.0)); + return Void.TYPE; + }).when(client).execute(eq(GetDeploymentStatsAction.INSTANCE), eq(new GetDeploymentStatsAction.Request("test-deployment")), any()); + + safeSleep(1200); + + verify(client, times(1)).threadPool(); + verify(client, times(1)).execute(eq(GetDeploymentStatsAction.INSTANCE), any(), any()); + verifyNoMoreInteractions(client, clusterService); + reset(client, clusterService); + + // Second cycle: 150 inference request with a latency of 10ms, so scale up to 2 allocations. 
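Throughout these test changes, TrainedModelAssignment.Builder.empty(...) takes a new second argument carrying the deployment's adaptive allocations settings: the pre-existing tests pass null, while getClusterState() above enables the feature. A minimal sketch of the enabled form, assuming the three constructor arguments read as enabled/min/max (taskParams is a placeholder for the StartTrainedModelDeploymentAction.TaskParams built as in getClusterState()):

    TrainedModelAssignment assignment = TrainedModelAssignment.Builder.empty(
        taskParams,                                         // deployment task parameters, built as in getClusterState()
        new AdaptiveAllocationsSettings(true, null, null)   // enabled, with no explicit min/max bounds
    ).build();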
+ when(client.threadPool()).thenReturn(threadPool); + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + var listener = (ActionListener) invocationOnMock.getArguments()[2]; + listener.onResponse(getDeploymentStatsResponse(1, 150, 10.0)); + return Void.TYPE; + }).when(client).execute(eq(GetDeploymentStatsAction.INSTANCE), eq(new GetDeploymentStatsAction.Request("test-deployment")), any()); + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + var listener = (ActionListener) invocationOnMock.getArguments()[2]; + listener.onResponse(null); + return Void.TYPE; + }).when(client).execute(eq(UpdateTrainedModelDeploymentAction.INSTANCE), any(), any()); + + safeSleep(1000); + + verify(client, times(2)).threadPool(); + verify(client, times(1)).execute(eq(GetDeploymentStatsAction.INSTANCE), any(), any()); + var updateRequest = new UpdateTrainedModelDeploymentAction.Request("test-deployment"); + updateRequest.setNumberOfAllocations(2); + updateRequest.setIsInternal(true); + verify(client, times(1)).execute(eq(UpdateTrainedModelDeploymentAction.INSTANCE), eq(updateRequest), any()); + verifyNoMoreInteractions(client, clusterService); + reset(client, clusterService); + + clusterState = getClusterState(2); + ClusterChangedEvent clusterChangedEvent = mock(ClusterChangedEvent.class); + when(clusterChangedEvent.state()).thenReturn(clusterState); + service.clusterChanged(clusterChangedEvent); + + // Third cycle: 0 inference requests, but keep 2 allocations, because of cooldown. + when(client.threadPool()).thenReturn(threadPool); + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + var listener = (ActionListener) invocationOnMock.getArguments()[2]; + listener.onResponse(getDeploymentStatsResponse(2, 0, 9.0)); + return Void.TYPE; + }).when(client).execute(eq(GetDeploymentStatsAction.INSTANCE), eq(new GetDeploymentStatsAction.Request("test-deployment")), any()); + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + var listener = (ActionListener) invocationOnMock.getArguments()[2]; + listener.onResponse(null); + return Void.TYPE; + }).when(client).execute(eq(UpdateTrainedModelDeploymentAction.INSTANCE), any(), any()); + + safeSleep(1000); + + verify(client, times(1)).threadPool(); + verify(client, times(1)).execute(eq(GetDeploymentStatsAction.INSTANCE), any(), any()); + verifyNoMoreInteractions(client, clusterService); + + service.stop(); + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerTests.java new file mode 100644 index 0000000000000..08097357725d0 --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerTests.java @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ml.inference.adaptiveallocations; + +import org.elasticsearch.test.ESTestCase; + +import java.util.Random; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.nullValue; + +public class AdaptiveAllocationsScalerTests extends ESTestCase { + + public void testAutoscaling_scaleUpAndDown() { + AdaptiveAllocationsScaler adaptiveAllocationsScaler = new AdaptiveAllocationsScaler("test-deployment", 1); + + // With 1 allocation the system can handle 500 requests * 0.020 sec/request. + // To handle remaining requests the system should scale to 2 allocations. + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(500, 100, 100, 0.020), 10, 1); + assertThat(adaptiveAllocationsScaler.scale(), equalTo(2)); + + // With 2 allocation the system can handle 800 requests * 0.025 sec/request. + // To handle remaining requests the system should scale to 3 allocations. + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(800, 100, 50, 0.025), 10, 2); + assertThat(adaptiveAllocationsScaler.scale(), equalTo(3)); + + // With 3 allocations the system can handle the load. + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(1000, 0, 0, 0.025), 10, 3); + assertThat(adaptiveAllocationsScaler.scale(), nullValue()); + + // No load anymore, so the system should gradually scale down to 1 allocation. + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(0, 0, 0, Double.NaN), 10, 3); + assertThat(adaptiveAllocationsScaler.scale(), nullValue()); + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(0, 0, 0, Double.NaN), 10, 3); + assertThat(adaptiveAllocationsScaler.scale(), equalTo(2)); + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(0, 0, 0, Double.NaN), 10, 2); + assertThat(adaptiveAllocationsScaler.scale(), nullValue()); + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(0, 0, 0, Double.NaN), 10, 2); + assertThat(adaptiveAllocationsScaler.scale(), equalTo(1)); + } + + public void testAutoscaling_noOscillating() { + AdaptiveAllocationsScaler adaptiveAllocationsScaler = new AdaptiveAllocationsScaler("test-deployment", 1); + + // With 1 allocation the system can handle 880 requests * 0.010 sec/request. + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(880, 0, 0, 0.010), 10, 1); + assertThat(adaptiveAllocationsScaler.scale(), nullValue()); + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(880, 0, 0, 0.010), 10, 1); + assertThat(adaptiveAllocationsScaler.scale(), nullValue()); + + // Increase the load to 980 requests * 0.010 sec/request, and the system + // should scale to 2 allocations to have some spare capacity. 
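The capacity reasoning in the comments above can be made concrete with a little arithmetic. The figures below are inferred from the comments and from the process(stats, 10, numAllocations) calls, not from the scaler's implementation, and the exact scale-up threshold is internal and not assumed here:

    // One allocation over the 10-second measurement window passed to process():
    double window = 10.0;
    double capacityAt20ms = window / 0.020;   // 500 requests: exactly what the first scale-up measures, so extra requests need a 2nd allocation
    double capacityAt10ms = window / 0.010;   // 1000 requests: 880 leaves headroom, while 980 is close enough to capacity
                                              // that the scaler adds a second allocation for spare capacity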
+ adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(920, 0, 0, 0.010), 10, 1); + assertThat(adaptiveAllocationsScaler.scale(), nullValue()); + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(950, 0, 0, 0.010), 10, 1); + assertThat(adaptiveAllocationsScaler.scale(), nullValue()); + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(980, 0, 0, 0.010), 10, 1); + assertThat(adaptiveAllocationsScaler.scale(), equalTo(2)); + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(980, 0, 0, 0.010), 10, 2); + assertThat(adaptiveAllocationsScaler.scale(), nullValue()); + + // Reducing the load to just 880 requests * 0.010 sec/request should not + // trigger scaling down again, to prevent oscillating. + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(880, 0, 0, 0.010), 10, 2); + assertThat(adaptiveAllocationsScaler.scale(), nullValue()); + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(880, 0, 0, 0.010), 10, 2); + assertThat(adaptiveAllocationsScaler.scale(), nullValue()); + } + + public void testAutoscaling_respectMinMaxAllocations() { + AdaptiveAllocationsScaler adaptiveAllocationsScaler = new AdaptiveAllocationsScaler("test-deployment", 1); + adaptiveAllocationsScaler.setMinMaxNumberOfAllocations(2, 5); + + // Even though there are no requests, scale to the minimum of 2 allocations. + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(0, 0, 0, 0.010), 10, 1); + assertThat(adaptiveAllocationsScaler.scale(), equalTo(2)); + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(0, 0, 0, 0.010), 10, 2); + assertThat(adaptiveAllocationsScaler.scale(), nullValue()); + + // Even though there are many requests, the scale to the maximum of 5 allocations. + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(100, 10000, 1000, 0.010), 10, 2); + assertThat(adaptiveAllocationsScaler.scale(), equalTo(5)); + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(500, 10000, 1000, 0.010), 10, 5); + assertThat(adaptiveAllocationsScaler.scale(), nullValue()); + + // After a while of no requests, scale to the minimum of 2 allocations. + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(0, 0, 0, 0.010), 10, 5); + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(0, 0, 0, 0.010), 10, 5); + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(0, 0, 0, 0.010), 10, 5); + assertThat(adaptiveAllocationsScaler.scale(), equalTo(2)); + } + + public void testEstimation_highVariance() { + AdaptiveAllocationsScaler adaptiveAllocationsScaler = new AdaptiveAllocationsScaler("test-deployment", 1); + + Random random = new Random(42); + + double averageLoadMean = 0.0; + double averageLoadError = 0.0; + + double time = 0.0; + for (int nextMeasurementTime = 1; nextMeasurementTime <= 100; nextMeasurementTime++) { + // Sample one second of data (until the next measurement time). + // This contains approximately 100 requests with high-variance inference times. + AdaptiveAllocationsScalerService.Stats stats = new AdaptiveAllocationsScalerService.Stats(0, 0, 0, 0); + while (time < nextMeasurementTime) { + // Draw inference times from a log-normal distribution, which has high variance. + // This distribution approximately has: mean=3.40, variance=98.4. 
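The quoted mean and variance follow from the standard log-normal formulas for the parameters mu = 0.1 and sigma = 1.5 passed to random.nextGaussian(0.1, 1.5):

    mean = e^{\mu + \sigma^2 / 2} = e^{0.1 + 1.125} \approx 3.40
    variance = (e^{\sigma^2} - 1)\, e^{2\mu + \sigma^2} = (e^{2.25} - 1)\, e^{2.45} \approx 98.4

With roughly 100 requests per second this gives the expectedLoad of 100 * 3.40 = 340 that the test checks against the estimated load interval.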
+ double inferenceTime = Math.exp(random.nextGaussian(0.1, 1.5)); + stats = stats.add(new AdaptiveAllocationsScalerService.Stats(1, 0, 0, inferenceTime)); + + // The requests are Poisson distributed, which means the time inbetween + // requests follows an exponential distribution. + // This distribution has on average 100 requests per second. + double dt = 0.01 * random.nextExponential(); + time += dt; + } + + adaptiveAllocationsScaler.process(stats, 1, 1); + double lower = adaptiveAllocationsScaler.getLoadLower(); + double upper = adaptiveAllocationsScaler.getLoadUpper(); + averageLoadMean += (upper + lower) / 2.0; + averageLoadError += (upper - lower) / 2.0; + } + + averageLoadMean /= 100; + averageLoadError /= 100; + + double expectedLoad = 100 * 3.40; + assertThat(averageLoadMean - averageLoadError, lessThan(expectedLoad)); + assertThat(averageLoadMean + averageLoadError, greaterThan(expectedLoad)); + assertThat(averageLoadError / averageLoadMean, lessThan(1 - AdaptiveAllocationsScaler.SCALE_UP_THRESHOLD)); + } + + public void testAutoscaling_maxAllocationsSafeguard() { + AdaptiveAllocationsScaler adaptiveAllocationsScaler = new AdaptiveAllocationsScaler("test-deployment", 1); + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(1_000_000, 10_000_000, 1, 0.05), 10, 1); + assertThat(adaptiveAllocationsScaler.scale(), equalTo(32)); + adaptiveAllocationsScaler.setMinMaxNumberOfAllocations(2, 77); + assertThat(adaptiveAllocationsScaler.scale(), equalTo(77)); + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/KalmanFilter1dTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/KalmanFilter1dTests.java new file mode 100644 index 0000000000000..f9b3a8966b627 --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/KalmanFilter1dTests.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.inference.adaptiveallocations; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.lessThan; + +public class KalmanFilter1dTests extends ESTestCase { + + public void testEstimation_equalValues() { + KalmanFilter1d filter = new KalmanFilter1d("test-filter", 100, false); + assertThat(filter.hasValue(), equalTo(false)); + + filter.add(42.0, 9.0, false); + assertThat(filter.hasValue(), equalTo(true)); + assertThat(filter.estimate(), equalTo(42.0)); + assertThat(filter.error(), equalTo(3.0)); + assertThat(filter.lower(), equalTo(39.0)); + assertThat(filter.upper(), equalTo(45.0)); + + // With more data the estimation error should go down. 
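A textbook one-dimensional Kalman update makes the equal-value assertions above concrete. KalmanFilter1d also takes a smoothing factor and a dynamics-change flag, which this sketch leaves out, so it is only an approximation of the class under test:

    double estimate = 42.0, variance = 9.0;   // after the first add(42.0, 9.0): error() = sqrt(9) = 3, bounds [39, 45]
    double z = 42.0, r = 9.0;                 // a second, identical measurement
    double gain = variance / (variance + r);  // 0.5
    estimate += gain * (z - estimate);        // stays at 42.0
    variance *= (1 - gain);                   // 4.5, so error() drops from 3.0 to about 2.12 and keeps shrinking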
+ double previousError = filter.error(); + for (int i = 0; i < 20; i++) { + filter.add(42.0, 9.0, false); + assertThat(filter.estimate(), equalTo(42.0)); + assertThat(filter.error(), lessThan(previousError)); + previousError = filter.error(); + } + } + + public void testEstimation_increasingValues() { + KalmanFilter1d filter = new KalmanFilter1d("test-filter", 100, false); + filter.add(10.0, 1.0, false); + assertThat(filter.estimate(), equalTo(10.0)); + + // As the measured values increase, the estimated value should increase too, + // but it should lag behind. + double previousEstimate = filter.estimate(); + for (double value = 11.0; value < 20.0; value += 1.0) { + filter.add(value, 1.0, false); + assertThat(filter.estimate(), greaterThan(previousEstimate)); + assertThat(filter.estimate(), lessThan(value)); + previousEstimate = filter.estimate(); + } + + // More final values should bring the estimate close to it. + for (int i = 0; i < 20; i++) { + filter.add(20.0, 1.0, false); + } + assertThat(filter.estimate(), greaterThan(19.0)); + assertThat(filter.estimate(), lessThan(20.0)); + } + + public void testEstimation_bigJumpNoAutoDetectDynamicsChanges() { + KalmanFilter1d filter = new KalmanFilter1d("test-filter", 100, false); + filter.add(0.0, 100.0, false); + filter.add(0.0, 1.0, false); + assertThat(filter.estimate(), equalTo(0.0)); + + // Without dynamics change autodetection the estimated value should be + // inbetween the old and the new value. + filter.add(100.0, 1.0, false); + assertThat(filter.estimate(), greaterThan(49.0)); + assertThat(filter.estimate(), lessThan(51.0)); + } + + public void testEstimation_bigJumpWithAutoDetectDynamicsChanges() { + KalmanFilter1d filter = new KalmanFilter1d("test-filter", 100, true); + filter.add(0.0, 100.0, false); + filter.add(0.0, 1.0, false); + assertThat(filter.estimate(), equalTo(0.0)); + + // With dynamics change autodetection the estimated value should jump + // instantly to almost the new value. + filter.add(100.0, 1.0, false); + assertThat(filter.estimate(), greaterThan(99.0)); + assertThat(filter.estimate(), lessThan(100.0)); + } + + public void testEstimation_bigJumpWithExternalDetectDynamicsChange() { + KalmanFilter1d filter = new KalmanFilter1d("test-filter", 100, false); + filter.add(0.0, 100.0, false); + filter.add(0.0, 1.0, false); + assertThat(filter.estimate(), equalTo(0.0)); + + // For external dynamics changes the estimated value should jump + // instantly to almost the new value. + filter.add(100.0, 1.0, true); + assertThat(filter.estimate(), greaterThan(99.0)); + assertThat(filter.estimate(), lessThan(100.0)); + } + + public void testEstimation_differentSmoothing() { + KalmanFilter1d quickFilter = new KalmanFilter1d("test-filter", 1e-3, false); + for (int i = 0; i < 100; i++) { + quickFilter.add(42.0, 1.0, false); + } + assertThat(quickFilter.estimate(), equalTo(42.0)); + // With low smoothing, the value should be close to the new value. + quickFilter.add(77.0, 1.0, false); + assertThat(quickFilter.estimate(), greaterThan(75.0)); + assertThat(quickFilter.estimate(), lessThan(77.0)); + + KalmanFilter1d slowFilter = new KalmanFilter1d("test-filter", 1e3, false); + for (int i = 0; i < 100; i++) { + slowFilter.add(42.0, 1.0, false); + } + assertThat(slowFilter.estimate(), equalTo(42.0)); + // With high smoothing, the value should be close to the old value. 
+ slowFilter.add(77.0, 1.0, false); + assertThat(slowFilter.estimate(), greaterThan(42.0)); + assertThat(slowFilter.estimate(), lessThan(44.0)); + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java index f08d2735be8a5..1dc44582492aa 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java @@ -48,6 +48,7 @@ import org.elasticsearch.xpack.core.ml.MlConfigVersion; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; +import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; import org.elasticsearch.xpack.core.ml.action.StartDataFrameAnalyticsAction; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.action.UpdateTrainedModelAssignmentRoutingInfoAction; @@ -277,7 +278,7 @@ public void testUpdateModelRoutingTable() { TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( modelId, - TrainedModelAssignment.Builder.empty(newParams(modelId, 10_000L)) + TrainedModelAssignment.Builder.empty(newParams(modelId, 10_000L), null) .addRoutingEntry(nodeId, new RoutingInfo(1, 1, RoutingState.STARTING, "")) .addRoutingEntry(startedNode, new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) @@ -389,7 +390,10 @@ public void testRemoveAssignment() { .putCustom( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(modelId, TrainedModelAssignment.Builder.empty(newParams(modelId, randomNonNegativeLong()))) + .addNewAssignment( + modelId, + TrainedModelAssignment.Builder.empty(newParams(modelId, randomNonNegativeLong()), null) + ) .build() ) .build() @@ -450,7 +454,10 @@ public void testCreateAssignment_GivenModelCannotByFullyAllocated_AndScalingIsPo .build(); TrainedModelAssignmentClusterService trainedModelAssignmentClusterService = createClusterService(5); - ClusterState newState = trainedModelAssignmentClusterService.createModelAssignment(currentState, newParams("new-model", 150, 4, 1)); + ClusterState newState = trainedModelAssignmentClusterService.createModelAssignment( + currentState, + new CreateTrainedModelAssignmentAction.Request(newParams("new-model", 150, 4, 1), null) + ); TrainedModelAssignment createdAssignment = TrainedModelAssignmentMetadata.fromState(newState).getDeploymentAssignment("new-model"); assertThat(createdAssignment, is(not(nullValue()))); @@ -466,7 +473,10 @@ public void testCreateAssignment_GivenModelCannotByFullyAllocated_AndScalingIsPo expectThrows( ResourceAlreadyExistsException.class, - () -> trainedModelAssignmentClusterService.createModelAssignment(newState, newParams("new-model", 150)) + () -> trainedModelAssignmentClusterService.createModelAssignment( + newState, + new CreateTrainedModelAssignmentAction.Request(newParams("new-model", 150), null) + ) ); } @@ -495,7 +505,10 @@ public void testCreateAssignment_GivenModelCannotByFullyAllocated_AndScalingIsNo TrainedModelAssignmentClusterService trainedModelAssignmentClusterService = createClusterService(0); ElasticsearchStatusException e = expectThrows( ElasticsearchStatusException.class, - () -> 
trainedModelAssignmentClusterService.createModelAssignment(currentState, newParams("new-model", 150, 4, 1)) + () -> trainedModelAssignmentClusterService.createModelAssignment( + currentState, + new CreateTrainedModelAssignmentAction.Request(newParams("new-model", 150, 4, 1), null) + ) ); assertThat( @@ -528,7 +541,7 @@ public void testCreateAssignmentWhileResetModeIsTrue() throws InterruptedExcepti CountDownLatch latch = new CountDownLatch(1); trainedModelAssignmentClusterService.createNewModelAssignment( - newParams("new-model", 150), + new CreateTrainedModelAssignmentAction.Request(newParams("new-model", 150), null), new LatchedActionListener<>( ActionListener.wrap( trainedModelAssignment -> fail("assignment should have failed to be created because reset mode is set"), @@ -560,7 +573,7 @@ public void testHaveMlNodesChanged_ReturnsFalseWhenPreviouslyShuttingDownNode_Is TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( model1, - TrainedModelAssignment.Builder.empty(newParams(model1, 100)) + TrainedModelAssignment.Builder.empty(newParams(model1, 100), null) .addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) .build() @@ -597,7 +610,7 @@ public void testHaveMlNodesChanged_ReturnsTrueWhenNodeShutsDownAndWasRoutedTo() TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( model1, - TrainedModelAssignment.Builder.empty(newParams(model1, 100)) + TrainedModelAssignment.Builder.empty(newParams(model1, 100), null) .addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) .build() @@ -614,7 +627,7 @@ public void testHaveMlNodesChanged_ReturnsTrueWhenNodeShutsDownAndWasRoutedTo() TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( model1, - TrainedModelAssignment.Builder.empty(newParams(model1, 100)) + TrainedModelAssignment.Builder.empty(newParams(model1, 100), null) .addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) .build() @@ -641,7 +654,7 @@ public void testHaveMlNodesChanged_ReturnsFalseWhenNodeShutsDownAndWasRoutedTo_B TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( model1, - TrainedModelAssignment.Builder.empty(newParams(model1, 100)) + TrainedModelAssignment.Builder.empty(newParams(model1, 100), null) .addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STOPPING, "")) ) .build() @@ -658,7 +671,7 @@ public void testHaveMlNodesChanged_ReturnsFalseWhenNodeShutsDownAndWasRoutedTo_B TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( model1, - TrainedModelAssignment.Builder.empty(newParams(model1, 100)) + TrainedModelAssignment.Builder.empty(newParams(model1, 100), null) .addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) .build() @@ -700,7 +713,7 @@ public void testDetectReasonToRebalanceModels() { .putCustom( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100))) + .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100), null)) .build() ) .build() @@ -747,7 +760,7 @@ public void testDetectReasonToRebalanceModels() { .putCustom( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100))) + .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100), null)) .build() ) .build() @@ -759,7 +772,7 @@ public void 
testDetectReasonToRebalanceModels() { .putCustom( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100))) + .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100), null)) .build() ) .build() @@ -781,7 +794,7 @@ public void testDetectReasonToRebalanceModels() { .putCustom( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100))) + .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100), null)) .build() ) .build() @@ -793,7 +806,7 @@ public void testDetectReasonToRebalanceModels() { .putCustom( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100))) + .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100), null)) .build() ) .build() @@ -815,7 +828,7 @@ public void testDetectReasonToRebalanceModels() { .putCustom( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100))) + .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100), null)) .build() ) .build() @@ -827,7 +840,7 @@ public void testDetectReasonToRebalanceModels() { .putCustom( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100))) + .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100), null)) .build() ) .build() @@ -851,7 +864,7 @@ public void testDetectReasonToRebalanceModels() { TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( model1, - TrainedModelAssignment.Builder.empty(newParams(model1, 100)).stopAssignment("test") + TrainedModelAssignment.Builder.empty(newParams(model1, 100), null).stopAssignment("test") ) .build() ) @@ -864,7 +877,7 @@ public void testDetectReasonToRebalanceModels() { .putCustom( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100))) + .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100), null)) .build() ) .build() @@ -886,7 +899,7 @@ public void testDetectReasonToRebalanceModels() { .putCustom( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100))) + .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100), null)) .build() ) .putCustom(NodesShutdownMetadata.TYPE, shutdownMetadata(mlNode2)) @@ -899,7 +912,7 @@ public void testDetectReasonToRebalanceModels() { .putCustom( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100))) + .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100), null)) .build() ) .build() @@ -923,12 +936,12 @@ public void testDetectReasonToRebalanceModels() { TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( model1, - TrainedModelAssignment.Builder.empty(newParams(model1, 100)) + 
TrainedModelAssignment.Builder.empty(newParams(model1, 100), null) .addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) .addNewAssignment( model2, - TrainedModelAssignment.Builder.empty(newParams("model-2", 100)) + TrainedModelAssignment.Builder.empty(newParams("model-2", 100), null) .addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STARTING, "")) .addRoutingEntry(mlNode2, new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) @@ -945,12 +958,12 @@ public void testDetectReasonToRebalanceModels() { TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( model1, - TrainedModelAssignment.Builder.empty(newParams(model1, 100)) + TrainedModelAssignment.Builder.empty(newParams(model1, 100), null) .addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) .addNewAssignment( model2, - TrainedModelAssignment.Builder.empty(newParams("model-2", 100)) + TrainedModelAssignment.Builder.empty(newParams("model-2", 100), null) .addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STARTING, "")) .addRoutingEntry(mlNode2, new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) @@ -977,12 +990,12 @@ public void testDetectReasonToRebalanceModels() { TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( model1, - TrainedModelAssignment.Builder.empty(newParams(model1, 100)) + TrainedModelAssignment.Builder.empty(newParams(model1, 100), null) .addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) .addNewAssignment( model2, - TrainedModelAssignment.Builder.empty(newParams("model-2", 100)) + TrainedModelAssignment.Builder.empty(newParams("model-2", 100), null) .addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STARTING, "")) .addRoutingEntry(mlNode2, new RoutingInfo(1, 1, RoutingState.STARTING, "")) .stopAssignment("test") @@ -1000,12 +1013,12 @@ public void testDetectReasonToRebalanceModels() { TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( model1, - TrainedModelAssignment.Builder.empty(newParams(model1, 100)) + TrainedModelAssignment.Builder.empty(newParams(model1, 100), null) .addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) .addNewAssignment( model2, - TrainedModelAssignment.Builder.empty(newParams("model-2", 100)) + TrainedModelAssignment.Builder.empty(newParams("model-2", 100), null) .addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STARTING, "")) .addRoutingEntry(mlNode2, new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) @@ -1032,7 +1045,7 @@ public void testDetectReasonToRebalanceModels_WithNodeShutdowns() { TrainedModelAssignmentMetadata fullModelAllocation = TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( model1, - TrainedModelAssignment.Builder.empty(newParams(model1, 100)) + TrainedModelAssignment.Builder.empty(newParams(model1, 100), null) .addRoutingEntry(mlNode1.getId(), new RoutingInfo(1, 1, RoutingState.STARTED, "")) .addRoutingEntry(mlNode2.getId(), new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) @@ -1227,7 +1240,7 @@ public void testDetectReasonToRebalanceModels_GivenSingleMlJobStopped() { .putCustom( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(modelId, TrainedModelAssignment.Builder.empty(newParams(modelId, 100))) + .addNewAssignment(modelId, TrainedModelAssignment.Builder.empty(newParams(modelId, 100), null)) .build() ) .build() @@ -1242,7 +1255,7 @@ public void testDetectReasonToRebalanceModels_GivenSingleMlJobStopped() { .putCustom( 
TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(modelId, TrainedModelAssignment.Builder.empty(newParams(modelId, 100))) + .addNewAssignment(modelId, TrainedModelAssignment.Builder.empty(newParams(modelId, 100), null)) .build() ) .build() @@ -1265,7 +1278,7 @@ public void testDetectReasonToRebalanceModels_GivenOutdatedAssignments() { TrainedModelAssignmentMetadata modelMetadata = TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( modelId, - TrainedModelAssignment.Builder.empty(newParams(modelId, 100)) + TrainedModelAssignment.Builder.empty(newParams(modelId, 100), null) .addRoutingEntry(mlNodeId, new RoutingInfo(0, 0, RoutingState.STARTED, "")) ) .build(); @@ -1342,7 +1355,7 @@ public void testDetectReasonToRebalanceModels_GivenMultipleMlJobsStopped() { .putCustom( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(modelId, TrainedModelAssignment.Builder.empty(newParams(modelId, 100))) + .addNewAssignment(modelId, TrainedModelAssignment.Builder.empty(newParams(modelId, 100), null)) .build() ) .build() @@ -1357,7 +1370,7 @@ public void testDetectReasonToRebalanceModels_GivenMultipleMlJobsStopped() { .putCustom( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(modelId, TrainedModelAssignment.Builder.empty(newParams(modelId, 100))) + .addNewAssignment(modelId, TrainedModelAssignment.Builder.empty(newParams(modelId, 100), null)) .build() ) .build() @@ -1419,7 +1432,7 @@ public void testDetectReasonToRebalanceModels_GivenMlJobsStarted() { .putCustom( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(modelId, TrainedModelAssignment.Builder.empty(newParams(modelId, 100))) + .addNewAssignment(modelId, TrainedModelAssignment.Builder.empty(newParams(modelId, 100), null)) .build() ) .build() @@ -1434,7 +1447,7 @@ public void testDetectReasonToRebalanceModels_GivenMlJobsStarted() { .putCustom( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(modelId, TrainedModelAssignment.Builder.empty(newParams(modelId, 100))) + .addNewAssignment(modelId, TrainedModelAssignment.Builder.empty(newParams(modelId, 100), null)) .build() ) .build() @@ -1459,7 +1472,7 @@ public void testAreAssignedNodesRemoved_GivenRemovedNodeThatIsRouted() { TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( modelId, - TrainedModelAssignment.Builder.empty(newParams(modelId, 10_000L)) + TrainedModelAssignment.Builder.empty(newParams(modelId, 10_000L), null) .addRoutingEntry(nodeId1, new RoutingInfo(1, 1, RoutingState.STARTED, "")) .addRoutingEntry(nodeId2, new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) @@ -1491,7 +1504,7 @@ public void testAreAssignedNodesRemoved_GivenRemovedNodeThatIsNotRouted() { TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( modelId, - TrainedModelAssignment.Builder.empty(newParams(modelId, 10_000L)) + TrainedModelAssignment.Builder.empty(newParams(modelId, 10_000L), null) .addRoutingEntry(nodeId1, new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .build() @@ -1519,7 +1532,7 @@ public void testAreAssignedNodesRemoved_GivenShuttingDownNodeThatIsRouted() { TrainedModelAssignmentMetadata trainedModelAssignmentMetadata = TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( modelId, - TrainedModelAssignment.Builder.empty(newParams(modelId, 10_000L)) + 
TrainedModelAssignment.Builder.empty(newParams(modelId, 10_000L), null) .addRoutingEntry(nodeId1, new RoutingInfo(1, 1, RoutingState.STARTED, "")) .addRoutingEntry(nodeId2, new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) @@ -1563,7 +1576,7 @@ public void testAreAssignedNodesRemoved_GivenShuttingDownNodeThatIsNotRouted() { TrainedModelAssignmentMetadata trainedModelAssignmentMetadata = TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( modelId, - TrainedModelAssignment.Builder.empty(newParams(modelId, 10_000L)) + TrainedModelAssignment.Builder.empty(newParams(modelId, 10_000L), null) .addRoutingEntry(nodeId2, new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .build(); @@ -1611,13 +1624,13 @@ public void testRemoveRoutingToUnassignableNodes_RemovesRouteForRemovedNodes() { TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( modelId1, - TrainedModelAssignment.Builder.empty(newParams(modelId1, 10_000L)) + TrainedModelAssignment.Builder.empty(newParams(modelId1, 10_000L), null) .addRoutingEntry(nodeId1, new RoutingInfo(1, 1, RoutingState.STARTED, "")) .addRoutingEntry(nodeId2, new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .addNewAssignment( modelId2, - TrainedModelAssignment.Builder.empty(newParams(modelId2, 10_000L)) + TrainedModelAssignment.Builder.empty(newParams(modelId2, 10_000L), null) .addRoutingEntry(nodeId1, new RoutingInfo(1, 1, RoutingState.STARTED, "")) .addRoutingEntry(nodeId2, new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) @@ -1668,14 +1681,14 @@ public void testRemoveRoutingToUnassignableNodes_AddsAStoppingRouteForShuttingDo TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( modelId1, - TrainedModelAssignment.Builder.empty(newParams(modelId1, 10_000L)) + TrainedModelAssignment.Builder.empty(newParams(modelId1, 10_000L), null) .addRoutingEntry(nodeId1, new RoutingInfo(1, 1, RoutingState.STARTED, "")) .addRoutingEntry(nodeId2, new RoutingInfo(1, 1, RoutingState.STARTED, "")) .addRoutingEntry(nodeId3, new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .addNewAssignment( modelId2, - TrainedModelAssignment.Builder.empty(newParams(modelId2, 10_000L)) + TrainedModelAssignment.Builder.empty(newParams(modelId2, 10_000L), null) .addRoutingEntry(nodeId1, new RoutingInfo(1, 1, RoutingState.STARTED, "")) .addRoutingEntry(nodeId2, new RoutingInfo(1, 1, RoutingState.STARTED, "")) .addRoutingEntry(nodeId3, new RoutingInfo(1, 1, RoutingState.STARTED, "")) @@ -1728,14 +1741,14 @@ public void testRemoveRoutingToUnassignableNodes_IgnoresARouteThatIsStoppedForSh TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( modelId1, - TrainedModelAssignment.Builder.empty(newParams(modelId1, 10_000L)) + TrainedModelAssignment.Builder.empty(newParams(modelId1, 10_000L), null) .addRoutingEntry(nodeId1, new RoutingInfo(1, 1, RoutingState.STARTED, "")) .addRoutingEntry(nodeId2, new RoutingInfo(1, 1, RoutingState.STARTED, "")) .addRoutingEntry(nodeId3, new RoutingInfo(1, 1, RoutingState.STOPPED, "")) ) .addNewAssignment( modelId2, - TrainedModelAssignment.Builder.empty(newParams(modelId2, 10_000L)) + TrainedModelAssignment.Builder.empty(newParams(modelId2, 10_000L), null) .addRoutingEntry(nodeId1, new RoutingInfo(1, 1, RoutingState.STARTED, "")) .addRoutingEntry(nodeId2, new RoutingInfo(1, 1, RoutingState.STARTED, "")) .addRoutingEntry(nodeId3, new RoutingInfo(1, 1, RoutingState.STOPPED, "")) @@ -1789,12 +1802,12 @@ public void testSetShuttingDownNodeRoutesToStopping_GivenAnAssignmentRoutedToShu TrainedModelAssignmentMetadata currentMetadata = 
TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( availableNodeModelId, - TrainedModelAssignment.Builder.empty(taskParamsRunning) + TrainedModelAssignment.Builder.empty(taskParamsRunning, null) .addRoutingEntry(availableNode, new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .addNewAssignment( shuttingDownModelId, - TrainedModelAssignment.Builder.empty(taskParamsShuttingDown) + TrainedModelAssignment.Builder.empty(taskParamsShuttingDown, null) .addRoutingEntry(shuttingDownNodeId, new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .build(); @@ -1802,12 +1815,12 @@ public void testSetShuttingDownNodeRoutesToStopping_GivenAnAssignmentRoutedToShu TrainedModelAssignmentMetadata.Builder rebalanced = TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( availableNodeModelId, - TrainedModelAssignment.Builder.empty(taskParamsRunning) + TrainedModelAssignment.Builder.empty(taskParamsRunning, null) .addRoutingEntry(availableNode, new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .addNewAssignment( shuttingDownModelId, - TrainedModelAssignment.Builder.empty(taskParamsRunning) + TrainedModelAssignment.Builder.empty(taskParamsRunning, null) .addRoutingEntry(availableNode, new RoutingInfo(1, 1, RoutingState.STARTING, "")) ); @@ -1840,12 +1853,12 @@ public void testSetShuttingDownNodeRoutesToStopping_GivenAnAssignmentRoutedToShu TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( shuttingDownModelId, - TrainedModelAssignment.Builder.empty(taskParamsShuttingDown) + TrainedModelAssignment.Builder.empty(taskParamsShuttingDown, null) .addRoutingEntry(shuttingDownNodeId, new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .addNewAssignment( notShuttingDownModelId, - TrainedModelAssignment.Builder.empty(taskParamsNotShuttingDown) + TrainedModelAssignment.Builder.empty(taskParamsNotShuttingDown, null) .addRoutingEntry(availableNode, new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .build(); @@ -1853,12 +1866,12 @@ public void testSetShuttingDownNodeRoutesToStopping_GivenAnAssignmentRoutedToShu TrainedModelAssignmentMetadata.Builder rebalanced = TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( shuttingDownModelId, - TrainedModelAssignment.Builder.empty(taskParamsShuttingDown) + TrainedModelAssignment.Builder.empty(taskParamsShuttingDown, null) .addRoutingEntry(availableNode, new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) .addNewAssignment( notShuttingDownModelId, - TrainedModelAssignment.Builder.empty(taskParamsNotShuttingDown) + TrainedModelAssignment.Builder.empty(taskParamsNotShuttingDown, null) .addRoutingEntry(availableNode, new RoutingInfo(1, 1, RoutingState.STARTED, "")) ); @@ -1897,7 +1910,7 @@ public void testSetShuttingDownNodeRoutesToStopping_GivenAnAssignmentRoutedToShu TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( modelId, - TrainedModelAssignment.Builder.empty(taskParamsShuttingDown) + TrainedModelAssignment.Builder.empty(taskParamsShuttingDown, null) .addRoutingEntry(disappearingNodeId, new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .build(); @@ -1905,7 +1918,7 @@ public void testSetShuttingDownNodeRoutesToStopping_GivenAnAssignmentRoutedToShu TrainedModelAssignmentMetadata.Builder rebalanced = TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( modelId, - TrainedModelAssignment.Builder.empty(taskParamsShuttingDown) + TrainedModelAssignment.Builder.empty(taskParamsShuttingDown, null) 
.addRoutingEntry(availableNode, new RoutingInfo(1, 1, RoutingState.STARTED, "")) ); @@ -1933,7 +1946,7 @@ public void testSetShuttingDownNodeRoutesToStopping_GivenAssignmentDoesNotExist_ TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( modelId, - TrainedModelAssignment.Builder.empty(taskParamsShuttingDown) + TrainedModelAssignment.Builder.empty(taskParamsShuttingDown, null) .addRoutingEntry(shuttingDownNodeId, new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .build(); @@ -2006,7 +2019,10 @@ public void testSetAllocationToStopping() { .putCustom( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(modelId, TrainedModelAssignment.Builder.empty(newParams(modelId, randomNonNegativeLong()))) + .addNewAssignment( + modelId, + TrainedModelAssignment.Builder.empty(newParams(modelId, randomNonNegativeLong()), null) + ) .build() ) .build() diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentMetadataTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentMetadataTests.java index 6c5223eae4d99..dec85bff87d67 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentMetadataTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentMetadataTests.java @@ -64,7 +64,7 @@ public void testIsAssigned() { TrainedModelAssignmentMetadata metadata = TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( allocatedDeploymentId, - TrainedModelAssignment.Builder.empty(randomParams(allocatedDeploymentId, allocatedModelId)) + TrainedModelAssignment.Builder.empty(randomParams(allocatedDeploymentId, allocatedModelId), null) ) .build(); assertThat(metadata.isAssigned(allocatedDeploymentId), is(true)); @@ -78,7 +78,7 @@ public void testModelIsDeployed() { TrainedModelAssignmentMetadata metadata = TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( allocatedDeploymentId, - TrainedModelAssignment.Builder.empty(randomParams(allocatedDeploymentId, allocatedModelId)) + TrainedModelAssignment.Builder.empty(randomParams(allocatedDeploymentId, allocatedModelId), null) ) .build(); assertThat(metadata.modelIsDeployed(allocatedDeploymentId), is(false)); @@ -92,9 +92,9 @@ public void testGetDeploymentsUsingModel() { String deployment2 = "test_deployment_2"; String deployment3 = "test_deployment_3"; TrainedModelAssignmentMetadata metadata = TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(deployment1, TrainedModelAssignment.Builder.empty(randomParams(deployment1, modelId1))) - .addNewAssignment(deployment2, TrainedModelAssignment.Builder.empty(randomParams(deployment2, modelId1))) - .addNewAssignment(deployment3, TrainedModelAssignment.Builder.empty(randomParams(deployment3, "different_model"))) + .addNewAssignment(deployment1, TrainedModelAssignment.Builder.empty(randomParams(deployment1, modelId1), null)) + .addNewAssignment(deployment2, TrainedModelAssignment.Builder.empty(randomParams(deployment2, modelId1), null)) + .addNewAssignment(deployment3, TrainedModelAssignment.Builder.empty(randomParams(deployment3, "different_model"), null)) .build(); var assignments = metadata.getDeploymentsUsingModel(modelId1); assertThat(assignments, hasSize(2)); diff --git 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java index 2444134ce2920..9fbc2b43f1137 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java @@ -49,10 +49,12 @@ import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.xpack.ml.MachineLearning.UTILITY_THREAD_POOL_NAME; import static org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentClusterServiceTests.shutdownMetadata; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -91,19 +93,13 @@ public void setupObjects() { taskManager = new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()); deploymentManager = mock(DeploymentManager.class); doAnswer(invocationOnMock -> { - ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; - listener.onResponse(invocationOnMock.getArguments()[0]); + ActionListener listener = invocationOnMock.getArgument(1); + listener.onResponse(invocationOnMock.getArgument(0)); return null; }).when(deploymentManager).startDeployment(any(), any()); doAnswer(invocationOnMock -> { - ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; - listener.onResponse(null); - return null; - }).when(deploymentManager).stopAfterCompletingPendingWork(any()); - - doAnswer(invocationOnMock -> { - ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + ActionListener listener = invocationOnMock.getArgument(1); listener.onResponse(AcknowledgedResponse.TRUE); return null; }).when(trainedModelAssignmentService).updateModelAssignmentState(any(), any()); @@ -114,15 +110,33 @@ public void shutdown() throws InterruptedException { terminate(threadPool); } - public void testLoadQueuedModels_GivenNoQueuedModels() { - TrainedModelAssignmentNodeService trainedModelAssignmentNodeService = createService(); - + public void testLoadQueuedModels_GivenNoQueuedModels() throws InterruptedException { // When there are no queued models - trainedModelAssignmentNodeService.loadQueuedModels(); + loadQueuedModels(createService()); verify(deploymentManager, never()).startDeployment(any(), any()); } - public void testLoadQueuedModels() { + private void loadQueuedModels(TrainedModelAssignmentNodeService trainedModelAssignmentNodeService) throws InterruptedException { + loadQueuedModels(trainedModelAssignmentNodeService, false); + } + + private void loadQueuedModels(TrainedModelAssignmentNodeService trainedModelAssignmentNodeService, boolean expectedRunImmediately) + throws InterruptedException { + var latch = new CountDownLatch(1); + var actual = new AtomicReference(); // AtomicReference for nullable + trainedModelAssignmentNodeService.loadQueuedModels( + ActionListener.runAfter(ActionListener.wrap(actual::set, e -> {}), latch::countDown) + ); + assertTrue("Timed out waiting for loadQueuedModels to finish.", latch.await(10, TimeUnit.SECONDS)); + 
assertThat("Test failed to call the onResponse handler.", actual.get(), notNullValue()); + assertThat( + "We should rerun immediately if there are still model loading tasks to process.", + actual.get(), + equalTo(expectedRunImmediately) + ); + } + + public void testLoadQueuedModels() throws InterruptedException { TrainedModelAssignmentNodeService trainedModelAssignmentNodeService = createService(); String modelToLoad = "loading-model"; @@ -136,7 +150,8 @@ public void testLoadQueuedModels() { trainedModelAssignmentNodeService.prepareModelToLoad(newParams(deploymentId, modelToLoad)); trainedModelAssignmentNodeService.prepareModelToLoad(newParams(anotherDeployment, anotherModel)); - trainedModelAssignmentNodeService.loadQueuedModels(); + loadQueuedModels(trainedModelAssignmentNodeService, true); + loadQueuedModels(trainedModelAssignmentNodeService, false); ArgumentCaptor taskCapture = ArgumentCaptor.forClass(TrainedModelDeploymentTask.class); ArgumentCaptor requestCapture = ArgumentCaptor.forClass( @@ -157,11 +172,11 @@ public void testLoadQueuedModels() { // Since models are loaded, there shouldn't be any more loadings to occur trainedModelAssignmentNodeService.prepareModelToLoad(newParams(anotherDeployment, anotherModel)); - trainedModelAssignmentNodeService.loadQueuedModels(); + loadQueuedModels(trainedModelAssignmentNodeService); verifyNoMoreInteractions(deploymentManager, trainedModelAssignmentService); } - public void testLoadQueuedModelsWhenFailureIsRetried() { + public void testLoadQueuedModelsWhenFailureIsRetried() throws InterruptedException { String modelToLoad = "loading-model"; String failedModelToLoad = "failed-search-loading-model"; String deploymentId = "foo"; @@ -174,9 +189,9 @@ public void testLoadQueuedModelsWhenFailureIsRetried() { trainedModelAssignmentNodeService.prepareModelToLoad(newParams(deploymentId, modelToLoad)); trainedModelAssignmentNodeService.prepareModelToLoad(newParams(failedDeploymentId, failedModelToLoad)); - trainedModelAssignmentNodeService.loadQueuedModels(); - - trainedModelAssignmentNodeService.loadQueuedModels(); + loadQueuedModels(trainedModelAssignmentNodeService, true); + loadQueuedModels(trainedModelAssignmentNodeService, false); + loadQueuedModels(trainedModelAssignmentNodeService, false); ArgumentCaptor startTaskCapture = ArgumentCaptor.forClass(TrainedModelDeploymentTask.class); ArgumentCaptor requestCapture = ArgumentCaptor.forClass( @@ -199,7 +214,7 @@ public void testLoadQueuedModelsWhenFailureIsRetried() { verifyNoMoreInteractions(deploymentManager, trainedModelAssignmentService); } - public void testLoadQueuedModelsWhenStopped() { + public void testLoadQueuedModelsWhenStopped() throws InterruptedException { TrainedModelAssignmentNodeService trainedModelAssignmentNodeService = createService(); // When there are no queued models @@ -209,7 +224,12 @@ public void testLoadQueuedModelsWhenStopped() { trainedModelAssignmentNodeService.prepareModelToLoad(newParams(modelToLoad, modelToLoad)); trainedModelAssignmentNodeService.stop(); - trainedModelAssignmentNodeService.loadQueuedModels(); + var latch = new CountDownLatch(1); + trainedModelAssignmentNodeService.loadQueuedModels(ActionListener.running(latch::countDown)); + assertTrue( + "loadQueuedModels should immediately call the listener without forking to another thread.", + latch.await(0, TimeUnit.SECONDS) + ); verifyNoMoreInteractions(deploymentManager, trainedModelAssignmentService); } @@ -231,7 +251,8 @@ public void testLoadQueuedModelsWhenTaskIsStopped() throws Exception { 
trainedModelAssignmentNodeService.prepareModelToLoad(newParams(loadingDeploymentId, modelToLoad)); trainedModelAssignmentNodeService.prepareModelToLoad(newParams(stoppedLoadingDeploymentId, stoppedModelToLoad)); trainedModelAssignmentNodeService.getTask(stoppedLoadingDeploymentId).stop("testing", false, ActionListener.noop()); - trainedModelAssignmentNodeService.loadQueuedModels(); + loadQueuedModels(trainedModelAssignmentNodeService, true); + loadQueuedModels(trainedModelAssignmentNodeService, false); assertBusy(() -> { ArgumentCaptor stoppedTaskCapture = ArgumentCaptor.forClass(TrainedModelDeploymentTask.class); @@ -283,15 +304,8 @@ public void testLoadQueuedModelsWhenOneFails() throws InterruptedException { trainedModelAssignmentNodeService.prepareModelToLoad(newParams(loadingDeploymentId, modelToLoad)); trainedModelAssignmentNodeService.prepareModelToLoad(newParams(failedLoadingDeploymentId, failedModelToLoad)); - CountDownLatch latch = new CountDownLatch(1); - doAnswer(invocationOnMock -> { - latch.countDown(); - return null; - }).when(deploymentManager).stopDeployment(any()); - - trainedModelAssignmentNodeService.loadQueuedModels(); - - latch.await(5, TimeUnit.SECONDS); + loadQueuedModels(trainedModelAssignmentNodeService, true); + loadQueuedModels(trainedModelAssignmentNodeService, false); ArgumentCaptor startTaskCapture = ArgumentCaptor.forClass(TrainedModelDeploymentTask.class); ArgumentCaptor requestCapture = ArgumentCaptor.forClass( @@ -318,7 +332,7 @@ public void testLoadQueuedModelsWhenOneFails() throws InterruptedException { verifyNoMoreInteractions(deploymentManager, trainedModelAssignmentService); } - public void testClusterChangedWithResetMode() { + public void testClusterChangedWithResetMode() throws InterruptedException { final TrainedModelAssignmentNodeService trainedModelAssignmentNodeService = createService(); final DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId(NODE_ID).add(DiscoveryNodeUtils.create(NODE_ID, NODE_ID)).build(); String modelOne = "model-1"; @@ -339,17 +353,17 @@ public void testClusterChangedWithResetMode() { TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( modelOne, - TrainedModelAssignment.Builder.empty(newParams(deploymentOne, modelOne)) + TrainedModelAssignment.Builder.empty(newParams(deploymentOne, modelOne), null) .addRoutingEntry(NODE_ID, new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) .addNewAssignment( modelTwo, - TrainedModelAssignment.Builder.empty(newParams(deploymentTwo, modelTwo)) + TrainedModelAssignment.Builder.empty(newParams(deploymentTwo, modelTwo), null) .addRoutingEntry(NODE_ID, new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) .addNewAssignment( notUsedModel, - TrainedModelAssignment.Builder.empty(newParams(notUsedDeployment, notUsedModel)) + TrainedModelAssignment.Builder.empty(newParams(notUsedDeployment, notUsedModel), null) .addRoutingEntry("some-other-node", new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) .build() @@ -362,7 +376,7 @@ public void testClusterChangedWithResetMode() { ); trainedModelAssignmentNodeService.clusterChanged(event); - trainedModelAssignmentNodeService.loadQueuedModels(); + loadQueuedModels(trainedModelAssignmentNodeService); verifyNoMoreInteractions(deploymentManager, trainedModelAssignmentService); } @@ -397,7 +411,7 @@ public void testClusterChanged_WhenAssigmentIsRoutedToShuttingDownNode_CallsStop TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( deploymentOne, - TrainedModelAssignment.Builder.empty(taskParams) + 
TrainedModelAssignment.Builder.empty(taskParams, null) .addRoutingEntry(NODE_ID, new RoutingInfo(1, 1, RoutingState.STOPPING, "")) ) .build() @@ -450,7 +464,7 @@ public void testClusterChanged_WhenAssigmentIsRoutedToShuttingDownNode_ButOtherA TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( deploymentOne, - TrainedModelAssignment.Builder.empty(taskParams) + TrainedModelAssignment.Builder.empty(taskParams, null) .addRoutingEntry(NODE_ID, new RoutingInfo(1, 1, RoutingState.STOPPING, "")) .addRoutingEntry(node2, new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) @@ -480,7 +494,6 @@ public void testClusterChanged_WhenAssigmentIsRoutedToShuttingDownNodeButAlready String modelOne = "model-1"; String deploymentOne = "deployment-1"; - ArgumentCaptor stopParamsCapture = ArgumentCaptor.forClass(TrainedModelDeploymentTask.class); var taskParams = newParams(deploymentOne, modelOne); ClusterChangedEvent event = new ClusterChangedEvent( @@ -494,7 +507,7 @@ public void testClusterChanged_WhenAssigmentIsRoutedToShuttingDownNodeButAlready TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( deploymentOne, - TrainedModelAssignment.Builder.empty(taskParams) + TrainedModelAssignment.Builder.empty(taskParams, null) .addRoutingEntry(NODE_ID, new RoutingInfo(1, 1, RoutingState.STOPPING, "")) ) .build() @@ -535,7 +548,7 @@ public void testClusterChanged_WhenAssigmentIsRoutedToShuttingDownNodeWithStarti TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( deploymentOne, - TrainedModelAssignment.Builder.empty(taskParams) + TrainedModelAssignment.Builder.empty(taskParams, null) .addRoutingEntry(NODE_ID, new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) .build() @@ -558,7 +571,7 @@ public void testClusterChanged_WhenAssigmentIsRoutedToShuttingDownNodeWithStarti verifyNoMoreInteractions(deploymentManager, trainedModelAssignmentService); } - public void testClusterChanged_WhenAssigmentIsStopping_DoesNotAddModelToBeLoaded() { + public void testClusterChanged_WhenAssigmentIsStopping_DoesNotAddModelToBeLoaded() throws InterruptedException { final TrainedModelAssignmentNodeService trainedModelAssignmentNodeService = createService(); final DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId(NODE_ID).add(DiscoveryNodeUtils.create(NODE_ID, NODE_ID)).build(); String modelOne = "model-1"; @@ -577,7 +590,7 @@ public void testClusterChanged_WhenAssigmentIsStopping_DoesNotAddModelToBeLoaded TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( deploymentOne, - TrainedModelAssignment.Builder.empty(taskParams) + TrainedModelAssignment.Builder.empty(taskParams, null) .addRoutingEntry(NODE_ID, new RoutingInfo(1, 1, RoutingState.STARTING, "")) .stopAssignment("stopping") ) @@ -592,7 +605,7 @@ public void testClusterChanged_WhenAssigmentIsStopping_DoesNotAddModelToBeLoaded // trainedModelAssignmentNodeService.prepareModelToLoad(taskParams); trainedModelAssignmentNodeService.clusterChanged(event); - trainedModelAssignmentNodeService.loadQueuedModels(); + loadQueuedModels(trainedModelAssignmentNodeService); verify(deploymentManager, never()).startDeployment(any(), any()); verifyNoMoreInteractions(deploymentManager, trainedModelAssignmentService); @@ -626,12 +639,12 @@ public void testClusterChanged() throws Exception { TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( deploymentOne, - TrainedModelAssignment.Builder.empty(newParams(deploymentOne, modelOne)) + TrainedModelAssignment.Builder.empty(newParams(deploymentOne, modelOne), null) 
.addRoutingEntry(NODE_ID, new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) .addNewAssignment( deploymentTwo, - TrainedModelAssignment.Builder.empty(newParams(deploymentTwo, modelTwo)) + TrainedModelAssignment.Builder.empty(newParams(deploymentTwo, modelTwo), null) .addRoutingEntry(NODE_ID, new RoutingInfo(1, 1, RoutingState.STARTING, "")) .updateExistingRoutingEntry( NODE_ID, @@ -645,7 +658,7 @@ public void testClusterChanged() throws Exception { ) .addNewAssignment( previouslyUsedDeployment, - TrainedModelAssignment.Builder.empty(newParams(previouslyUsedDeployment, previouslyUsedModel)) + TrainedModelAssignment.Builder.empty(newParams(previouslyUsedDeployment, previouslyUsedModel), null) .addRoutingEntry(NODE_ID, new RoutingInfo(1, 1, RoutingState.STARTING, "")) .updateExistingRoutingEntry( NODE_ID, @@ -659,7 +672,7 @@ public void testClusterChanged() throws Exception { ) .addNewAssignment( notUsedDeployment, - TrainedModelAssignment.Builder.empty(newParams(notUsedDeployment, notUsedModel)) + TrainedModelAssignment.Builder.empty(newParams(notUsedDeployment, notUsedModel), null) .addRoutingEntry("some-other-node", new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) .build() @@ -684,17 +697,17 @@ public void testClusterChanged() throws Exception { TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( deploymentOne, - TrainedModelAssignment.Builder.empty(newParams(deploymentOne, modelOne)) + TrainedModelAssignment.Builder.empty(newParams(deploymentOne, modelOne), null) .addRoutingEntry(NODE_ID, new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) .addNewAssignment( deploymentTwo, - TrainedModelAssignment.Builder.empty(newParams(deploymentTwo, modelTwo)) + TrainedModelAssignment.Builder.empty(newParams(deploymentTwo, modelTwo), null) .addRoutingEntry("some-other-node", new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) .addNewAssignment( notUsedDeployment, - TrainedModelAssignment.Builder.empty(newParams(notUsedDeployment, notUsedModel)) + TrainedModelAssignment.Builder.empty(newParams(notUsedDeployment, notUsedModel), null) .addRoutingEntry("some-other-node", new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) .build() @@ -706,7 +719,8 @@ public void testClusterChanged() throws Exception { ); trainedModelAssignmentNodeService.clusterChanged(event); - trainedModelAssignmentNodeService.loadQueuedModels(); + loadQueuedModels(trainedModelAssignmentNodeService, true); + loadQueuedModels(trainedModelAssignmentNodeService, false); assertBusy(() -> { ArgumentCaptor stoppedTaskCapture = ArgumentCaptor.forClass(TrainedModelDeploymentTask.class); @@ -737,7 +751,7 @@ public void testClusterChanged() throws Exception { TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( deploymentOne, - TrainedModelAssignment.Builder.empty(newParams(deploymentOne, modelOne)) + TrainedModelAssignment.Builder.empty(newParams(deploymentOne, modelOne), null) .addRoutingEntry(NODE_ID, new RoutingInfo(1, 1, RoutingState.STARTING, "")) ) .build() @@ -749,7 +763,7 @@ public void testClusterChanged() throws Exception { ); trainedModelAssignmentNodeService.clusterChanged(event); - trainedModelAssignmentNodeService.loadQueuedModels(); + loadQueuedModels(trainedModelAssignmentNodeService); verifyNoMoreInteractions(deploymentManager, trainedModelAssignmentService); } @@ -764,7 +778,8 @@ public void testClusterChanged_GivenAllStartedAssignments_AndNonMatchingTargetAl givenAssignmentsInClusterStateForModels(List.of(deploymentOne, deploymentTwo), List.of(modelOne, modelTwo)); 
trainedModelAssignmentNodeService.prepareModelToLoad(newParams(deploymentOne, modelOne)); trainedModelAssignmentNodeService.prepareModelToLoad(newParams(deploymentTwo, modelTwo)); - trainedModelAssignmentNodeService.loadQueuedModels(); + loadQueuedModels(trainedModelAssignmentNodeService, true); + loadQueuedModels(trainedModelAssignmentNodeService, false); ClusterChangedEvent event = new ClusterChangedEvent( "shouldUpdateAllocations", @@ -778,12 +793,12 @@ public void testClusterChanged_GivenAllStartedAssignments_AndNonMatchingTargetAl TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( deploymentOne, - TrainedModelAssignment.Builder.empty(newParams(deploymentOne, modelOne)) + TrainedModelAssignment.Builder.empty(newParams(deploymentOne, modelOne), null) .addRoutingEntry(NODE_ID, new RoutingInfo(1, 3, RoutingState.STARTED, "")) ) .addNewAssignment( deploymentTwo, - TrainedModelAssignment.Builder.empty(newParams(deploymentTwo, modelTwo)) + TrainedModelAssignment.Builder.empty(newParams(deploymentTwo, modelTwo), null) .addRoutingEntry(NODE_ID, new RoutingInfo(2, 1, RoutingState.STARTED, "")) ) .build() @@ -830,7 +845,7 @@ private void givenAssignmentsInClusterStateForModels(List deploymentIds, for (int i = 0; i < modelIds.size(); i++) { builder.addNewAssignment( deploymentIds.get(i), - TrainedModelAssignment.Builder.empty(newParams(deploymentIds.get(i), modelIds.get(i))) + TrainedModelAssignment.Builder.empty(newParams(deploymentIds.get(i), modelIds.get(i)), null) .addRoutingEntry("test-node", new RoutingInfo(1, 1, RoutingState.STARTING, "")) ); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java index 53b737b38c284..65a974e04045e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.inference.assignment.AssignmentState; import org.elasticsearch.xpack.core.ml.inference.assignment.Priority; @@ -61,11 +62,12 @@ public void testRebalance_GivenAllAssignmentsAreSatisfied_ShouldMakeNoChanges() TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( deploymentId1, - TrainedModelAssignment.Builder.empty(taskParams1).addRoutingEntry("node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")) + TrainedModelAssignment.Builder.empty(taskParams1, null) + .addRoutingEntry("node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .addNewAssignment( deploymentId2, - TrainedModelAssignment.Builder.empty(taskParams2) + TrainedModelAssignment.Builder.empty(taskParams2, null) .addRoutingEntry("node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")) .addRoutingEntry("node-2", new RoutingInfo(3, 3, RoutingState.STARTED, "")) ) @@ -101,11 +103,12 @@ public void testRebalance_GivenAllAssignmentsAreSatisfied_GivenOutdatedRoutingEn TrainedModelAssignmentMetadata 
currentMetadata = TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( deploymentId1, - TrainedModelAssignment.Builder.empty(taskParams1).addRoutingEntry("node-1", new RoutingInfo(0, 0, RoutingState.STARTED, "")) + TrainedModelAssignment.Builder.empty(taskParams1, null) + .addRoutingEntry("node-1", new RoutingInfo(0, 0, RoutingState.STARTED, "")) ) .addNewAssignment( deploymentId2, - TrainedModelAssignment.Builder.empty(taskParams2) + TrainedModelAssignment.Builder.empty(taskParams2, null) .addRoutingEntry("node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")) .addRoutingEntry("node-2", new RoutingInfo(3, 3, RoutingState.STARTED, "")) ) @@ -140,11 +143,18 @@ public void testRebalance_GivenModelToAddAlreadyExists() { String modelId = "model-to-add"; StartTrainedModelDeploymentAction.TaskParams taskParams = normalPriorityParams(modelId, modelId, 1024L, 1, 1); TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(modelId, TrainedModelAssignment.Builder.empty(taskParams)) + .addNewAssignment(modelId, TrainedModelAssignment.Builder.empty(taskParams, null)) .build(); expectThrows( ResourceAlreadyExistsException.class, - () -> new TrainedModelAssignmentRebalancer(currentMetadata, Map.of(), Map.of(), Optional.of(taskParams), 1, false).rebalance() + () -> new TrainedModelAssignmentRebalancer( + currentMetadata, + Map.of(), + Map.of(), + Optional.of(new CreateTrainedModelAssignmentAction.Request(taskParams, null)), + 1, + false + ).rebalance() ); } @@ -157,7 +167,7 @@ public void testRebalance_GivenFirstModelToAdd_NoMLNodes() throws Exception { currentMetadata, Map.of(), Map.of(), - Optional.of(taskParams), + Optional.of(new CreateTrainedModelAssignmentAction.Request(taskParams, null)), 1, false ).rebalance().build(); @@ -185,7 +195,7 @@ public void testRebalance_GivenFirstModelToAdd_NotEnoughProcessors() throws Exce currentMetadata, nodeLoads, Map.of(List.of(), List.of(node)), - Optional.of(taskParams), + Optional.of(new CreateTrainedModelAssignmentAction.Request(taskParams, null)), 1, false ).rebalance().build(); @@ -222,7 +232,7 @@ public void testRebalance_GivenFirstModelToAdd_NotEnoughMemory() throws Exceptio currentMetadata, nodeLoads, Map.of(), - Optional.of(taskParams), + Optional.of(new CreateTrainedModelAssignmentAction.Request(taskParams, null)), 1, false ).rebalance().build(); @@ -259,7 +269,7 @@ public void testRebalance_GivenFirstModelToAdd_ErrorDetectingNodeLoad() throws E currentMetadata, nodeLoads, Map.of(), - Optional.of(taskParams), + Optional.of(new CreateTrainedModelAssignmentAction.Request(taskParams, null)), 1, false ).rebalance().build(); @@ -296,7 +306,7 @@ public void testRebalance_GivenProblemsOnMultipleNodes() throws Exception { currentMetadata, nodeLoads, Map.of(List.of(), List.of(node1, node2)), - Optional.of(taskParams), + Optional.of(new CreateTrainedModelAssignmentAction.Request(taskParams, null)), 1, false ).rebalance().build(); @@ -330,7 +340,7 @@ public void testRebalance_GivenFirstModelToAdd_FitsFully() throws Exception { currentMetadata, nodeLoads, Map.of(List.of(), List.of(node1)), - Optional.of(taskParams), + Optional.of(new CreateTrainedModelAssignmentAction.Request(taskParams, null)), 1, false ).rebalance().build(); @@ -357,7 +367,7 @@ public void testRebalance_GivenModelToAdd_AndPreviousAssignments_AndTwoNodes_All TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( previousDeploymentId, - 
TrainedModelAssignment.Builder.empty(normalPriorityParams(previousDeploymentId, previousDeploymentId, 1024L, 3, 2)) + TrainedModelAssignment.Builder.empty(normalPriorityParams(previousDeploymentId, previousDeploymentId, 1024L, 3, 2), null) .addRoutingEntry("node-1", new RoutingInfo(2, 2, RoutingState.STARTED, "")) .addRoutingEntry("node-2", new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) @@ -370,7 +380,7 @@ public void testRebalance_GivenModelToAdd_AndPreviousAssignments_AndTwoNodes_All currentMetadata, nodeLoads, Map.of(List.of(), List.of(node1, node2)), - Optional.of(taskParams), + Optional.of(new CreateTrainedModelAssignmentAction.Request(taskParams, null)), 1, false ).rebalance().build(); @@ -416,13 +426,13 @@ public void testRebalance_GivenPreviousAssignments_AndNewNode() throws Exception TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( previousDeployment1Id, - TrainedModelAssignment.Builder.empty(normalPriorityParams(previousDeployment1Id, 1024L, 3, 2)) + TrainedModelAssignment.Builder.empty(normalPriorityParams(previousDeployment1Id, 1024L, 3, 2), null) .addRoutingEntry("node-1", new RoutingInfo(2, 2, RoutingState.STARTED, "")) .addRoutingEntry("node-2", new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .addNewAssignment( previousDeployment2Id, - TrainedModelAssignment.Builder.empty(normalPriorityParams(previousDeployment2Id, 1024L, 4, 1)) + TrainedModelAssignment.Builder.empty(normalPriorityParams(previousDeployment2Id, 1024L, 4, 1), null) .addRoutingEntry("node-2", new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .build(); @@ -483,13 +493,13 @@ public void testRebalance_GivenPreviousAssignments_AndRemovedNode_AndRemainingNo TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( previousDeployment1Id, - TrainedModelAssignment.Builder.empty(normalPriorityParams(previousDeployment1Id, 1024L, 3, 2)) + TrainedModelAssignment.Builder.empty(normalPriorityParams(previousDeployment1Id, 1024L, 3, 2), null) .addRoutingEntry("node-1", new RoutingInfo(2, 2, RoutingState.STARTED, "")) .addRoutingEntry("node-2", new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .addNewAssignment( previousDeployment2Id, - TrainedModelAssignment.Builder.empty(normalPriorityParams(previousDeployment2Id, 1024L, 4, 1)) + TrainedModelAssignment.Builder.empty(normalPriorityParams(previousDeployment2Id, 1024L, 4, 1), null) .addRoutingEntry("node-2", new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .build(); @@ -554,13 +564,13 @@ public void testRebalance_GivenPreviousAssignments_AndRemovedNode_AndRemainingNo TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( previousDeployment1Id, - TrainedModelAssignment.Builder.empty(normalPriorityParams(previousDeployment1Id, 1024L, 3, 2)) + TrainedModelAssignment.Builder.empty(normalPriorityParams(previousDeployment1Id, 1024L, 3, 2), null) .addRoutingEntry("node-1", new RoutingInfo(2, 2, RoutingState.STARTED, "")) .addRoutingEntry("node-2", new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .addNewAssignment( previousDeployment2Id, - TrainedModelAssignment.Builder.empty(normalPriorityParams(previousDeployment2Id, 1024L, 1, 1)) + TrainedModelAssignment.Builder.empty(normalPriorityParams(previousDeployment2Id, 1024L, 1, 1), null) .addRoutingEntry("node-2", new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .build(); @@ -610,7 +620,7 @@ public void 
testRebalance_GivenFailedAssignment_RestartsAssignment() throws Exce TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( modelId, - TrainedModelAssignment.Builder.empty(normalPriorityParams(modelId, 1024L, 1, 1)) + TrainedModelAssignment.Builder.empty(normalPriorityParams(modelId, 1024L, 1, 1), null) .addRoutingEntry("node-1", new RoutingInfo(1, 1, RoutingState.FAILED, "some error")) ) .build(); @@ -656,7 +666,7 @@ public void testRebalance_GivenLowPriorityModelToAdd_OnlyModel_NotEnoughMemory() currentMetadata, nodeLoads, Map.of(), - Optional.of(taskParams), + Optional.of(new CreateTrainedModelAssignmentAction.Request(taskParams, null)), 1, false ).rebalance().build(); @@ -693,7 +703,7 @@ public void testRebalance_GivenLowPriorityModelToAdd_NotEnoughMemoryNorProcessor TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( deployment2, - TrainedModelAssignment.Builder.empty(taskParams2) + TrainedModelAssignment.Builder.empty(taskParams2, null) .addRoutingEntry("node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")) .addRoutingEntry("node-2", new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) @@ -703,7 +713,7 @@ public void testRebalance_GivenLowPriorityModelToAdd_NotEnoughMemoryNorProcessor currentMetadata, nodeLoads, Map.of(List.of("zone-1"), List.of(node1), List.of("zone-2"), List.of(node2)), - Optional.of(taskParams1), + Optional.of(new CreateTrainedModelAssignmentAction.Request(taskParams1, null)), 1, false ).rebalance().build(); @@ -735,8 +745,8 @@ public void testRebalance_GivenMixedPriorityModels_NotEnoughMemoryForLowPriority String modelId2 = "model-2"; StartTrainedModelDeploymentAction.TaskParams taskParams2 = normalPriorityParams(modelId2, ByteSizeValue.ofMb(300).getBytes(), 1, 1); TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(modelId1, TrainedModelAssignment.Builder.empty(taskParams1)) - .addNewAssignment(modelId2, TrainedModelAssignment.Builder.empty(taskParams2)) + .addNewAssignment(modelId1, TrainedModelAssignment.Builder.empty(taskParams1, null)) + .addNewAssignment(modelId2, TrainedModelAssignment.Builder.empty(taskParams2, null)) .build(); TrainedModelAssignmentMetadata result = new TrainedModelAssignmentRebalancer( @@ -786,10 +796,11 @@ public void testRebalance_GivenMixedPriorityModels_TwoZones_EachNodeCanHoldOneMo String modelId2 = "model-2"; StartTrainedModelDeploymentAction.TaskParams taskParams2 = normalPriorityParams(modelId2, ByteSizeValue.ofMb(300).getBytes(), 1, 1); TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(modelId1, TrainedModelAssignment.Builder.empty(taskParams1)) + .addNewAssignment(modelId1, TrainedModelAssignment.Builder.empty(taskParams1, null)) .addNewAssignment( modelId2, - TrainedModelAssignment.Builder.empty(taskParams2).addRoutingEntry("node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")) + TrainedModelAssignment.Builder.empty(taskParams2, null) + .addRoutingEntry("node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .build(); @@ -844,8 +855,8 @@ public void testRebalance_GivenModelUsingAllCpu_FittingLowPriorityModelCanStart( String modelId2 = "model-2"; StartTrainedModelDeploymentAction.TaskParams taskParams2 = normalPriorityParams(modelId2, ByteSizeValue.ofMb(300).getBytes(), 1, 1); TrainedModelAssignmentMetadata currentMetadata = 
TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(modelId1, TrainedModelAssignment.Builder.empty(taskParams1)) - .addNewAssignment(modelId2, TrainedModelAssignment.Builder.empty(taskParams2)) + .addNewAssignment(modelId1, TrainedModelAssignment.Builder.empty(taskParams1, null)) + .addNewAssignment(modelId2, TrainedModelAssignment.Builder.empty(taskParams2, null)) .build(); TrainedModelAssignmentMetadata result = new TrainedModelAssignmentRebalancer( @@ -895,8 +906,8 @@ public void testRebalance_GivenMultipleLowPriorityModels_AndMultipleNodes() thro String modelId2 = "model-2"; StartTrainedModelDeploymentAction.TaskParams taskParams2 = lowPriorityParams(modelId2, ByteSizeValue.ofMb(100).getBytes()); TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.Builder.empty() - .addNewAssignment(modelId1, TrainedModelAssignment.Builder.empty(taskParams1)) - .addNewAssignment(modelId2, TrainedModelAssignment.Builder.empty(taskParams2)) + .addNewAssignment(modelId1, TrainedModelAssignment.Builder.empty(taskParams1, null)) + .addNewAssignment(modelId2, TrainedModelAssignment.Builder.empty(taskParams2, null)) .build(); TrainedModelAssignmentMetadata result = new TrainedModelAssignmentRebalancer( @@ -946,7 +957,8 @@ public void testRebalance_GivenNormalPriorityModelToLoad_EvictsLowPriorityModel( TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( modelId1, - TrainedModelAssignment.Builder.empty(taskParams1).addRoutingEntry("node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")) + TrainedModelAssignment.Builder.empty(taskParams1, null) + .addRoutingEntry("node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .build(); @@ -954,7 +966,7 @@ public void testRebalance_GivenNormalPriorityModelToLoad_EvictsLowPriorityModel( currentMetadata, nodeLoads, Map.of(List.of(), List.of(node1)), - Optional.of(taskParams2), + Optional.of(new CreateTrainedModelAssignmentAction.Request(taskParams2, null)), 1, false ).rebalance().build(); @@ -999,7 +1011,8 @@ public void testRebalance_GivenNormalPriorityModelToLoad_AndLowPriorityModelCanS TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( modelId1, - TrainedModelAssignment.Builder.empty(taskParams1).addRoutingEntry("node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")) + TrainedModelAssignment.Builder.empty(taskParams1, null) + .addRoutingEntry("node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .build(); @@ -1007,7 +1020,7 @@ public void testRebalance_GivenNormalPriorityModelToLoad_AndLowPriorityModelCanS currentMetadata, nodeLoads, Map.of(List.of(), List.of(node1, node2)), - Optional.of(taskParams2), + Optional.of(new CreateTrainedModelAssignmentAction.Request(taskParams2, null)), 1, false ).rebalance().build(); @@ -1052,7 +1065,8 @@ public void testRebalance_GivenNormalPriorityModelToLoad_AndLowPriorityModelMust TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( modelId1, - TrainedModelAssignment.Builder.empty(taskParams1).addRoutingEntry("node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")) + TrainedModelAssignment.Builder.empty(taskParams1, null) + .addRoutingEntry("node-1", new RoutingInfo(1, 1, RoutingState.STARTED, "")) ) .build(); @@ -1060,7 +1074,7 @@ public void testRebalance_GivenNormalPriorityModelToLoad_AndLowPriorityModelMust currentMetadata, nodeLoads, Map.of(List.of(), List.of(node1, node2)), - 
Optional.of(taskParams2), + Optional.of(new CreateTrainedModelAssignmentAction.Request(taskParams2, null)), 1, false ).rebalance().build(); @@ -1107,7 +1121,7 @@ public void testRebalance_GivenFirstModelToAdd_GivenScalingProcessorSetting() { currentMetadata, nodeLoads, Map.of(List.of(), List.of(node)), - Optional.of(taskParams), + Optional.of(new CreateTrainedModelAssignmentAction.Request(taskParams, null)), 2, false ).rebalance().build(); @@ -1130,7 +1144,7 @@ public void testRebalance_GivenFirstModelToAdd_GivenScalingProcessorSetting() { currentMetadata, nodeLoads, Map.of(List.of(), List.of(node)), - Optional.of(taskParams), + Optional.of(new CreateTrainedModelAssignmentAction.Request(taskParams, null)), 1, false ).rebalance().build(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AllocationReducerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AllocationReducerTests.java index 85fc83f775670..603eda65fbd51 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AllocationReducerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AllocationReducerTests.java @@ -181,7 +181,8 @@ private static TrainedModelAssignment createAssignment( Priority.NORMAL, randomNonNegativeLong(), randomNonNegativeLong() - ) + ), + null ); allocationsByNode.entrySet() .stream() diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java index cbbb38f1d1ddd..d84c04f0c41f1 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java @@ -25,14 +25,14 @@ public class AssignmentPlanTests extends ESTestCase { public void testBuilderCtor_GivenDuplicateNode() { Node n = new Node("n_1", 100, 4); - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 40, 1, 2, Map.of(), 0, 0, 0); + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 40, 1, 2, Map.of(), 0, null, 0, 0); expectThrows(IllegalArgumentException.class, () -> AssignmentPlan.builder(List.of(n, n), List.of(m))); } public void testBuilderCtor_GivenDuplicateModel() { Node n = new Node("n_1", 100, 4); - Deployment m = new AssignmentPlan.Deployment("m_1", 40, 1, 2, Map.of(), 0, 0, 0); + Deployment m = new AssignmentPlan.Deployment("m_1", 40, 1, 2, Map.of(), 0, null, 0, 0); expectThrows(IllegalArgumentException.class, () -> AssignmentPlan.builder(List.of(n), List.of(m, m))); } @@ -41,7 +41,17 @@ public void testAssignModelToNode_GivenNoPreviousAssignment() { Node n = new Node("n_1", ByteSizeValue.ofMb(350).getBytes(), 4); { // old memory format - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(40).getBytes(), 1, 2, Map.of(), 0, 0, 0); + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(40).getBytes(), + 1, + 2, + Map.of(), + 0, + null, + 0, + 0 + ); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -71,6 +81,7 @@ public void testAssignModelToNode_GivenNoPreviousAssignment() { 2, Map.of(), 0, + null, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(30).getBytes() ); 
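Aside for readers of this patch: the recurring edit above threads a new trailing argument (passed as null throughout these tests) into TrainedModelAssignment.Builder.empty, the AssignmentPlan.Deployment constructor, and the rebalancer's Optional of CreateTrainedModelAssignmentAction.Request. A minimal sketch of how a test class might centralize that argument, assuming only the constructor shapes visible in this diff (the helper names below are hypothetical and are not part of the change):

    // Hypothetical helpers, not part of this patch: they centralize the trailing null
    // so individual tests do not have to repeat it at every construction site.
    private static TrainedModelAssignment.Builder emptyAssignment(StartTrainedModelDeploymentAction.TaskParams params) {
        // Second argument deliberately null, matching the updated tests in this diff.
        return TrainedModelAssignment.Builder.empty(params, null);
    }

    private static CreateTrainedModelAssignmentAction.Request assignmentRequest(StartTrainedModelDeploymentAction.TaskParams params) {
        // Wraps task params the same way the rebalancer tests above now do.
        return new CreateTrainedModelAssignmentAction.Request(params, null);
    }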
@@ -107,6 +118,7 @@ public void testAssignModelToNode_GivenNewPlanSatisfiesCurrentAssignment() { 2, Map.of("n_1", 1), 0, + null, 0, 0 ); @@ -134,6 +146,7 @@ public void testAssignModelToNode_GivenNewPlanSatisfiesCurrentAssignment() { 2, Map.of("n_1", 1), 0, + null, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(25).getBytes() ); @@ -160,7 +173,7 @@ public void testAssignModelToNode_GivenNewPlanDoesNotSatisfyCurrentAssignment() Node n = new Node("n_1", ByteSizeValue.ofMb(300).getBytes(), 4); { // old memory format - Deployment m = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 2, Map.of("n_1", 2), 0, 0, 0); + Deployment m = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 2, Map.of("n_1", 2), 0, null, 0, 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -186,6 +199,7 @@ public void testAssignModelToNode_GivenNewPlanDoesNotSatisfyCurrentAssignment() 2, Map.of("n_1", 2), 0, + null, ByteSizeValue.ofMb(250).getBytes(), ByteSizeValue.ofMb(25).getBytes() ); @@ -209,7 +223,7 @@ public void testAssignModelToNode_GivenNewPlanDoesNotSatisfyCurrentAssignment() public void testAssignModelToNode_GivenPreviouslyUnassignedModelDoesNotFit() { Node n = new Node("n_1", ByteSizeValue.ofMb(340 - 1).getBytes(), 4); - Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 2, 2, Map.of(), 0, 0, 0); + Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 2, 2, Map.of(), 0, null, 0, 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); Exception e = expectThrows(IllegalArgumentException.class, () -> builder.assignModelToNode(m, n, 1)); @@ -227,6 +241,7 @@ public void testAssignModelToNode_GivenPreviouslyAssignedModelDoesNotFit() { 2, Map.of("n_1", 1), 0, + null, 0, 0 ); @@ -249,6 +264,7 @@ public void testAssignModelToNode_GivenPreviouslyAssignedModelDoesNotFit() { 2, Map.of("n_1", 1), 0, + null, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(5).getBytes() ); @@ -266,7 +282,7 @@ public void testAssignModelToNode_GivenPreviouslyAssignedModelDoesNotFit() { public void testAssignModelToNode_GivenNotEnoughCores_AndSingleThreadPerAllocation() { Node n = new Node("n_1", ByteSizeValue.ofMb(500).getBytes(), 4); - Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(100).getBytes(), 5, 1, Map.of(), 0, 0, 0); + Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(100).getBytes(), 5, 1, Map.of(), 0, null, 0, 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); Exception e = expectThrows(IllegalArgumentException.class, () -> builder.assignModelToNode(m, n, 5)); @@ -279,7 +295,17 @@ public void testAssignModelToNode_GivenNotEnoughCores_AndSingleThreadPerAllocati public void testAssignModelToNode_GivenNotEnoughCores_AndMultipleThreadsPerAllocation() { Node n = new Node("n_1", ByteSizeValue.ofMb(500).getBytes(), 5); - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(100).getBytes(), 3, 2, Map.of(), 0, 0, 0); + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 3, + 2, + Map.of(), + 0, + null, + 0, + 0 + ); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); Exception e = expectThrows(IllegalArgumentException.class, () -> builder.assignModelToNode(m, n, 3)); @@ -299,6 +325,7 @@ public void testAssignModelToNode_GivenSameModelAssignedTwice() { 2, Map.of(), 0, + null, 
ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(50).getBytes() ); @@ -335,7 +362,7 @@ public void testAssignModelToNode_GivenSameModelAssignedTwice() { public void testCanAssign_GivenPreviouslyUnassignedModelDoesNotFit() { Node n = new Node("n_1", 100, 5); - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 101, 1, 1, Map.of(), 0, 0, 0); + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 101, 1, 1, Map.of(), 0, null, 0, 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -346,7 +373,7 @@ public void testCanAssign_GivenPreviouslyAssignedModelDoesNotFit() { Node n = new Node("n_1", ByteSizeValue.ofMb(300).getBytes(), 5); { // old memory format - Deployment m = new Deployment("m_1", ByteSizeValue.ofMb(31).getBytes(), 1, 1, Map.of("n_1", 1), 0, 0, 0); + Deployment m = new Deployment("m_1", ByteSizeValue.ofMb(31).getBytes(), 1, 1, Map.of("n_1", 1), 0, null, 0, 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); assertThat(builder.canAssign(m, n, 1), is(true)); } @@ -359,6 +386,7 @@ public void testCanAssign_GivenPreviouslyAssignedModelDoesNotFit() { 1, Map.of("n_1", 1), 0, + null, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(10).getBytes() ); @@ -369,7 +397,17 @@ public void testCanAssign_GivenPreviouslyAssignedModelDoesNotFit() { public void testCanAssign_GivenEnoughMemory() { Node n = new Node("n_1", ByteSizeValue.ofMb(440).getBytes(), 5); - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(100).getBytes(), 3, 2, Map.of(), 0, 0, 0); + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 3, + 2, + Map.of(), + 0, + null, + 0, + 0 + ); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -384,7 +422,7 @@ public void testCompareTo_GivenDifferenceInPreviousAssignments() { Node n = new Node("n_1", ByteSizeValue.ofMb(300).getBytes(), 5); { - Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 3, 2, Map.of("n_1", 2), 0, 0, 0); + Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 3, 2, Map.of("n_1", 2), 0, null, 0, 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); builder.assignModelToNode(m, n, 2); planSatisfyingPreviousAssignments = builder.build(); @@ -397,6 +435,7 @@ public void testCompareTo_GivenDifferenceInPreviousAssignments() { 2, Map.of("n_1", 3), 0, + null, 0, 0 ); @@ -420,6 +459,7 @@ public void testCompareTo_GivenDifferenceInAllocations() { 2, Map.of("n_1", 1), 0, + null, 0, 0 ); @@ -445,7 +485,7 @@ public void testCompareTo_GivenDifferenceInMemory() { Node n = new Node("n_1", ByteSizeValue.ofMb(300).getBytes(), 5); { - Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 3, 2, Map.of("n_1", 1), 0, 0, 0); + Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 3, 2, Map.of("n_1", 1), 0, null, 0, 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); builder.assignModelToNode(m, n, 2); planUsingMoreMemory = builder.build(); @@ -458,6 +498,7 @@ public void testCompareTo_GivenDifferenceInMemory() { 2, Map.of("n_1", 1), 0, + null, 0, 0 ); @@ -482,6 +523,7 @@ public void testSatisfiesAllModels_GivenAllModelsAreSatisfied() { 2, Map.of(), 0, + null, 0, 0 ); @@ -492,6 +534,7 @@ public void testSatisfiesAllModels_GivenAllModelsAreSatisfied() { 1, Map.of(), 
0, + null, 0, 0 ); @@ -502,6 +545,7 @@ public void testSatisfiesAllModels_GivenAllModelsAreSatisfied() { 1, Map.of(), 0, + null, 0, 0 ); @@ -522,6 +566,7 @@ public void testSatisfiesAllModels_GivenAllModelsAreSatisfied() { 2, Map.of(), 0, + null, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(10).getBytes() ); @@ -532,6 +577,7 @@ public void testSatisfiesAllModels_GivenAllModelsAreSatisfied() { 1, Map.of(), 0, + null, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(10).getBytes() ); @@ -542,6 +588,7 @@ public void testSatisfiesAllModels_GivenAllModelsAreSatisfied() { 1, Map.of(), 0, + null, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(10).getBytes() ); @@ -558,9 +605,9 @@ public void testSatisfiesAllModels_GivenAllModelsAreSatisfied() { public void testSatisfiesAllModels_GivenOneModelHasOneAllocationLess() { Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4); Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4); - Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 1, 2, Map.of(), 0, 0, 0); - Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of(), 0, 0, 0); - Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(20).getBytes(), 4, 1, Map.of(), 0, 0, 0); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 1, 2, Map.of(), 0, null, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of(), 0, null, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(20).getBytes(), 4, 1, Map.of(), 0, null, 0, 0); AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2, deployment3)) .assignModelToNode(deployment1, node1, 1) .assignModelToNode(deployment2, node2, 2) @@ -573,9 +620,9 @@ public void testSatisfiesAllModels_GivenOneModelHasOneAllocationLess() { public void testArePreviouslyAssignedModelsAssigned_GivenTrue() { Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4); Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4); - Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 1, 2, Map.of(), 3, 0, 0); - Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of(), 4, 0, 0); - Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(20).getBytes(), 4, 1, Map.of(), 0, 0, 0); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 1, 2, Map.of(), 3, null, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of(), 4, null, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(20).getBytes(), 4, 1, Map.of(), 0, null, 0, 0); AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2, deployment3)) .assignModelToNode(deployment1, node1, 1) .assignModelToNode(deployment2, node2, 1) @@ -586,8 +633,8 @@ public void testArePreviouslyAssignedModelsAssigned_GivenTrue() { public void testArePreviouslyAssignedModelsAssigned_GivenFalse() { Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4); Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4); - Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 1, 2, Map.of(), 3, 0, 0); - Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of(), 4, 0, 0); + 
Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 1, 2, Map.of(), 3, null, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of(), 4, null, 0, 0); AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2)) .assignModelToNode(deployment1, node1, 1) .build(); @@ -597,7 +644,7 @@ public void testArePreviouslyAssignedModelsAssigned_GivenFalse() { public void testCountPreviouslyAssignedThatAreStillAssigned() { Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4); Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4); - Deployment deployment1 = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 1, 2, Map.of(), 3, 0, 0); + Deployment deployment1 = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 1, 2, Map.of(), 3, null, 0, 0); AssignmentPlan.Deployment deployment2 = new AssignmentPlan.Deployment( "m_2", ByteSizeValue.ofMb(30).getBytes(), @@ -605,6 +652,7 @@ public void testCountPreviouslyAssignedThatAreStillAssigned() { 1, Map.of(), 4, + null, 0, 0 ); @@ -615,6 +663,7 @@ public void testCountPreviouslyAssignedThatAreStillAssigned() { 1, Map.of(), 1, + null, 0, 0 ); @@ -625,6 +674,7 @@ public void testCountPreviouslyAssignedThatAreStillAssigned() { 1, Map.of(), 0, + null, 0, 0 ); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java index bc94144bce1c5..ef76c388b81a1 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java @@ -42,7 +42,7 @@ private static long scaleNodeSize(long nodeMemory) { public void testModelThatDoesNotFitInMemory() { { // Without perDeploymentMemory and perAllocationMemory specified List nodes = List.of(new Node("n_1", scaleNodeSize(50), 4)); - Deployment deployment = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(51).getBytes(), 4, 1, Map.of(), 0, 0, 0); + Deployment deployment = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(51).getBytes(), 4, 1, Map.of(), 0, null, 0, 0); AssignmentPlan plan = new AssignmentPlanner(nodes, List.of(deployment)).computePlan(); assertThat(plan.assignments(deployment), isEmpty()); } @@ -55,6 +55,7 @@ public void testModelThatDoesNotFitInMemory() { 1, Map.of(), 0, + null, ByteSizeValue.ofMb(250).getBytes(), ByteSizeValue.ofMb(51).getBytes() ); @@ -65,7 +66,7 @@ public void testModelThatDoesNotFitInMemory() { public void testModelWithThreadsPerAllocationNotFittingOnAnyNode() { List nodes = List.of(new Node("n_1", scaleNodeSize(100), 4), new Node("n_2", scaleNodeSize(100), 5)); - Deployment deployment = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(1).getBytes(), 1, 6, Map.of(), 0, 0, 0); + Deployment deployment = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(1).getBytes(), 1, 6, Map.of(), 0, null, 0, 0); AssignmentPlan plan = new AssignmentPlanner(nodes, List.of(deployment)).computePlan(); assertThat(plan.assignments(deployment), isEmpty()); } @@ -73,13 +74,13 @@ public void testModelWithThreadsPerAllocationNotFittingOnAnyNode() { public void testSingleModelThatFitsFullyOnSingleNode() { { Node node = new Node("n_1", 
scaleNodeSize(100), 4); - Deployment deployment = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(100).getBytes(), 1, 1, Map.of(), 0, 0, 0); + Deployment deployment = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(100).getBytes(), 1, 1, Map.of(), 0, null, 0, 0); AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); assertModelFullyAssignedToNode(plan, deployment, node); } { Node node = new Node("n_1", scaleNodeSize(1000), 8); - Deployment deployment = new Deployment("m_1", ByteSizeValue.ofMb(1000).getBytes(), 8, 1, Map.of(), 0, 0, 0); + Deployment deployment = new Deployment("m_1", ByteSizeValue.ofMb(1000).getBytes(), 8, 1, Map.of(), 0, null, 0, 0); AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); assertModelFullyAssignedToNode(plan, deployment, node); } @@ -92,6 +93,7 @@ public void testSingleModelThatFitsFullyOnSingleNode() { 16, Map.of(), 0, + null, 0, 0 ); @@ -100,7 +102,7 @@ public void testSingleModelThatFitsFullyOnSingleNode() { } { Node node = new Node("n_1", scaleNodeSize(100), 4); - Deployment deployment = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(100).getBytes(), 1, 1, Map.of(), 0, 0, 0); + Deployment deployment = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(100).getBytes(), 1, 1, Map.of(), 0, null, 0, 0); AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); assertModelFullyAssignedToNode(plan, deployment, node); } @@ -116,6 +118,7 @@ public void testSingleModelThatFitsFullyOnSingleNode_NewMemoryFields() { 1, Map.of(), 0, + null, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(100).getBytes() ); @@ -131,6 +134,7 @@ public void testSingleModelThatFitsFullyOnSingleNode_NewMemoryFields() { 1, Map.of(), 0, + null, ByteSizeValue.ofMb(100).getBytes(), ByteSizeValue.ofMb(100).getBytes() ); @@ -142,7 +146,7 @@ public void testSingleModelThatFitsFullyOnSingleNode_NewMemoryFields() { public void testSingleModelThatFitsFullyOnSingleNode_GivenTwoNodes_ShouldBeFullyAssignedOnOneNode() { Node node1 = new Node("n_1", scaleNodeSize(100), 4); Node node2 = new Node("n_2", scaleNodeSize(100), 4); - AssignmentPlan.Deployment deployment = new Deployment("m_1", ByteSizeValue.ofMb(100).getBytes(), 4, 1, Map.of(), 0, 0, 0); + AssignmentPlan.Deployment deployment = new Deployment("m_1", ByteSizeValue.ofMb(100).getBytes(), 4, 1, Map.of(), 0, null, 0, 0); AssignmentPlan plan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment)).computePlan(); @@ -164,6 +168,7 @@ public void testSingleModelThatFitsFullyOnSingleNode_GivenTwoNodes_ShouldBeFully 1, Map.of(), 0, + null, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(150).getBytes() ); @@ -179,7 +184,7 @@ public void testSingleModelThatFitsFullyOnSingleNode_GivenTwoNodes_ShouldBeFully } public void testModelWithMoreAllocationsThanAvailableCores_GivenSingleThreadPerAllocation() { - AssignmentPlan.Deployment deployment = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 10, 1, Map.of(), 0, 0, 0); + AssignmentPlan.Deployment deployment = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 10, 1, Map.of(), 0, null, 0, 0); // Single node { Node node = new Node("n_1", scaleNodeSize(100), 4); @@ -220,6 +225,7 @@ public void testModelWithMoreAllocationsThanAvailableCores_GivenSingleThreadPerA 1, Map.of(), 0, + null, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(100).getBytes() ); @@ -260,10 +266,10 @@ public void 
testMultipleModelsAndNodesWithSingleSolution() { Node node2 = new Node("n_2", 2 * scaleNodeSize(50), 7); Node node3 = new Node("n_3", 2 * scaleNodeSize(50), 2); Node node4 = new Node("n_4", 2 * scaleNodeSize(50), 2); - Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 2, 4, Map.of(), 0, 0, 0); - Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(50).getBytes(), 2, 3, Map.of(), 0, 0, 0); - Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(50).getBytes(), 1, 2, Map.of(), 0, 0, 0); - Deployment deployment4 = new Deployment("m_4", ByteSizeValue.ofMb(50).getBytes(), 2, 1, Map.of(), 0, 0, 0); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 2, 4, Map.of(), 0, null, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(50).getBytes(), 2, 3, Map.of(), 0, null, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(50).getBytes(), 1, 2, Map.of(), 0, null, 0, 0); + Deployment deployment4 = new Deployment("m_4", ByteSizeValue.ofMb(50).getBytes(), 2, 1, Map.of(), 0, null, 0, 0); AssignmentPlan plan = new AssignmentPlanner( List.of(node1, node2, node3, node4), @@ -322,6 +328,7 @@ public void testMultipleModelsAndNodesWithSingleSolution_NewMemoryFields() { 4, Map.of(), 0, + null, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(50).getBytes() ); @@ -332,6 +339,7 @@ public void testMultipleModelsAndNodesWithSingleSolution_NewMemoryFields() { 3, Map.of(), 0, + null, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(50).getBytes() ); @@ -342,6 +350,7 @@ public void testMultipleModelsAndNodesWithSingleSolution_NewMemoryFields() { 2, Map.of(), 0, + null, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(50).getBytes() ); @@ -352,6 +361,7 @@ public void testMultipleModelsAndNodesWithSingleSolution_NewMemoryFields() { 1, Map.of(), 0, + null, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(50).getBytes() ); @@ -402,7 +412,7 @@ public void testMultipleModelsAndNodesWithSingleSolution_NewMemoryFields() { } public void testModelWithMoreAllocationsThanAvailableCores_GivenThreeThreadsPerAllocation() { - Deployment deployment = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 10, 3, Map.of(), 0, 0, 0); + Deployment deployment = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 10, 3, Map.of(), 0, null, 0, 0); // Single node { Node node = new Node("n_1", scaleNodeSize(100), 4); @@ -443,6 +453,7 @@ public void testModelWithMoreAllocationsThanAvailableCores_GivenThreeThreadsPerA 3, Map.of(), 0, + null, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(50).getBytes() ); @@ -487,6 +498,7 @@ public void testModelWithPreviousAssignmentAndNoMoreCoresAvailable() { 1, Map.of("n_1", 4), 0, + null, 0, 0 ); @@ -506,18 +518,18 @@ public void testFullCoreUtilization_GivenModelsWithSingleThreadPerAllocation() { new Node("n_6", ByteSizeValue.ofGb(32).getBytes(), 16) ); List deployments = List.of( - new Deployment("m_1", ByteSizeValue.ofGb(4).getBytes(), 10, 1, Map.of("n_1", 5), 0, 0, 0), - new AssignmentPlan.Deployment("m_2", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of("n_3", 2), 0, 0, 0), - new AssignmentPlan.Deployment("m_3", ByteSizeValue.ofGb(3).getBytes(), 3, 1, Map.of(), 0, 0, 0), - new Deployment("m_4", ByteSizeValue.ofGb(1).getBytes(), 4, 1, Map.of("n_3", 2), 0, 0, 0), - new Deployment("m_5", ByteSizeValue.ofGb(6).getBytes(), 2, 1, Map.of(), 0, 0, 0), - new Deployment("m_6", ByteSizeValue.ofGb(1).getBytes(), 12, 1, 
Map.of(), 0, 0, 0), - new AssignmentPlan.Deployment("m_7", ByteSizeValue.ofGb(1).getBytes() / 2, 12, 1, Map.of("n_2", 6), 0, 0, 0), - new Deployment("m_8", ByteSizeValue.ofGb(2).getBytes(), 4, 1, Map.of(), 0, 0, 0), - new Deployment("m_9", ByteSizeValue.ofGb(1).getBytes(), 4, 1, Map.of(), 0, 0, 0), - new AssignmentPlan.Deployment("m_10", ByteSizeValue.ofGb(7).getBytes(), 7, 1, Map.of(), 0, 0, 0), - new Deployment("m_11", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of(), 0, 0, 0), - new Deployment("m_12", ByteSizeValue.ofGb(1).getBytes(), 10, 1, Map.of(), 0, 0, 0) + new Deployment("m_1", ByteSizeValue.ofGb(4).getBytes(), 10, 1, Map.of("n_1", 5), 0, null, 0, 0), + new AssignmentPlan.Deployment("m_2", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of("n_3", 2), 0, null, 0, 0), + new AssignmentPlan.Deployment("m_3", ByteSizeValue.ofGb(3).getBytes(), 3, 1, Map.of(), 0, null, 0, 0), + new Deployment("m_4", ByteSizeValue.ofGb(1).getBytes(), 4, 1, Map.of("n_3", 2), 0, null, 0, 0), + new Deployment("m_5", ByteSizeValue.ofGb(6).getBytes(), 2, 1, Map.of(), 0, null, 0, 0), + new Deployment("m_6", ByteSizeValue.ofGb(1).getBytes(), 12, 1, Map.of(), 0, null, 0, 0), + new AssignmentPlan.Deployment("m_7", ByteSizeValue.ofGb(1).getBytes() / 2, 12, 1, Map.of("n_2", 6), 0, null, 0, 0), + new Deployment("m_8", ByteSizeValue.ofGb(2).getBytes(), 4, 1, Map.of(), 0, null, 0, 0), + new Deployment("m_9", ByteSizeValue.ofGb(1).getBytes(), 4, 1, Map.of(), 0, null, 0, 0), + new AssignmentPlan.Deployment("m_10", ByteSizeValue.ofGb(7).getBytes(), 7, 1, Map.of(), 0, null, 0, 0), + new Deployment("m_11", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of(), 0, null, 0, 0), + new Deployment("m_12", ByteSizeValue.ofGb(1).getBytes(), 10, 1, Map.of(), 0, null, 0, 0) ); AssignmentPlan assignmentPlan = new AssignmentPlanner(nodes, deployments).computePlan(); @@ -550,10 +562,11 @@ public void testFullCoreUtilization_GivenModelsWithSingleThreadPerAllocation_New 1, Map.of("n_1", 5), 0, + null, ByteSizeValue.ofMb(400).getBytes(), ByteSizeValue.ofMb(100).getBytes() ), - new Deployment("m_2", ByteSizeValue.ofMb(100).getBytes(), 3, 1, Map.of("n_3", 2), 0, 0, 0), + new Deployment("m_2", ByteSizeValue.ofMb(100).getBytes(), 3, 1, Map.of("n_3", 2), 0, null, 0, 0), new Deployment( "m_3", ByteSizeValue.ofMb(50).getBytes(), @@ -561,6 +574,7 @@ public void testFullCoreUtilization_GivenModelsWithSingleThreadPerAllocation_New 1, Map.of(), 0, + null, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(50).getBytes() ), @@ -571,6 +585,7 @@ public void testFullCoreUtilization_GivenModelsWithSingleThreadPerAllocation_New 1, Map.of("n_3", 2), 0, + null, ByteSizeValue.ofMb(400).getBytes(), ByteSizeValue.ofMb(100).getBytes() ), @@ -581,6 +596,7 @@ public void testFullCoreUtilization_GivenModelsWithSingleThreadPerAllocation_New 1, Map.of(), 0, + null, ByteSizeValue.ofMb(800).getBytes(), ByteSizeValue.ofMb(100).getBytes() ), @@ -591,6 +607,7 @@ public void testFullCoreUtilization_GivenModelsWithSingleThreadPerAllocation_New 1, Map.of(), 0, + null, ByteSizeValue.ofMb(50).getBytes(), ByteSizeValue.ofMb(20).getBytes() ), @@ -601,14 +618,15 @@ public void testFullCoreUtilization_GivenModelsWithSingleThreadPerAllocation_New 1, Map.of("n_2", 6), 0, + null, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(50).getBytes() ), - new Deployment("m_8", ByteSizeValue.ofGb(2).getBytes(), 4, 1, Map.of(), 0, 0, 0), - new Deployment("m_9", ByteSizeValue.ofGb(1).getBytes(), 4, 1, Map.of(), 0, 0, 0), - new Deployment("m_10", ByteSizeValue.ofGb(7).getBytes(), 7, 1, 
Map.of(), 0, 0, 0), - new Deployment("m_11", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of(), 0, 0, 0), - new Deployment("m_12", ByteSizeValue.ofGb(1).getBytes(), 10, 1, Map.of(), 0, 0, 0) + new Deployment("m_8", ByteSizeValue.ofGb(2).getBytes(), 4, 1, Map.of(), 0, null, 0, 0), + new Deployment("m_9", ByteSizeValue.ofGb(1).getBytes(), 4, 1, Map.of(), 0, null, 0, 0), + new Deployment("m_10", ByteSizeValue.ofGb(7).getBytes(), 7, 1, Map.of(), 0, null, 0, 0), + new Deployment("m_11", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of(), 0, null, 0, 0), + new Deployment("m_12", ByteSizeValue.ofGb(1).getBytes(), 10, 1, Map.of(), 0, null, 0, 0) ); AssignmentPlan assignmentPlan = new AssignmentPlanner(nodes, deployments).computePlan(); @@ -718,6 +736,7 @@ public void testPreviousAssignmentsGetAtLeastAsManyAllocationsAfterAddingNewMode m.threadsPerAllocation(), previousAssignments, 0, + null, 0, 0 ) @@ -741,10 +760,11 @@ public void testGivenLargerModelWithPreviousAssignmentsAndSmallerModelWithoutAss 1, Map.of("n_1", 2, "n_2", 1), 0, + null, 0, 0 ); - Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(1100).getBytes(), 2, 1, Map.of(), 0, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(1100).getBytes(), 2, 1, Map.of(), 0, null, 0, 0); AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2, node3), List.of(deployment1, deployment2)) .computePlan(); assertThat(assignmentPlan.getRemainingNodeMemory("n_1"), greaterThanOrEqualTo(0L)); @@ -776,6 +796,7 @@ public void testModelWithoutCurrentAllocationsGetsAssignedIfAllocatedPreviously( 1, Map.of("n_1", 2, "n_2", 1), 3, + null, 0, 0 ); @@ -786,6 +807,7 @@ public void testModelWithoutCurrentAllocationsGetsAssignedIfAllocatedPreviously( 2, Map.of(), 1, + null, 0, 0 ); @@ -807,8 +829,8 @@ public void testModelWithoutCurrentAllocationsGetsAssignedIfAllocatedPreviously( public void testGivenPreviouslyAssignedModels_CannotAllBeAllocated() { Node node1 = new Node("n_1", scaleNodeSize(ByteSizeValue.ofGb(2).getMb()), 2); - AssignmentPlan.Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(1200).getBytes(), 1, 1, Map.of(), 1, 0, 0); - AssignmentPlan.Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(1100).getBytes(), 1, 1, Map.of(), 1, 0, 0); + AssignmentPlan.Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(1200).getBytes(), 1, 1, Map.of(), 1, null, 0, 0); + AssignmentPlan.Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(1100).getBytes(), 1, 1, Map.of(), 1, null, 0, 0); AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1), List.of(deployment1, deployment2)).computePlan(); @@ -818,9 +840,9 @@ public void testGivenPreviouslyAssignedModels_CannotAllBeAllocated() { public void testGivenClusterResize_AllocationShouldNotExceedMemoryConstraints() { Node node1 = new Node("n_1", ByteSizeValue.ofMb(1840).getBytes(), 2); Node node2 = new Node("n_2", ByteSizeValue.ofMb(2580).getBytes(), 2); - Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0, 0, 0); - Deployment deployment2 = new AssignmentPlan.Deployment("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0, 0, 0); - Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 4, 1, Map.of(), 0, 0, 0); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0, null, 0, 0); + Deployment deployment2 = new AssignmentPlan.Deployment("m_2", 
ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0, null, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 4, 1, Map.of(), 0, null, 0, 0); // First only start m_1 AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment1)).computePlan(); @@ -860,9 +882,9 @@ public void testGivenClusterResize_AllocationShouldNotExceedMemoryConstraints() public void testGivenClusterResize_ShouldAllocateEachModelAtLeastOnce() { Node node1 = new Node("n_1", ByteSizeValue.ofMb(2600).getBytes(), 2); Node node2 = new Node("n_2", ByteSizeValue.ofMb(2600).getBytes(), 2); - Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0, 0, 0); - Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0, 0, 0); - Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 4, 1, Map.of(), 0, 0, 0); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0, null, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0, null, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 4, 1, Map.of(), 0, null, 0, 0); // First only start m_1 AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment1)).computePlan(); @@ -931,9 +953,9 @@ public void testGivenClusterResize_ShouldRemoveAllocatedModels() { // Ensure that plan is removing previously allocated models if not enough memory is available Node node1 = new Node("n_1", ByteSizeValue.ofMb(1840).getBytes(), 2); Node node2 = new Node("n_2", ByteSizeValue.ofMb(2580).getBytes(), 2); - Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0, 0, 0); - Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0, 0, 0); - Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 1, 1, Map.of(), 0, 0, 0); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0, null, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0, null, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 1, 1, Map.of(), 0, null, 0, 0); // Create a plan where all deployments are assigned at least once AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment1, deployment2, deployment3)) @@ -965,6 +987,7 @@ public void testGivenClusterResize_ShouldRemoveAllocatedModels_NewMemoryFields() 1, Map.of(), 0, + null, ByteSizeValue.ofMb(400).getBytes(), ByteSizeValue.ofMb(100).getBytes() ); @@ -975,6 +998,7 @@ public void testGivenClusterResize_ShouldRemoveAllocatedModels_NewMemoryFields() 1, Map.of(), 0, + null, ByteSizeValue.ofMb(400).getBytes(), ByteSizeValue.ofMb(150).getBytes() ); @@ -985,6 +1009,7 @@ public void testGivenClusterResize_ShouldRemoveAllocatedModels_NewMemoryFields() 1, Map.of(), 0, + null, ByteSizeValue.ofMb(250).getBytes(), ByteSizeValue.ofMb(50).getBytes() ); @@ -1028,6 +1053,7 @@ public static List createModelsFromPlan(AssignmentPlan plan) { m.threadsPerAllocation(), currentAllocations, Math.max(m.maxAssignedAllocations(), totalAllocations), + null, 0, 0 ) @@ -1096,6 +1122,7 @@ public static Deployment randomModel(String idSuffix) { randomIntBetween(1, 
4), Map.of(), 0, + null, 0, 0 ); @@ -1107,6 +1134,7 @@ public static Deployment randomModel(String idSuffix) { randomIntBetween(1, 4), Map.of(), 0, + null, randomLongBetween(ByteSizeValue.ofMb(100).getBytes(), ByteSizeValue.ofGb(1).getBytes()), randomLongBetween(ByteSizeValue.ofMb(100).getBytes(), ByteSizeValue.ofGb(1).getBytes()) ); @@ -1137,7 +1165,7 @@ private void runTooManyNodesAndModels(int nodesSize, int modelsSize) { } List deployments = new ArrayList<>(); for (int i = 0; i < modelsSize; i++) { - deployments.add(new Deployment("m_" + i, ByteSizeValue.ofMb(200).getBytes(), 2, 1, Map.of(), 0, 0, 0)); + deployments.add(new Deployment("m_" + i, ByteSizeValue.ofMb(200).getBytes(), 2, 1, Map.of(), 0, null, 0, 0)); } // Check plan is computed without OOM exception diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java index 7f83df5835494..9885c4d583198 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java @@ -25,8 +25,8 @@ public class PreserveAllAllocationsTests extends ESTestCase { public void testGivenNoPreviousAssignments() { Node node1 = new Node("n_1", ByteSizeValue.ofMb(440).getBytes(), 4); Node node2 = new Node("n_2", ByteSizeValue.ofMb(440).getBytes(), 4); - Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of(), 0, 0, 0); - Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 2, 4, Map.of(), 0, 0, 0); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of(), 0, null, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 2, 4, Map.of(), 0, null, 0, 0); PreserveAllAllocations preserveAllAllocations = new PreserveAllAllocations( List.of(node1, node2), List.of(deployment1, deployment2) @@ -45,10 +45,21 @@ public void testGivenPreviousAssignments() { 1, Map.of("n_1", 1), 1, + null, + 0, + 0 + ); + Deployment deployment2 = new Deployment( + "m_2", + ByteSizeValue.ofMb(50).getBytes(), + 6, + 4, + Map.of("n_1", 1, "n_2", 2), + 3, + null, 0, 0 ); - Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(50).getBytes(), 6, 4, Map.of("n_1", 1, "n_2", 2), 3, 0, 0); PreserveAllAllocations preserveAllAllocations = new PreserveAllAllocations( List.of(node1, node2), List.of(deployment1, deployment2) @@ -117,6 +128,7 @@ public void testGivenPreviousAssignments() { 1, Map.of("n_1", 1), 1, + null, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(10).getBytes() ); @@ -127,6 +139,7 @@ public void testGivenPreviousAssignments() { 4, Map.of("n_1", 1, "n_2", 2), 3, + null, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(10).getBytes() ); @@ -195,7 +208,7 @@ public void testGivenPreviousAssignments() { public void testGivenModelWithPreviousAssignments_AndPlanToMergeHasNoAssignments() { Node node = new Node("n_1", ByteSizeValue.ofMb(400).getBytes(), 4); - Deployment deployment = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 2, Map.of("n_1", 2), 2, 0, 0); + Deployment deployment = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 2, Map.of("n_1", 2), 2, null, 0, 0); PreserveAllAllocations preserveAllAllocations 
= new PreserveAllAllocations(List.of(node), List.of(deployment)); AssignmentPlan plan = AssignmentPlan.builder(List.of(node), List.of(deployment)).build(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java index d2907eb31160b..50ba8763c690d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java @@ -26,8 +26,8 @@ public class PreserveOneAllocationTests extends ESTestCase { public void testGivenNoPreviousAssignments() { Node node1 = new Node("n_1", ByteSizeValue.ofMb(440).getBytes(), 4); Node node2 = new Node("n_2", ByteSizeValue.ofMb(440).getBytes(), 4); - Deployment deployment1 = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of(), 0, 0, 0); - AssignmentPlan.Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 2, 4, Map.of(), 0, 0, 0); + Deployment deployment1 = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of(), 0, null, 0, 0); + AssignmentPlan.Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 2, 4, Map.of(), 0, null, 0, 0); PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation(List.of(node1, node2), List.of(deployment1, deployment2)); List nodesPreservingAllocations = preserveOneAllocation.nodesPreservingAllocations(); @@ -42,8 +42,18 @@ public void testGivenPreviousAssignments() { // old memory format Node node1 = new Node("n_1", ByteSizeValue.ofMb(640).getBytes(), 8); Node node2 = new Node("n_2", ByteSizeValue.ofMb(640).getBytes(), 8); - Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of("n_1", 1), 1, 0, 0); - Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(50).getBytes(), 6, 4, Map.of("n_1", 1, "n_2", 2), 3, 0, 0); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of("n_1", 1), 1, null, 0, 0); + Deployment deployment2 = new Deployment( + "m_2", + ByteSizeValue.ofMb(50).getBytes(), + 6, + 4, + Map.of("n_1", 1, "n_2", 2), + 3, + null, + 0, + 0 + ); PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation( List.of(node1, node2), List.of(deployment1, deployment2) @@ -117,6 +127,7 @@ public void testGivenPreviousAssignments() { 1, Map.of("n_1", 1), 1, + null, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(10).getBytes() ); @@ -127,6 +138,7 @@ public void testGivenPreviousAssignments() { 4, Map.of("n_1", 1, "n_2", 2), 3, + null, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(10).getBytes() ); @@ -199,7 +211,7 @@ public void testGivenModelWithPreviousAssignments_AndPlanToMergeHasNoAssignments { // old memory format Node node = new Node("n_1", ByteSizeValue.ofMb(400).getBytes(), 4); - Deployment deployment = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 2, Map.of("n_1", 2), 2, 0, 0); + Deployment deployment = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 2, Map.of("n_1", 2), 2, null, 0, 0); PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation(List.of(node), List.of(deployment)); AssignmentPlan plan = AssignmentPlan.builder(List.of(node), 
List.of(deployment)).build(); @@ -221,6 +233,7 @@ public void testGivenModelWithPreviousAssignments_AndPlanToMergeHasNoAssignments 2, Map.of("n_1", 2), 2, + null, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(10).getBytes() ); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlannerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlannerTests.java index 651e4764cb894..4993600d0d3b3 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlannerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlannerTests.java @@ -36,7 +36,7 @@ public class ZoneAwareAssignmentPlannerTests extends ESTestCase { public void testGivenOneModel_OneNode_OneZone_DoesNotFit() { Node node = new Node("n_1", 100, 1); - AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment("m_1", 100, 1, 2, Map.of(), 0, 0, 0); + AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment("m_1", 100, 1, 2, Map.of(), 0, null, 0, 0); AssignmentPlan plan = new ZoneAwareAssignmentPlanner(Map.of(List.of(), List.of(node)), List.of(deployment)).computePlan(); @@ -52,6 +52,7 @@ public void testGivenOneModel_OneNode_OneZone_FullyFits() { 2, Map.of(), 0, + null, 0, 0 ); @@ -70,6 +71,7 @@ public void testGivenOneModel_OneNode_OneZone_PartiallyFits() { 2, Map.of(), 0, + null, 0, 0 ); @@ -91,6 +93,7 @@ public void testGivenOneModelWithSingleAllocation_OneNode_TwoZones() { 2, Map.of(), 0, + null, 0, 0 ); @@ -118,6 +121,7 @@ public void testGivenOneModel_OneNodePerZone_TwoZones_FullyFits() { 2, Map.of(), 0, + null, 0, 0 ); @@ -144,6 +148,7 @@ public void testGivenOneModel_OneNodePerZone_TwoZones_PartiallyFits() { 3, Map.of(), 0, + null, 0, 0 ); @@ -168,9 +173,9 @@ public void testGivenThreeModels_TwoNodesPerZone_ThreeZones_FullyFit() { Node node4 = new Node("n_4", ByteSizeValue.ofMb(1000).getBytes(), 4); Node node5 = new Node("n_5", ByteSizeValue.ofMb(1000).getBytes(), 4); Node node6 = new Node("n_6", ByteSizeValue.ofMb(1000).getBytes(), 4); - Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 4, 1, Map.of(), 0, 0, 0); - Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 6, 2, Map.of(), 0, 0, 0); - Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(30).getBytes(), 2, 3, Map.of(), 0, 0, 0); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 4, 1, Map.of(), 0, null, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 6, 2, Map.of(), 0, null, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(30).getBytes(), 2, 3, Map.of(), 0, null, 0, 0); Map, List> nodesByZone = Map.of( List.of("z_1"), @@ -216,8 +221,8 @@ public void testGivenTwoModelsWithSingleAllocation_OneNode_ThreeZones() { Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4); Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4); Node node3 = new Node("n_3", ByteSizeValue.ofMb(1000).getBytes(), 4); - Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 1, 1, Map.of(), 0, 0, 0); - Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 1, 1, Map.of(), 0, 0, 0); + Deployment deployment1 = new Deployment("m_1", 
ByteSizeValue.ofMb(30).getBytes(), 1, 1, Map.of(), 0, null, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 1, 1, Map.of(), 0, null, 0, 0); AssignmentPlan plan = new ZoneAwareAssignmentPlanner( Map.of(List.of("z1"), List.of(node1), List.of("z2"), List.of(node2), List.of("z3"), List.of(node3)), @@ -255,6 +260,7 @@ public void testPreviousAssignmentsGetAtLeastAsManyAllocationsAfterAddingNewMode m.threadsPerAllocation(), previousAssignments, 0, + null, 0, 0 ) @@ -270,9 +276,9 @@ public void testPreviousAssignmentsGetAtLeastAsManyAllocationsAfterAddingNewMode public void testGivenClusterResize_GivenOneZone_ShouldAllocateEachModelAtLeastOnce() { Node node1 = new Node("n_1", ByteSizeValue.ofMb(2580).getBytes(), 2); Node node2 = new Node("n_2", ByteSizeValue.ofMb(2580).getBytes(), 2); - Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0, 0, 0); - Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0, 0, 0); - Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 4, 1, Map.of(), 0, 0, 0); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0, null, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0, null, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 4, 1, Map.of(), 0, null, 0, 0); // First only start m_1 AssignmentPlan assignmentPlan = new ZoneAwareAssignmentPlanner(Map.of(List.of(), List.of(node1, node2)), List.of(deployment1)) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilderRewriteTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilderRewriteTests.java index 3bfe8aa390d8b..7266b165504d4 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilderRewriteTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilderRewriteTests.java @@ -41,6 +41,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.in; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @@ -193,8 +194,7 @@ private static LearningToRankService learningToRankServiceMock() { public void testBuildContext() throws Exception { LocalModel localModel = mock(LocalModel.class); - List inputFields = List.of(DOUBLE_FIELD_NAME, INT_FIELD_NAME); - when(localModel.inputFields()).thenReturn(inputFields); + when(localModel.inputFields()).thenReturn(GOOD_MODEL_CONFIG.getInput().getFieldNames()); IndexSearcher searcher = mock(IndexSearcher.class); doAnswer(invocation -> invocation.getArgument(0)).when(searcher).rewrite(any(Query.class)); @@ -211,11 +211,48 @@ public void testBuildContext() throws Exception { assertNotNull(rescoreContext); assertThat(rescoreContext.getWindowSize(), equalTo(20)); List featureExtractors = rescoreContext.buildFeatureExtractors(context.searcher()); - assertThat(featureExtractors, hasSize(2)); - assertThat( - featureExtractors.stream().flatMap(featureExtractor -> featureExtractor.featureNames().stream()).toList(), - containsInAnyOrder("feature_1", "feature_2", 
DOUBLE_FIELD_NAME, INT_FIELD_NAME) + assertThat(featureExtractors, hasSize(1)); + + FeatureExtractor queryExtractor = featureExtractors.get(0); + assertThat(queryExtractor, instanceOf(QueryFeatureExtractor.class)); + assertThat(queryExtractor.featureNames(), hasSize(2)); + assertThat(queryExtractor.featureNames(), containsInAnyOrder("feature_1", "feature_2")); + } + + public void testLegacyFieldValueExtractorBuildContext() throws Exception { + // Models created before 8.15 have been saved with input fields. + // We check field value extractors are created and the deduplication is done correctly. + LocalModel localModel = mock(LocalModel.class); + when(localModel.inputFields()).thenReturn(List.of("feature_1", "field_1", "field_2")); + + IndexSearcher searcher = mock(IndexSearcher.class); + doAnswer(invocation -> invocation.getArgument(0)).when(searcher).rewrite(any(Query.class)); + SearchExecutionContext context = createSearchExecutionContext(searcher); + + LearningToRankRescorerBuilder rescorerBuilder = new LearningToRankRescorerBuilder( + localModel, + (LearningToRankConfig) GOOD_MODEL_CONFIG.getInferenceConfig(), + null, + mock(LearningToRankService.class) ); + + LearningToRankRescorerContext rescoreContext = rescorerBuilder.innerBuildContext(20, context); + assertNotNull(rescoreContext); + assertThat(rescoreContext.getWindowSize(), equalTo(20)); + List featureExtractors = rescoreContext.buildFeatureExtractors(context.searcher()); + + assertThat(featureExtractors, hasSize(2)); + + FeatureExtractor queryExtractor = featureExtractors.stream().filter(fe -> fe instanceof QueryFeatureExtractor).findFirst().get(); + assertThat(queryExtractor.featureNames(), hasSize(2)); + assertThat(queryExtractor.featureNames(), containsInAnyOrder("feature_1", "feature_2")); + + FeatureExtractor fieldValueExtractor = featureExtractors.stream() + .filter(fe -> fe instanceof FieldValueFeatureExtractor) + .findFirst() + .get(); + assertThat(fieldValueExtractor.featureNames(), hasSize(2)); + assertThat(fieldValueExtractor.featureNames(), containsInAnyOrder("field_1", "field_2")); } private LearningToRankRescorerBuilder rewriteAndFetch( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankServiceTests.java index 026dcca4bfcf7..6ca9ae4296789 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankServiceTests.java @@ -50,11 +50,10 @@ import static org.mockito.Mockito.verify; public class LearningToRankServiceTests extends ESTestCase { - public static final String GOOD_MODEL = "inferenceEntityId"; - public static final String BAD_MODEL = "badModel"; + public static final String GOOD_MODEL = "inference-entity-id"; + public static final String BAD_MODEL = "bad-model"; public static final TrainedModelConfig GOOD_MODEL_CONFIG = TrainedModelConfig.builder() .setModelId(GOOD_MODEL) - .setInput(new TrainedModelInput(List.of("field1", "field2"))) .setEstimatedOperations(1) .setModelSize(2) .setModelType(TrainedModelType.TREE_ENSEMBLE) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextEmbeddingProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextEmbeddingProcessorTests.java index bba2844784117..8369412580b88 100644 --- 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextEmbeddingProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextEmbeddingProcessorTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.inference.results.MlChunkedTextEmbeddingFloatResults; +import org.elasticsearch.xpack.core.ml.inference.results.MlChunkedTextExpansionResults; import org.elasticsearch.xpack.core.ml.inference.results.MlTextEmbeddingResults; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.BertTokenization; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.Tokenization; @@ -16,9 +17,13 @@ import org.elasticsearch.xpack.ml.inference.nlp.tokenizers.BertTokenizer; import org.elasticsearch.xpack.ml.inference.pytorch.results.PyTorchInferenceResult; +import java.util.Map; + +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.core.IsNot.not; public class TextEmbeddingProcessorTests extends ESTestCase { @@ -67,4 +72,26 @@ public void testChunking() { assertThat(chunkedResult.getChunks().get(1).embedding().length, greaterThan(0)); } } + + public void testChunkingWithEmptyString() { + try ( + BertTokenizer tokenizer = BertTokenizer.builder( + TextExpansionProcessorTests.TEST_CASED_VOCAB, + new BertTokenization(null, false, 5, Tokenization.Truncate.NONE, 0) + ).build() + ) { + var pytorchResult = new PyTorchInferenceResult(new double[][][] { { { 1.0, 2.0, 3.0, 4.0, 5.0 } } }); + + var input = ""; + var tokenization = tokenizer.tokenize(input, Tokenization.Truncate.NONE, 0, 0, null); + var tokenizationResult = new BertTokenizationResult(TextExpansionProcessorTests.TEST_CASED_VOCAB, tokenization, 0); + var inferenceResult = TextExpansionProcessor.processResult(tokenizationResult, pytorchResult, Map.of(), "foo", true); + assertThat(inferenceResult, instanceOf(MlChunkedTextExpansionResults.class)); + + var chunkedResult = (MlChunkedTextExpansionResults) inferenceResult; + assertThat(chunkedResult.getChunks(), hasSize(1)); + assertEquals("", chunkedResult.getChunks().get(0).matchedText()); + assertThat(chunkedResult.getChunks().get(0).weightedTokens(), not(empty())); + } + } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextExpansionProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextExpansionProcessorTests.java index 9803467644db9..1991275dbd2f7 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextExpansionProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextExpansionProcessorTests.java @@ -147,4 +147,26 @@ public void testChunking() { assertThat(chunkedResult.getChunks().get(1).weightedTokens(), not(empty())); } } + + public void testChunkingWithEmptyString() { + try ( + BertTokenizer tokenizer = BertTokenizer.builder( + TEST_CASED_VOCAB, + new BertTokenization(null, false, 5, Tokenization.Truncate.NONE, 0) + ).build() + ) { + var pytorchResult = new PyTorchInferenceResult(new double[][][] { { { 1.0, 2.0, 3.0, 4.0, 5.0 } } }); + + var input = ""; + var tokenization = tokenizer.tokenize(input, Tokenization.Truncate.NONE, 0, 0, null); + var tokenizationResult = new BertTokenizationResult(TEST_CASED_VOCAB, tokenization, 0); + var inferenceResult = 
TextExpansionProcessor.processResult(tokenizationResult, pytorchResult, Map.of(), "foo", true); + assertThat(inferenceResult, instanceOf(MlChunkedTextExpansionResults.class)); + + var chunkedResult = (MlChunkedTextExpansionResults) inferenceResult; + assertThat(chunkedResult.getChunks(), hasSize(1)); + assertEquals("", chunkedResult.getChunks().get(0).matchedText()); + assertThat(chunkedResult.getChunks().get(0).weightedTokens(), not(empty())); + } + } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessorTests.java index 860da3140f4fe..7eb9d7e940dda 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessorTests.java @@ -276,10 +276,15 @@ public void testsTimeDependentStats() { var timeSupplier = new TimeSupplier(resultTimestamps); var processor = new PyTorchResultProcessor("foo", s -> {}, timeSupplier); + for (int i = 0; i < 10; i++) { + processor.registerRequest("foo" + i, ActionListener.noop()); + } + // 1st period - processor.processInferenceResult(wrapInferenceResult("foo", false, 200L)); - processor.processInferenceResult(wrapInferenceResult("foo", false, 200L)); - processor.processInferenceResult(wrapInferenceResult("foo", false, 200L)); + processor.processInferenceResult(wrapInferenceResult("foo0", false, 200L)); + processor.processInferenceResult(wrapInferenceResult("foo1", false, 200L)); + processor.processInferenceResult(wrapInferenceResult("foo2", false, 200L)); + // first call has no results as is in the same period var stats = processor.getResultStats(); assertThat(stats.recentStats().requestsProcessed(), equalTo(0L)); @@ -293,7 +298,7 @@ public void testsTimeDependentStats() { assertThat(stats.peakThroughput(), equalTo(3L)); // 2nd period - processor.processInferenceResult(wrapInferenceResult("foo", false, 100L)); + processor.processInferenceResult(wrapInferenceResult("foo3", false, 100L)); stats = processor.getResultStats(); assertNotNull(stats.recentStats()); assertThat(stats.recentStats().requestsProcessed(), equalTo(1L)); @@ -305,7 +310,7 @@ public void testsTimeDependentStats() { assertThat(stats.recentStats().requestsProcessed(), equalTo(0L)); // 4th period - processor.processInferenceResult(wrapInferenceResult("foo", false, 300L)); + processor.processInferenceResult(wrapInferenceResult("foo4", false, 300L)); stats = processor.getResultStats(); assertNotNull(stats.recentStats()); assertThat(stats.recentStats().requestsProcessed(), equalTo(1L)); @@ -313,8 +318,8 @@ public void testsTimeDependentStats() { assertThat(stats.lastUsed(), equalTo(Instant.ofEpochMilli(resultTimestamps[9]))); // 7th period - processor.processInferenceResult(wrapInferenceResult("foo", false, 410L)); - processor.processInferenceResult(wrapInferenceResult("foo", false, 390L)); + processor.processInferenceResult(wrapInferenceResult("foo5", false, 410L)); + processor.processInferenceResult(wrapInferenceResult("foo6", false, 390L)); stats = processor.getResultStats(); assertThat(stats.recentStats().requestsProcessed(), equalTo(0L)); assertThat(stats.recentStats().avgInferenceTime(), nullValue()); @@ -325,9 +330,9 @@ public void testsTimeDependentStats() { assertThat(stats.lastUsed(), equalTo(Instant.ofEpochMilli(resultTimestamps[12]))); 
// 8th period - processor.processInferenceResult(wrapInferenceResult("foo", false, 510L)); - processor.processInferenceResult(wrapInferenceResult("foo", false, 500L)); - processor.processInferenceResult(wrapInferenceResult("foo", false, 490L)); + processor.processInferenceResult(wrapInferenceResult("foo7", false, 510L)); + processor.processInferenceResult(wrapInferenceResult("foo8", false, 500L)); + processor.processInferenceResult(wrapInferenceResult("foo9", false, 490L)); stats = processor.getResultStats(); assertNotNull(stats.recentStats()); assertThat(stats.recentStats().requestsProcessed(), equalTo(3L)); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/NodeLoadDetectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/NodeLoadDetectorTests.java index fef9b07429702..c3ad54427f70c 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/NodeLoadDetectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/NodeLoadDetectorTests.java @@ -133,7 +133,8 @@ public void testNodeLoadDetection() { Priority.NORMAL, 0L, 0L - ) + ), + null ) .addRoutingEntry("_node_id4", new RoutingInfo(1, 1, RoutingState.STARTING, "")) .addRoutingEntry("_node_id2", new RoutingInfo(1, 1, RoutingState.FAILED, "test")) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilderTests.java index b086fef6f10f4..8da6fc843614e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilderTests.java @@ -182,6 +182,30 @@ public void testToQuery() throws IOException { } } + @Override + public void testFromXContent() throws IOException { + super.testFromXContent(); + assertCriticalWarnings(TextExpansionQueryBuilder.TEXT_EXPANSION_DEPRECATION_MESSAGE); + } + + @Override + public void testUnknownField() throws IOException { + super.testUnknownField(); + assertCriticalWarnings(TextExpansionQueryBuilder.TEXT_EXPANSION_DEPRECATION_MESSAGE); + } + + @Override + public void testUnknownObjectException() throws IOException { + super.testUnknownObjectException(); + assertCriticalWarnings(TextExpansionQueryBuilder.TEXT_EXPANSION_DEPRECATION_MESSAGE); + } + + @Override + public void testValidOutput() throws IOException { + super.testValidOutput(); + assertCriticalWarnings(TextExpansionQueryBuilder.TEXT_EXPANSION_DEPRECATION_MESSAGE); + } + public void testIllegalValues() { { IllegalArgumentException e = expectThrows( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/rest/inference/RestStartTrainedModelDeploymentActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/rest/inference/RestStartTrainedModelDeploymentActionTests.java index 7c1f499640e64..b6c450c84d596 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/rest/inference/RestStartTrainedModelDeploymentActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/rest/inference/RestStartTrainedModelDeploymentActionTests.java @@ -7,12 +7,18 @@ package org.elasticsearch.xpack.ml.rest.inference; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.util.SetOnce; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.ByteSizeValue; +import 
org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.test.rest.RestActionTestCase; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; @@ -21,81 +27,252 @@ import java.io.IOException; import java.util.HashMap; +import java.util.List; import java.util.Map; +import java.util.function.BiConsumer; +import java.util.function.Function; +import static java.util.stream.Collectors.toList; +import static org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus.State.FULLY_ALLOCATED; +import static org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus.State.STARTING; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; public class RestStartTrainedModelDeploymentActionTests extends RestActionTestCase { + private final TestCase testCase; - public void testCacheDisabled() { - final boolean disableInferenceProcessCache = true; - controller().registerHandler(new RestStartTrainedModelDeploymentAction(disableInferenceProcessCache)); - SetOnce executeCalled = new SetOnce<>(); - verifyingClient.setExecuteVerifier(((actionType, actionRequest) -> { - assertThat(actionRequest, instanceOf(StartTrainedModelDeploymentAction.Request.class)); - - var request = (StartTrainedModelDeploymentAction.Request) actionRequest; - assertThat(request.getCacheSize(), is(ByteSizeValue.ZERO)); + public RestStartTrainedModelDeploymentActionTests(TestCase testCase) { + this.testCase = testCase; + } - executeCalled.set(true); - return createResponse(); - })); + @ParametersFactory(shuffle = false) + public static Iterable parameters() throws Exception { + List testCases = List.of( + // parsing from body only + TestCase.of( + "Parses body timeout field", + false, + (description, request) -> assertThat(description, request.getTimeout(), is(TimeValue.timeValueSeconds(4))), + Map.of(), + XContentFactory.jsonBuilder().startObject().field("timeout", "4s").endObject() + ), + TestCase.of( + "Parses body wait_for state field", + false, + (description, request) -> assertThat(description, request.getWaitForState(), is(FULLY_ALLOCATED)), + Map.of(), + XContentFactory.jsonBuilder().startObject().field("wait_for", FULLY_ALLOCATED.toString()).endObject() + ), + TestCase.of( + "Parses body number_of_allocations field", + false, + (description, request) -> assertThat(description, request.getNumberOfAllocations(), is(2)), + Map.of(), + XContentFactory.jsonBuilder().startObject().field("number_of_allocations", "2").endObject() + ), + TestCase.of( + "Parses body threads_per_allocation field", + false, + (description, request) -> assertThat(description, request.getThreadsPerAllocation(), is(2)), + Map.of(), + XContentFactory.jsonBuilder().startObject().field("threads_per_allocation", "2").endObject() + ), + TestCase.of( + "Parses body queue_capacity field", + false, + (description, request) -> assertThat(description, request.getQueueCapacity(), is(2)), + Map.of(), + XContentFactory.jsonBuilder().startObject().field("queue_capacity", "2").endObject() + ), + TestCase.of( + "Parses body cache_size field", + false, + (description, request) -> assertThat(description, 
request.getCacheSize(), is(ByteSizeValue.ofMb(2))), + Map.of(), + XContentFactory.jsonBuilder().startObject().field("cache_size", "2mb").endObject() + ), + // parsing from query params only + TestCase.of( + "Parses query param timeout field", + false, + (description, request) -> assertThat(description, request.getTimeout(), is(TimeValue.timeValueSeconds(4))), + Map.of("timeout", "4s") + ), + TestCase.of( + "Parses query param wait_for state field", + false, + (description, request) -> assertThat(description, request.getWaitForState(), is(FULLY_ALLOCATED)), + Map.of("wait_for", FULLY_ALLOCATED.toString()) + ), + TestCase.of( + "Parses query param number_of_allocations field", + false, + (description, request) -> assertThat(description, request.getNumberOfAllocations(), is(2)), + Map.of("number_of_allocations", "2") + ), + TestCase.of( + "Parses query param threads_per_allocation field", + false, + (description, request) -> assertThat(description, request.getThreadsPerAllocation(), is(2)), + Map.of("threads_per_allocation", "2") + ), + TestCase.of( + "Parses query param queue_capacity field", + false, + (description, request) -> assertThat(description, request.getQueueCapacity(), is(2)), + Map.of("queue_capacity", "2") + ), + TestCase.of( + "Parses query param cache_size field", + false, + (description, request) -> assertThat(description, request.getCacheSize(), is(ByteSizeValue.ofMb(2))), + Map.of("cache_size", "2mb") + ), + // query params override body + TestCase.of( + "Query param overrides body timeout field", + false, + (description, request) -> assertThat(description, request.getTimeout(), is(TimeValue.timeValueSeconds(4))), + Map.of("timeout", "4s"), + XContentFactory.jsonBuilder().startObject().field("timeout", "2s").endObject() + ), + TestCase.of( + "Query param overrides body wait_for state field", + false, + (description, request) -> assertThat(description, request.getWaitForState(), is(STARTING)), + Map.of("wait_for", STARTING.toString()), + XContentFactory.jsonBuilder().startObject().field("wait_for", FULLY_ALLOCATED.toString()).endObject() + ), + TestCase.of( + "Query param overrides body number_of_allocations field", + false, + (description, request) -> assertThat(description, request.getNumberOfAllocations(), is(5)), + Map.of("number_of_allocations", "5"), + XContentFactory.jsonBuilder().startObject().field("number_of_allocations", "2").endObject() + ), + TestCase.of( + "Query param overrides body threads_per_allocation field", + false, + (description, request) -> assertThat(description, request.getThreadsPerAllocation(), is(3)), + Map.of("threads_per_allocation", "3"), + XContentFactory.jsonBuilder().startObject().field("threads_per_allocation", "2").endObject() + ), + TestCase.of( + "Query param overrides body queue_capacity field", + false, + (description, request) -> assertThat(description, request.getQueueCapacity(), is(2)), + Map.of("queue_capacity", "2"), + XContentFactory.jsonBuilder().startObject().field("queue_capacity", "1").endObject() + ), + TestCase.of( + "Query param overrides body cache_size field", + false, + (description, request) -> assertThat(description, request.getCacheSize(), is(ByteSizeValue.ofMb(3))), + Map.of("cache_size", "3mb"), + XContentFactory.jsonBuilder().startObject().field("cache_size", "2mb").endObject() + ), + // cache size tests + TestCase.of( + "Disables cache_size", + true, + (description, request) -> assertThat(description, request.getCacheSize(), is(ByteSizeValue.ZERO)), + Map.of() + ), + TestCase.of( + "Sets cache_size to null", + 
false, + (description, request) -> assertNull(description, request.getCacheSize()), + Map.of() + ) + ); - RestRequest inferenceRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST) - .withPath("_ml/trained_models/test_id/deployment/_start") - .build(); - dispatchRequest(inferenceRequest); - assertThat(executeCalled.get(), equalTo(true)); + return testCases.stream().map(TestCase::toArray).collect(toList()); } - public void testCacheEnabled() { - final boolean disableInferenceProcessCache = false; - controller().registerHandler(new RestStartTrainedModelDeploymentAction(disableInferenceProcessCache)); + /** + * This test is run for each of the supplied {@link TestCase} configurations. + * @throws IOException _ + */ + public void test() throws IOException { + controller().registerHandler(testCase.action); SetOnce executeCalled = new SetOnce<>(); verifyingClient.setExecuteVerifier(((actionType, actionRequest) -> { assertThat(actionRequest, instanceOf(StartTrainedModelDeploymentAction.Request.class)); var request = (StartTrainedModelDeploymentAction.Request) actionRequest; - assertNull(request.getCacheSize()); - - executeCalled.set(true); - return createResponse(); - })); + testCase.verifyingAssertFunc.accept(testCase.testDescription, request); - RestRequest inferenceRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST) - .withPath("_ml/trained_models/test_id/deployment/_start") - .build(); - dispatchRequest(inferenceRequest); - assertThat(executeCalled.get(), equalTo(true)); - } - - public void testExceptionFromDifferentParamsInQueryAndBody() throws IOException { - SetOnce executeCalled = new SetOnce<>(); - controller().registerHandler(new RestStartTrainedModelDeploymentAction(false)); - verifyingClient.setExecuteVerifier(((actionType, actionRequest) -> { - assertThat(actionRequest, instanceOf(StartTrainedModelDeploymentAction.Request.class)); executeCalled.set(true); return createResponse(); })); - Map paramsMap = new HashMap<>(1); - paramsMap.put("cache_size", "1mb"); - RestRequest inferenceRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST) - .withPath("_ml/trained_models/test_id/deployment/_start") - .withParams(paramsMap) - .withContent( - BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("cache_size", "2mb").endObject()), - XContentType.JSON - ) - .build(); - dispatchRequest(inferenceRequest); - assertThat(executeCalled.get(), equalTo(null)); // the duplicate parameter should cause an exception, but the exception isn't - // visible here, so we just check that the request failed + dispatchRequest(testCase.buildRequestFunc.apply(xContentRegistry())); + assertThat(testCase.testDescription, executeCalled.get(), equalTo(true)); } private static CreateTrainedModelAssignmentAction.Response createResponse() { return new CreateTrainedModelAssignmentAction.Response(TrainedModelAssignmentTests.randomInstance()); } + + /** + * A single test case + * @param testDescription description of the test + * @param action the rest action specifying whether the cache should be disabled + * @param verifyingAssertFunc an assertion function that will be called after the + * {@link RestStartTrainedModelDeploymentAction#prepareRequest} method is called + * @param buildRequestFunc a function for constructing a fake request + */ + public record TestCase( + String testDescription, + RestStartTrainedModelDeploymentAction action, + BiConsumer verifyingAssertFunc, + Function 
buildRequestFunc
+    ) {
+        private static TestCase of(
+            String testDescription,
+            boolean shouldDisableCache,
+            BiConsumer<String, StartTrainedModelDeploymentAction.Request> verifyingAssertFunc,
+            Map<String, String> queryParams,
+            @Nullable XContentBuilder builder
+        ) {
+            return new TestCase(
+                testDescription,
+                new RestStartTrainedModelDeploymentAction(shouldDisableCache),
+                verifyingAssertFunc,
+                buildRequest(queryParams, builder)
+            );
+        }
+
+        private static TestCase of(
+            String testDescription,
+            boolean shouldDisableCache,
+            BiConsumer<String, StartTrainedModelDeploymentAction.Request> verifyingAssertFunc,
+            Map<String, String> queryParams
+        ) {
+            return of(testDescription, shouldDisableCache, verifyingAssertFunc, queryParams, null);
+        }
+
+        private static Function<NamedXContentRegistry, RestRequest> buildRequest(Map<String, String> queryParams, XContentBuilder builder) {
+            Map<String, String> params = new HashMap<>(Map.of("model_id", "model", "deployment_id", "dep"));
+            params.putAll(queryParams);
+
+            return (registry) -> {
+                var requestBuilder = new FakeRestRequest.Builder(registry).withMethod(RestRequest.Method.POST)
+                    .withPath("_ml/trained_models/test_id/deployment/_start")
+                    .withParams(params);
+
+                if (builder != null) {
+                    requestBuilder = requestBuilder.withContent(BytesReference.bytes(builder), XContentType.JSON);
+                }
+
+                return requestBuilder.build();
+            };
+        }
+
+        Object[] toArray() {
+            return new Object[] { this };
+        }
+    }
+}
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/rest/inference/RestUpdateTrainedModelDeploymentActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/rest/inference/RestUpdateTrainedModelDeploymentActionTests.java
index 2bb10d66d3d58..cce6b284a524d 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/rest/inference/RestUpdateTrainedModelDeploymentActionTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/rest/inference/RestUpdateTrainedModelDeploymentActionTests.java
@@ -30,7 +30,7 @@ public void testNumberOfAllocationInParam() {
             assertThat(actionRequest, instanceOf(UpdateTrainedModelDeploymentAction.Request.class));
             var request = (UpdateTrainedModelDeploymentAction.Request) actionRequest;
-            assertEquals(request.getNumberOfAllocations(), 5);
+            assertEquals(request.getNumberOfAllocations().intValue(), 5);
             executeCalled.set(true);
             return mock(CreateTrainedModelAssignmentAction.Response.class);
@@ -53,7 +53,7 @@ public void testNumberOfAllocationInBody() {
             assertThat(actionRequest, instanceOf(UpdateTrainedModelDeploymentAction.Request.class));
             var request = (UpdateTrainedModelDeploymentAction.Request) actionRequest;
-            assertEquals(request.getNumberOfAllocations(), 6);
+            assertEquals(request.getNumberOfAllocations().intValue(), 6);
             executeCalled.set(true);
             return mock(CreateTrainedModelAssignmentAction.Response.class);
diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java
index 12eeaf8732235..e0433ea6fdd71 100644
--- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java
+++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java
@@ -77,7 +77,7 @@ public class MonitoringTemplateRegistry extends IndexTemplateRegistry {
      * writes monitoring data in ECS format as of 8.0. These templates define the ECS schema as well as alias fields for the old monitoring
      * mappings that point to the corresponding ECS fields.
     */
-    public static final int STACK_MONITORING_REGISTRY_VERSION = 8_00_00_99 + 17;
+    public static final int STACK_MONITORING_REGISTRY_VERSION = 8_00_00_99 + 18;
     private static final String STACK_MONITORING_REGISTRY_VERSION_VARIABLE = "xpack.stack.monitoring.template.release.version";
     private static final String STACK_TEMPLATE_VERSION = "8";
     private static final String STACK_TEMPLATE_VERSION_VARIABLE = "xpack.stack.monitoring.template.version";
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/action/HostMetadata.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/action/HostMetadata.java
index 29f3b66956d55..acfcd228b731e 100644
--- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/action/HostMetadata.java
+++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/action/HostMetadata.java
@@ -25,9 +25,9 @@ final class HostMetadata implements ToXContentObject {
     final int profilingNumCores; // number of cores on the profiling host machine
 
     HostMetadata(String hostID, InstanceType instanceType, String hostArchitecture, Integer profilingNumCores) {
-        this.hostID = hostID;
-        this.instanceType = instanceType;
-        this.hostArchitecture = hostArchitecture;
+        this.hostID = hostID != null ? hostID : "";
+        this.instanceType = instanceType != null ? instanceType : new InstanceType("", "", "");
+        this.hostArchitecture = hostArchitecture != null ? hostArchitecture : "";
         this.profilingNumCores = profilingNumCores != null ? profilingNumCores : DEFAULT_PROFILING_NUM_CORES;
     }
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistry.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistry.java
index e486ffd194472..3b361748abf67 100644
--- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistry.java
+++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistry.java
@@ -51,7 +51,9 @@ public class ProfilingIndexTemplateRegistry extends IndexTemplateRegistry {
     // version 8: Changed from disabled _source to synthetic _source for profiling-events-* and profiling-metrics
     // version 9: Changed sort order for profiling-events-*
     // version 10: changed mapping profiling-events @timestamp to 'date_nanos' from 'date'
-    public static final int INDEX_TEMPLATE_VERSION = 10;
+    // version 11: Added 'profiling.agent.protocol' keyword mapping to profiling-hosts
+    // version 12: Added 'profiling.agent.env_https_proxy' keyword mapping to profiling-hosts
+    public static final int INDEX_TEMPLATE_VERSION = 12;
 
     // history for individual indices / index templates.
    // Only bump these for breaking changes that require to create a new index
    public static final int PROFILING_EVENTS_VERSION = 4;
diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/action/CO2CalculatorTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/action/CO2CalculatorTests.java
index a7b9a97b71acc..ff698465a56c5 100644
--- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/action/CO2CalculatorTests.java
+++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/action/CO2CalculatorTests.java
@@ -82,6 +82,41 @@ public void testCreateFromRegularSource() {
         checkCO2Calculation(co2Calculator.getAnnualCO2Tons(HOST_ID_D, samples), annualCoreHours, 1.7d, 0.000379069d, 2.8d);
     }
 
+    // Make sure that malformed data doesn't cause the CO2 calculation to fail.
+    public void testCreateFromMalformedSource() {
+        // tag::noformat
+        Map<String, HostMetadata> hostsTable = Map.ofEntries(
+            Map.entry(HOST_ID_A,
+                // known datacenter and instance type
+                new HostMetadata(HOST_ID_A,
+                    new InstanceType(
+                        "aws",
+                        "eu-west-1",
+                        "c5n.xlarge"
+                    ),
+                    null,
+                    null
+                )
+            ),
+            Map.entry(HOST_ID_B,
+                new HostMetadata(HOST_ID_B,
+                    null,
+                    null,
+                    null
+                )
+            )
+        );
+        // end::noformat
+
+        double samplingDurationInSeconds = 1_800.0d; // 30 minutes
+        long samples = 100_000L; // 100k samples
+        double annualCoreHours = CostCalculator.annualCoreHours(samplingDurationInSeconds, samples, 20.0d);
+        CO2Calculator co2Calculator = new CO2Calculator(hostsTable, samplingDurationInSeconds, null, null, null, null);
+
+        checkCO2Calculation(co2Calculator.getAnnualCO2Tons(HOST_ID_A, samples), annualCoreHours, 1.135d, 0.0002786d, 7.0d);
+        checkCO2Calculation(co2Calculator.getAnnualCO2Tons(HOST_ID_B, samples), annualCoreHours, 1.7d, 0.000379069d, 7.0d);
+    }
+
     private void checkCO2Calculation(
         double calculatedAnnualCO2Tons,
         double annualCoreHours,
diff --git a/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderTests.java b/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderTests.java
index 229f900ef3d15..f5a9f4e9b0c3e 100644
--- a/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderTests.java
+++ b/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderTests.java
@@ -48,7 +48,10 @@ public void testRetrieverExtractionErrors() throws IOException {
             )
         ) {
             SearchSourceBuilder ssb = new SearchSourceBuilder();
-            IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true));
+            IllegalArgumentException iae = expectThrows(
+                IllegalArgumentException.class,
+                () -> ssb.parseXContent(parser, true, nf -> true).rewrite(null)
+            );
             assertEquals("[search_after] cannot be used in children of compound retrievers", iae.getMessage());
         }
 
@@ -60,7 +63,10 @@ public void testRetrieverExtractionErrors() throws IOException {
             )
         ) {
             SearchSourceBuilder ssb = new SearchSourceBuilder();
-            IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true));
+            IllegalArgumentException iae = expectThrows(
+                IllegalArgumentException.class,
+                () -> ssb.parseXContent(parser, true, nf -> true).rewrite(null)
+            );
             assertEquals("[terminate_after] cannot be used in children of compound retrievers", iae.getMessage());
         }
 
@@ -71,7 +77,10 @@ public void testRetrieverExtractionErrors() throws IOException {
             )
         ) {
             SearchSourceBuilder ssb =
new SearchSourceBuilder(); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> ssb.parseXContent(parser, true, nf -> true).rewrite(null) + ); assertEquals("[sort] cannot be used in children of compound retrievers", iae.getMessage()); } @@ -82,7 +91,10 @@ public void testRetrieverExtractionErrors() throws IOException { ) ) { SearchSourceBuilder ssb = new SearchSourceBuilder(); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> ssb.parseXContent(parser, true, nf -> true).rewrite(null) + ); assertEquals("[min_score] cannot be used in children of compound retrievers", iae.getMessage()); } @@ -94,7 +106,10 @@ public void testRetrieverExtractionErrors() throws IOException { ) ) { SearchSourceBuilder ssb = new SearchSourceBuilder(); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> ssb.parseXContent(parser, true, nf -> true).rewrite(null) + ); assertEquals("[collapse] cannot be used in children of compound retrievers", iae.getMessage()); } @@ -105,7 +120,10 @@ public void testRetrieverExtractionErrors() throws IOException { ) ) { SearchSourceBuilder ssb = new SearchSourceBuilder(); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> ssb.parseXContent(parser, true, nf -> true).rewrite(null) + ); assertEquals("[rank] cannot be used in children of compound retrievers", iae.getMessage()); } } @@ -119,7 +137,10 @@ public void testRetrieverBuilderParsingMaxDepth() throws IOException { ) ) { SearchSourceBuilder ssb = new SearchSourceBuilder(); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> ssb.parseXContent(parser, true, nf -> true)); + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> ssb.parseXContent(parser, true, nf -> true).rewrite(null) + ); assertEquals("[1:65] [rrf] failed to parse field [retrievers]", iae.getMessage()); assertEquals( "the nested depth of the [standard] retriever exceeds the maximum nested depth [2] for retrievers", diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java index 987e3e99bb91a..ba25a774ff540 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java @@ -381,7 +381,7 @@ private static List unrollAgg( // long count = -1; if (agg instanceof InternalMultiBucketAggregation == false) { - count = getAggCount(agg, rolled.getAsMap()); + count = getAggCount(agg, rolled); } return unrollAgg(agg, original.get(agg.getName()), currentTree.get(agg.getName()), count); @@ -522,7 +522,7 @@ T extends InternalMultiBucketAggregation> InternalAggregation unrollMultiB .map(bucket -> { // Grab the value from the count agg (if it exists), which represents this 
bucket's doc_count - long bucketCount = getAggCount(source, bucket.getAggregations().getAsMap()); + long bucketCount = getAggCount(source, bucket.getAggregations()); // Don't generate buckets if the doc count is zero if (bucketCount == 0) { @@ -566,7 +566,7 @@ private static InternalAggregations unrollSubAggsFromMulti(InternalBucket bucket .filter(subAgg -> subAgg.getName().endsWith("." + RollupField.COUNT_FIELD) == false) .map(subAgg -> { - long count = getAggCount(subAgg, bucket.getAggregations().asMap()); + long count = getAggCount(subAgg, bucket.getAggregations()); InternalAggregation originalSubAgg = null; if (original != null && original.getAggregations() != null) { @@ -617,7 +617,7 @@ private static InternalAggregation unrollMetric(SingleValue metric, long count) } } - private static long getAggCount(Aggregation agg, Map aggMap) { + private static long getAggCount(Aggregation agg, InternalAggregations aggregations) { String countPath = null; if (agg.getType().equals(DateHistogramAggregationBuilder.NAME) @@ -630,10 +630,10 @@ private static long getAggCount(Aggregation agg, Map {} ); } diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java index 24c034358be74..ad5e6a0cf9b40 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java @@ -41,6 +41,7 @@ import java.util.function.Function; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.startsWith; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -549,33 +550,29 @@ public void testMultipleJobTriggering() throws Exception { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); final ThreadPool threadPool = new TestThreadPool(getTestName()); try { - final AtomicBoolean isAborted = new AtomicBoolean(false); - DelayedEmptyRollupIndexer indexer = new DelayedEmptyRollupIndexer(threadPool, job, state, null) { - @Override - protected void onAbort() { - isAborted.set(true); - } - }; + DelayedEmptyRollupIndexer indexer = new DelayedEmptyRollupIndexer(threadPool, job, state, null); indexer.start(); for (int i = 0; i < 5; i++) { final CountDownLatch latch = indexer.newLatch(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); - assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); + // This may take more than one attempt due to a cleanup/transition phase + // that happens after state change to STARTED (`isJobFinishing`). 
+ assertBusy(() -> assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis()))); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); assertFalse(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); latch.countDown(); assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STARTED))); - assertThat(indexer.getStats().getNumInvocations(), equalTo((long) i + 1)); assertThat(indexer.getStats().getNumPages(), equalTo((long) i + 1)); } final CountDownLatch latch = indexer.newLatch(); - assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); + assertBusy(() -> assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis()))); assertThat(indexer.stop(), equalTo(IndexerState.STOPPING)); assertThat(indexer.getState(), Matchers.either(Matchers.is(IndexerState.STOPPING)).or(Matchers.is(IndexerState.STOPPED))); latch.countDown(); assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STOPPED))); assertTrue(indexer.abort()); + assertThat(indexer.getStats().getNumInvocations(), greaterThanOrEqualTo(6L)); } finally { ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); } diff --git a/x-pack/plugin/searchable-snapshots/build.gradle b/x-pack/plugin/searchable-snapshots/build.gradle index c5cd000ef7774..4e309499445e6 100644 --- a/x-pack/plugin/searchable-snapshots/build.gradle +++ b/x-pack/plugin/searchable-snapshots/build.gradle @@ -15,6 +15,7 @@ base { dependencies { compileOnly project(path: xpackModule('core')) compileOnly project(path: xpackModule('blob-cache')) + compileOnly project(path: ':libs:elasticsearch-native') testImplementation(testArtifact(project(xpackModule('blob-cache')))) internalClusterTestImplementation(testArtifact(project(xpackModule('core')))) internalClusterTestImplementation(project(path: xpackModule('shutdown'))) diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java index 5204bdfcc78e6..6dfe1c5835285 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.index.shard.IndexLongFieldRange; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.plugins.Plugin; @@ -100,11 +101,11 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying final String indexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int indexOutsideSearchRangeShardCount = randomIntBetween(1, 3); - createIndexWithTimestamp(indexOutsideSearchRange, indexOutsideSearchRangeShardCount, Settings.EMPTY); + createIndexWithTimestampAndEventIngested(indexOutsideSearchRange, indexOutsideSearchRangeShardCount, Settings.EMPTY); final String indexWithinSearchRange = 
randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int indexWithinSearchRangeShardCount = randomIntBetween(1, 3); - createIndexWithTimestamp( + createIndexWithTimestampAndEventIngested( indexWithinSearchRange, indexWithinSearchRangeShardCount, Settings.builder() @@ -117,11 +118,10 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying // Either add data outside of the range, or documents that don't have timestamp data final boolean indexDataWithTimestamp = randomBoolean(); // Add enough documents to have non-metadata segment files in all shards, - // otherwise the mount operation might go through as the read won't be - // blocked + // otherwise the mount operation might go through as the read won't be blocked final int numberOfDocsInIndexOutsideSearchRange = between(350, 1000); if (indexDataWithTimestamp) { - indexDocumentsWithTimestampWithinDate( + indexDocumentsWithTimestampAndEventIngestedDates( indexOutsideSearchRange, numberOfDocsInIndexOutsideSearchRange, TIMESTAMP_TEMPLATE_OUTSIDE_RANGE @@ -132,7 +132,7 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying // Index enough documents to ensure that all shards have at least some documents int numDocsWithinRange = between(100, 1000); - indexDocumentsWithTimestampWithinDate(indexWithinSearchRange, numDocsWithinRange, TIMESTAMP_TEMPLATE_WITHIN_RANGE); + indexDocumentsWithTimestampAndEventIngestedDates(indexWithinSearchRange, numDocsWithinRange, TIMESTAMP_TEMPLATE_WITHIN_RANGE); final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); createRepository(repositoryName, "mock"); @@ -166,9 +166,10 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying final IndexMetadata indexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); + assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); - DateFieldMapper.DateFieldType timestampFieldType = indicesService.getTimestampFieldType(indexMetadata.getIndex()); - assertThat(timestampFieldType, nullValue()); + DateFieldRangeInfo timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(indexMetadata.getIndex()); + assertThat(timestampFieldTypeInfo, nullValue()); final boolean includeIndexCoveringSearchRangeInSearchRequest = randomBoolean(); List indicesToSearch = new ArrayList<>(); @@ -176,7 +177,9 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying indicesToSearch.add(indexWithinSearchRange); } indicesToSearch.add(searchableSnapshotIndexOutsideSearchRange); - RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(DataStream.TIMESTAMP_FIELD_NAME) + + String timeField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); + RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(timeField) .from("2020-11-28T00:00:00.000000000Z", true) .to("2020-11-29T00:00:00.000000000Z"); @@ -250,20 +253,44 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying ensureGreen(searchableSnapshotIndexOutsideSearchRange); final IndexMetadata updatedIndexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); + + // check that @timestamp and 'event.ingested' are now in cluster state final IndexLongFieldRange updatedTimestampMillisRange = updatedIndexMetadata.getTimestampRange(); - final DateFieldMapper.DateFieldType dateFieldType = 
indicesService.getTimestampFieldType(updatedIndexMetadata.getIndex()); - assertThat(dateFieldType, notNullValue()); - final DateFieldMapper.Resolution resolution = dateFieldType.resolution(); assertThat(updatedTimestampMillisRange.isComplete(), equalTo(true)); + final IndexLongFieldRange updatedEventIngestedRange = updatedIndexMetadata.getEventIngestedRange(); + assertThat(updatedEventIngestedRange.isComplete(), equalTo(true)); + + timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(updatedIndexMetadata.getIndex()); + final DateFieldMapper.DateFieldType timestampDataFieldType = timestampFieldTypeInfo.getTimestampFieldType(); + assertThat(timestampDataFieldType, notNullValue()); + final DateFieldMapper.DateFieldType eventIngestedDataFieldType = timestampFieldTypeInfo.getEventIngestedFieldType(); + assertThat(eventIngestedDataFieldType, notNullValue()); + + final DateFieldMapper.Resolution timestampResolution = timestampDataFieldType.resolution(); + final DateFieldMapper.Resolution eventIngestedResolution = eventIngestedDataFieldType.resolution(); if (indexDataWithTimestamp) { assertThat(updatedTimestampMillisRange, not(sameInstance(IndexLongFieldRange.EMPTY))); assertThat( updatedTimestampMillisRange.getMin(), - greaterThanOrEqualTo(resolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) + greaterThanOrEqualTo(timestampResolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) + ); + assertThat( + updatedTimestampMillisRange.getMax(), + lessThanOrEqualTo(timestampResolution.convert(Instant.parse("2020-11-27T00:00:00Z"))) + ); + + assertThat(updatedEventIngestedRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertThat( + updatedEventIngestedRange.getMin(), + greaterThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) + ); + assertThat( + updatedEventIngestedRange.getMax(), + lessThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-27T00:00:00Z"))) ); - assertThat(updatedTimestampMillisRange.getMax(), lessThanOrEqualTo(resolution.convert(Instant.parse("2020-11-27T00:00:00Z")))); } else { assertThat(updatedTimestampMillisRange, sameInstance(IndexLongFieldRange.EMPTY)); + assertThat(updatedEventIngestedRange, sameInstance(IndexLongFieldRange.EMPTY)); } // Stop the node holding the searchable snapshots, and since we defined @@ -383,6 +410,171 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying } } + /** + * Test shard skipping when only 'event.ingested' is in the index and cluster state. 
+ */ + public void testEventIngestedRangeInSearchAgainstSearchableSnapshotShards() throws Exception { + internalCluster().startMasterOnlyNode(); + internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); + final String dataNodeHoldingRegularIndex = internalCluster().startDataOnlyNode(); + final String dataNodeHoldingSearchableSnapshot = internalCluster().startDataOnlyNode(); + final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNodeHoldingSearchableSnapshot); + + final String indexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + final int indexOutsideSearchRangeShardCount = randomIntBetween(1, 3); + + final String timestampField = IndexMetadata.EVENT_INGESTED_FIELD_NAME; + + createIndexWithOnlyOneTimestampField(timestampField, indexOutsideSearchRange, indexOutsideSearchRangeShardCount, Settings.EMPTY); + + final String indexWithinSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + final int indexWithinSearchRangeShardCount = randomIntBetween(1, 3); + createIndexWithOnlyOneTimestampField( + timestampField, + indexWithinSearchRange, + indexWithinSearchRangeShardCount, + Settings.builder() + .put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNodeHoldingRegularIndex) + .build() + ); + + final int totalShards = indexOutsideSearchRangeShardCount + indexWithinSearchRangeShardCount; + + // Add enough documents to have non-metadata segment files in all shards, + // otherwise the mount operation might go through as the read won't be blocked + final int numberOfDocsInIndexOutsideSearchRange = between(350, 1000); + + indexDocumentsWithOnlyOneTimestampField( + timestampField, + indexOutsideSearchRange, + numberOfDocsInIndexOutsideSearchRange, + TIMESTAMP_TEMPLATE_OUTSIDE_RANGE + ); + + // Index enough documents to ensure that all shards have at least some documents + int numDocsWithinRange = between(100, 1000); + indexDocumentsWithOnlyOneTimestampField( + timestampField, + indexWithinSearchRange, + numDocsWithinRange, + TIMESTAMP_TEMPLATE_WITHIN_RANGE + ); + + final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + createRepository(repositoryName, "mock"); + + final SnapshotId snapshotId = createSnapshot(repositoryName, "snapshot-1", List.of(indexOutsideSearchRange)).snapshotId(); + assertAcked(indicesAdmin().prepareDelete(indexOutsideSearchRange)); + + final String searchableSnapshotIndexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + + // Block the repository for the node holding the searchable snapshot shards + // to delay its restore + blockDataNode(repositoryName, dataNodeHoldingSearchableSnapshot); + + // Force the searchable snapshot to be allocated in a particular node + Settings restoredIndexSettings = Settings.builder() + .put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNodeHoldingSearchableSnapshot) + .build(); + + final MountSearchableSnapshotRequest mountRequest = new MountSearchableSnapshotRequest( + TEST_REQUEST_TIMEOUT, + searchableSnapshotIndexOutsideSearchRange, + repositoryName, + snapshotId.getName(), + indexOutsideSearchRange, + restoredIndexSettings, + Strings.EMPTY_ARRAY, + false, + randomFrom(MountSearchableSnapshotRequest.Storage.values()) + ); + client().execute(MountSearchableSnapshotAction.INSTANCE, mountRequest).actionGet(); + + final IndexMetadata indexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); + 
assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); + assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); + + // Allow the searchable snapshots to be finally mounted + unblockNode(repositoryName, dataNodeHoldingSearchableSnapshot); + waitUntilRecoveryIsDone(searchableSnapshotIndexOutsideSearchRange); + ensureGreen(searchableSnapshotIndexOutsideSearchRange); + + IndexMetadata updatedIndexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); + IndexLongFieldRange updatedTimestampMillisRange = updatedIndexMetadata.getTimestampRange(); + IndexLongFieldRange updatedEventIngestedMillisRange = updatedIndexMetadata.getEventIngestedRange(); + + // @timestamp range should be null since it was not included in the index or indexed docs + assertThat(updatedTimestampMillisRange, equalTo(IndexLongFieldRange.UNKNOWN)); + assertThat(updatedEventIngestedMillisRange, not(equalTo(IndexLongFieldRange.UNKNOWN))); + + DateFieldRangeInfo timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(updatedIndexMetadata.getIndex()); + + DateFieldMapper.DateFieldType timestampDataFieldType = timestampFieldTypeInfo.getTimestampFieldType(); + assertThat(timestampDataFieldType, nullValue()); + + DateFieldMapper.DateFieldType eventIngestedFieldType = timestampFieldTypeInfo.getEventIngestedFieldType(); + assertThat(eventIngestedFieldType, notNullValue()); + + DateFieldMapper.Resolution eventIngestedResolution = eventIngestedFieldType.resolution(); + assertThat(updatedEventIngestedMillisRange.isComplete(), equalTo(true)); + assertThat( + updatedEventIngestedMillisRange.getMin(), + greaterThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) + ); + assertThat( + updatedEventIngestedMillisRange.getMax(), + lessThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-27T00:00:00Z"))) + ); + + // now do a search against event.ingested + List indicesToSearch = new ArrayList<>(); + indicesToSearch.add(indexWithinSearchRange); + indicesToSearch.add(searchableSnapshotIndexOutsideSearchRange); + + { + RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(timestampField) + .from("2020-11-28T00:00:00.000000000Z", true) + .to("2020-11-29T00:00:00.000000000Z"); + + SearchRequest request = new SearchRequest().indices(indicesToSearch.toArray(new String[0])) + .source(new SearchSourceBuilder().query(rangeQuery)); + + assertResponse(client().search(request), searchResponse -> { + // All the regular index searches succeeded + assertThat(searchResponse.getSuccessfulShards(), equalTo(totalShards)); + assertThat(searchResponse.getFailedShards(), equalTo(0)); + // All the searchable snapshots shards were skipped + assertThat(searchResponse.getSkippedShards(), equalTo(indexOutsideSearchRangeShardCount)); + assertThat(searchResponse.getTotalShards(), equalTo(totalShards)); + }); + + SearchShardAPIResult searchShardResult = doSearchShardAPIQuery(indicesToSearch, rangeQuery, true, totalShards); + assertThat(searchShardResult.skipped().size(), equalTo(indexOutsideSearchRangeShardCount)); + assertThat(searchShardResult.notSkipped().size(), equalTo(indexWithinSearchRangeShardCount)); + } + + // query a range that covers both indexes - all shards should be searched, none skipped + { + RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(timestampField) + .from("2019-11-28T00:00:00.000000000Z", true) + .to("2021-11-29T00:00:00.000000000Z"); + + SearchRequest request = new 
SearchRequest().indices(indicesToSearch.toArray(new String[0])) + .source(new SearchSourceBuilder().query(rangeQuery)); + + assertResponse(client().search(request), searchResponse -> { + assertThat(searchResponse.getSuccessfulShards(), equalTo(totalShards)); + assertThat(searchResponse.getFailedShards(), equalTo(0)); + assertThat(searchResponse.getSkippedShards(), equalTo(0)); + assertThat(searchResponse.getTotalShards(), equalTo(totalShards)); + }); + + SearchShardAPIResult searchShardResult = doSearchShardAPIQuery(indicesToSearch, rangeQuery, true, totalShards); + assertThat(searchShardResult.skipped().size(), equalTo(0)); + assertThat(searchShardResult.notSkipped().size(), equalTo(totalShards)); + } + } + /** * Can match against searchable snapshots is tested via both the Search API and the SearchShards (transport-only) API. * The latter is a way to do only a can-match rather than all search phases. @@ -396,7 +588,7 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() final String indexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int indexOutsideSearchRangeShardCount = randomIntBetween(1, 3); - createIndexWithTimestamp( + createIndexWithTimestampAndEventIngested( indexOutsideSearchRange, indexOutsideSearchRangeShardCount, Settings.builder() @@ -404,7 +596,7 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() .build() ); - indexDocumentsWithTimestampWithinDate(indexOutsideSearchRange, between(1, 1000), TIMESTAMP_TEMPLATE_OUTSIDE_RANGE); + indexDocumentsWithTimestampAndEventIngestedDates(indexOutsideSearchRange, between(1, 1000), TIMESTAMP_TEMPLATE_OUTSIDE_RANGE); final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); createRepository(repositoryName, "mock"); @@ -438,11 +630,14 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() final IndexMetadata indexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); + assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); - DateFieldMapper.DateFieldType timestampFieldType = indicesService.getTimestampFieldType(indexMetadata.getIndex()); - assertThat(timestampFieldType, nullValue()); + DateFieldRangeInfo timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(indexMetadata.getIndex()); + assertThat(timestampFieldTypeInfo, nullValue()); - RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(DataStream.TIMESTAMP_FIELD_NAME) + final String timestampField = randomFrom(DataStream.TIMESTAMP_FIELD_NAME, IndexMetadata.EVENT_INGESTED_FIELD_NAME); + + RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(timestampField) .from("2020-11-28T00:00:00.000000000Z", true) .to("2020-11-29T00:00:00.000000000Z"); @@ -500,14 +695,29 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() ensureGreen(searchableSnapshotIndexOutsideSearchRange); final IndexMetadata updatedIndexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); - final IndexLongFieldRange updatedTimestampMillisRange = updatedIndexMetadata.getTimestampRange(); - final DateFieldMapper.DateFieldType dateFieldType = indicesService.getTimestampFieldType(updatedIndexMetadata.getIndex()); - assertThat(dateFieldType, notNullValue()); - final DateFieldMapper.Resolution resolution = dateFieldType.resolution(); - assertThat(updatedTimestampMillisRange.isComplete(), 
equalTo(true)); - assertThat(updatedTimestampMillisRange, not(sameInstance(IndexLongFieldRange.EMPTY))); - assertThat(updatedTimestampMillisRange.getMin(), greaterThanOrEqualTo(resolution.convert(Instant.parse("2020-11-26T00:00:00Z")))); - assertThat(updatedTimestampMillisRange.getMax(), lessThanOrEqualTo(resolution.convert(Instant.parse("2020-11-27T00:00:00Z")))); + timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(updatedIndexMetadata.getIndex()); + assertThat(timestampFieldTypeInfo, notNullValue()); + + final IndexLongFieldRange updatedTimestampRange = updatedIndexMetadata.getTimestampRange(); + DateFieldMapper.Resolution tsResolution = timestampFieldTypeInfo.getTimestampFieldType().resolution(); + ; + assertThat(updatedTimestampRange.isComplete(), equalTo(true)); + assertThat(updatedTimestampRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertThat(updatedTimestampRange.getMin(), greaterThanOrEqualTo(tsResolution.convert(Instant.parse("2020-11-26T00:00:00Z")))); + assertThat(updatedTimestampRange.getMax(), lessThanOrEqualTo(tsResolution.convert(Instant.parse("2020-11-27T00:00:00Z")))); + + final IndexLongFieldRange updatedEventIngestedRange = updatedIndexMetadata.getEventIngestedRange(); + DateFieldMapper.Resolution eventIngestedResolution = timestampFieldTypeInfo.getEventIngestedFieldType().resolution(); + assertThat(updatedEventIngestedRange.isComplete(), equalTo(true)); + assertThat(updatedEventIngestedRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertThat( + updatedEventIngestedRange.getMin(), + greaterThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) + ); + assertThat( + updatedEventIngestedRange.getMax(), + lessThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-27T00:00:00Z"))) + ); // Stop the node holding the searchable snapshots, and since we defined // the index allocation criteria to require the searchable snapshot @@ -579,7 +789,7 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo final String indexWithinSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int indexWithinSearchRangeShardCount = randomIntBetween(1, 3); - createIndexWithTimestamp( + createIndexWithTimestampAndEventIngested( indexWithinSearchRange, indexWithinSearchRangeShardCount, Settings.builder() @@ -587,7 +797,7 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo .build() ); - indexDocumentsWithTimestampWithinDate(indexWithinSearchRange, between(1, 1000), TIMESTAMP_TEMPLATE_WITHIN_RANGE); + indexDocumentsWithTimestampAndEventIngestedDates(indexWithinSearchRange, between(1, 1000), TIMESTAMP_TEMPLATE_WITHIN_RANGE); final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); createRepository(repositoryName, "mock"); @@ -621,11 +831,13 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo final IndexMetadata indexMetadata = getIndexMetadata(searchableSnapshotIndexWithinSearchRange); assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); + assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); - DateFieldMapper.DateFieldType timestampFieldType = indicesService.getTimestampFieldType(indexMetadata.getIndex()); - assertThat(timestampFieldType, nullValue()); + DateFieldRangeInfo timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(indexMetadata.getIndex()); + assertThat(timestampFieldTypeInfo, nullValue()); - 
RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(DataStream.TIMESTAMP_FIELD_NAME) + String timeField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); + RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(timeField) .from("2020-11-28T00:00:00.000000000Z", true) .to("2020-11-29T00:00:00.000000000Z"); @@ -680,13 +892,32 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo final IndexMetadata updatedIndexMetadata = getIndexMetadata(searchableSnapshotIndexWithinSearchRange); final IndexLongFieldRange updatedTimestampMillisRange = updatedIndexMetadata.getTimestampRange(); - final DateFieldMapper.DateFieldType dateFieldType = indicesService.getTimestampFieldType(updatedIndexMetadata.getIndex()); - assertThat(dateFieldType, notNullValue()); - final DateFieldMapper.Resolution resolution = dateFieldType.resolution(); + timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(updatedIndexMetadata.getIndex()); + assertThat(timestampFieldTypeInfo, notNullValue()); + final DateFieldMapper.Resolution timestampResolution = timestampFieldTypeInfo.getTimestampFieldType().resolution(); assertThat(updatedTimestampMillisRange.isComplete(), equalTo(true)); assertThat(updatedTimestampMillisRange, not(sameInstance(IndexLongFieldRange.EMPTY))); - assertThat(updatedTimestampMillisRange.getMin(), greaterThanOrEqualTo(resolution.convert(Instant.parse("2020-11-28T00:00:00Z")))); - assertThat(updatedTimestampMillisRange.getMax(), lessThanOrEqualTo(resolution.convert(Instant.parse("2020-11-29T00:00:00Z")))); + assertThat( + updatedTimestampMillisRange.getMin(), + greaterThanOrEqualTo(timestampResolution.convert(Instant.parse("2020-11-28T00:00:00Z"))) + ); + assertThat( + updatedTimestampMillisRange.getMax(), + lessThanOrEqualTo(timestampResolution.convert(Instant.parse("2020-11-29T00:00:00Z"))) + ); + + final IndexLongFieldRange updatedEventIngestedMillisRange = updatedIndexMetadata.getEventIngestedRange(); + final DateFieldMapper.Resolution eventIngestedResolution = timestampFieldTypeInfo.getEventIngestedFieldType().resolution(); + assertThat(updatedEventIngestedMillisRange.isComplete(), equalTo(true)); + assertThat(updatedEventIngestedMillisRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertThat( + updatedEventIngestedMillisRange.getMin(), + greaterThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-28T00:00:00Z"))) + ); + assertThat( + updatedEventIngestedMillisRange.getMax(), + lessThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-29T00:00:00Z"))) + ); // Stop the node holding the searchable snapshots, and since we defined // the index allocation criteria to require the searchable snapshot @@ -724,17 +955,24 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo } } - private void createIndexWithTimestamp(String indexName, int numShards, Settings extraSettings) throws IOException { + private void createIndexWithTimestampAndEventIngested(String indexName, int numShards, Settings extraSettings) throws IOException { assertAcked( indicesAdmin().prepareCreate(indexName) .setMapping( XContentFactory.jsonBuilder() .startObject() .startObject("properties") + .startObject(DataStream.TIMESTAMP_FIELD_NAME) .field("type", randomFrom("date", "date_nanos")) .field("format", "strict_date_optional_time_nanos") .endObject() + + .startObject(IndexMetadata.EVENT_INGESTED_FIELD_NAME) + .field("type", randomFrom("date", "date_nanos")) + .field("format", 
"strict_date_optional_time_nanos") + .endObject() + .endObject() .endObject() ) @@ -743,12 +981,70 @@ private void createIndexWithTimestamp(String indexName, int numShards, Settings ensureGreen(indexName); } - private void indexDocumentsWithTimestampWithinDate(String indexName, int docCount, String timestampTemplate) throws Exception { + private void createIndexWithOnlyOneTimestampField(String timestampField, String index, int numShards, Settings extraSettings) + throws IOException { + assertAcked( + indicesAdmin().prepareCreate(index) + .setMapping( + XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + + .startObject(timestampField) + .field("type", randomFrom("date", "date_nanos")) + .field("format", "strict_date_optional_time_nanos") + .endObject() + + .endObject() + .endObject() + ) + .setSettings(indexSettingsNoReplicas(numShards).put(INDEX_SOFT_DELETES_SETTING.getKey(), true).put(extraSettings)) + ); + ensureGreen(index); + } + + private void indexDocumentsWithOnlyOneTimestampField(String timestampField, String index, int docCount, String timestampTemplate) + throws Exception { + final List indexRequestBuilders = new ArrayList<>(); + for (int i = 0; i < docCount; i++) { + indexRequestBuilders.add( + prepareIndex(index).setSource( + timestampField, + String.format( + Locale.ROOT, + timestampTemplate, + between(0, 23), + between(0, 59), + between(0, 59), + randomLongBetween(0, 999999999L) + ) + ) + ); + } + indexRandom(true, false, indexRequestBuilders); + + assertThat(indicesAdmin().prepareForceMerge(index).setOnlyExpungeDeletes(true).setFlush(true).get().getFailedShards(), equalTo(0)); + refresh(index); + forceMerge(); + } + + private void indexDocumentsWithTimestampAndEventIngestedDates(String indexName, int docCount, String timestampTemplate) + throws Exception { + final List indexRequestBuilders = new ArrayList<>(); for (int i = 0; i < docCount; i++) { indexRequestBuilders.add( prepareIndex(indexName).setSource( DataStream.TIMESTAMP_FIELD_NAME, + String.format( + Locale.ROOT, + timestampTemplate, + between(0, 23), + between(0, 59), + between(0, 59), + randomLongBetween(0, 999999999L) + ), + IndexMetadata.EVENT_INGESTED_FIELD_NAME, String.format( Locale.ROOT, timestampTemplate, @@ -789,4 +1085,39 @@ private void waitUntilRecoveryIsDone(String index) throws Exception { private void waitUntilAllShardsAreUnassigned(Index index) throws Exception { awaitClusterState(state -> state.getRoutingTable().index(index).allPrimaryShardsUnassigned()); } + + record SearchShardAPIResult(List skipped, List notSkipped) {} + + private static SearchShardAPIResult doSearchShardAPIQuery( + List indicesToSearch, + RangeQueryBuilder rangeQuery, + boolean allowPartialSearchResults, + int expectedTotalShards + ) { + SearchShardsRequest searchShardsRequest = new SearchShardsRequest( + indicesToSearch.toArray(new String[0]), + SearchRequest.DEFAULT_INDICES_OPTIONS, + rangeQuery, + null, + null, + allowPartialSearchResults, + null + ); + + SearchShardsResponse searchShardsResponse = client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest).actionGet(); + assertThat(searchShardsResponse.getGroups().size(), equalTo(expectedTotalShards)); + List> partitionedBySkipped = searchShardsResponse.getGroups() + .stream() + .collect( + Collectors.teeing( + Collectors.filtering(g -> g.skipped(), Collectors.toList()), + Collectors.filtering(g -> g.skipped() == false, Collectors.toList()), + List::of + ) + ); + + List skipped = partitionedBySkipped.get(0); + List notSkipped = 
partitionedBySkipped.get(1); + return new SearchShardAPIResult(skipped, notSkipped); + } } diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java index c738033761b3e..56aec13cbab29 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java @@ -145,18 +145,17 @@ public void testCreateAndRestoreSearchableSnapshot() throws Exception { assertShardFolders(indexName, false); - assertThat( - clusterAdmin().prepareState() - .clear() - .setMetadata(true) - .setIndices(indexName) - .get() - .getState() - .metadata() - .index(indexName) - .getTimestampRange(), - sameInstance(IndexLongFieldRange.UNKNOWN) - ); + IndexMetadata indexMetadata = clusterAdmin().prepareState() + .clear() + .setMetadata(true) + .setIndices(indexName) + .get() + .getState() + .metadata() + .index(indexName); + + assertThat(indexMetadata.getTimestampRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); + assertThat(indexMetadata.getEventIngestedRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); final boolean deletedBeforeMount = randomBoolean(); if (deletedBeforeMount) { @@ -252,18 +251,17 @@ public void testCreateAndRestoreSearchableSnapshot() throws Exception { ensureGreen(restoredIndexName); assertBusy(() -> assertShardFolders(restoredIndexName, true), 30, TimeUnit.SECONDS); - assertThat( - clusterAdmin().prepareState() - .clear() - .setMetadata(true) - .setIndices(restoredIndexName) - .get() - .getState() - .metadata() - .index(restoredIndexName) - .getTimestampRange(), - sameInstance(IndexLongFieldRange.UNKNOWN) - ); + indexMetadata = clusterAdmin().prepareState() + .clear() + .setMetadata(true) + .setIndices(restoredIndexName) + .get() + .getState() + .metadata() + .index(restoredIndexName); + + assertThat(indexMetadata.getTimestampRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); + assertThat(indexMetadata.getEventIngestedRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); if (deletedBeforeMount) { assertThat(indicesAdmin().prepareGetAliases(aliasName).get().getAliases().size(), equalTo(0)); @@ -684,21 +682,29 @@ public void testSnapshotMountedIndexLeavesBlobsUntouched() throws Exception { public void testSnapshotMountedIndexWithTimestampsRecordsTimestampRangeInIndexMetadata() throws Exception { final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - final int numShards = between(1, 3); + int numShards = between(1, 3); boolean indexed = randomBoolean(); - final String dateType = randomFrom("date", "date_nanos"); + String dateType = randomFrom("date", "date_nanos"); assertAcked( indicesAdmin().prepareCreate(indexName) .setMapping( XContentFactory.jsonBuilder() .startObject() .startObject("properties") + .startObject(DataStream.TIMESTAMP_FIELD_NAME) .field("type", dateType) .field("index", indexed) .field("format", "strict_date_optional_time_nanos") .endObject() + + .startObject(IndexMetadata.EVENT_INGESTED_FIELD_NAME) + .field("type", dateType) + .field("index", indexed) + .field("format", "strict_date_optional_time_nanos") + .endObject() + .endObject() .endObject() ) @@ -712,6 +718,15 @@ public void 
testSnapshotMountedIndexWithTimestampsRecordsTimestampRangeInIndexMe indexRequestBuilders.add( prepareIndex(indexName).setSource( DataStream.TIMESTAMP_FIELD_NAME, + String.format( + Locale.ROOT, + "2020-11-26T%02d:%02d:%02d.%09dZ", + between(0, 23), + between(0, 59), + between(0, 59), + randomLongBetween(0, 999999999L) + ), + IndexMetadata.EVENT_INGESTED_FIELD_NAME, String.format( Locale.ROOT, "2020-11-26T%02d:%02d:%02d.%09dZ", @@ -740,32 +755,45 @@ public void testSnapshotMountedIndexWithTimestampsRecordsTimestampRangeInIndexMe mountSnapshot(repositoryName, snapshotOne.getName(), indexName, indexName, Settings.EMPTY); ensureGreen(indexName); - final IndexLongFieldRange timestampRange = clusterAdmin().prepareState() + final IndexMetadata indexMetadata = clusterAdmin().prepareState() .clear() .setMetadata(true) .setIndices(indexName) .get() .getState() .metadata() - .index(indexName) - .getTimestampRange(); + .index(indexName); + final IndexLongFieldRange timestampRange = indexMetadata.getTimestampRange(); assertTrue(timestampRange.isComplete()); + final IndexLongFieldRange eventIngestedRange = indexMetadata.getEventIngestedRange(); + assertTrue(eventIngestedRange.isComplete()); + if (indexed) { assertThat(timestampRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); + assertThat(eventIngestedRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); if (docCount == 0) { assertThat(timestampRange, sameInstance(IndexLongFieldRange.EMPTY)); + assertThat(eventIngestedRange, sameInstance(IndexLongFieldRange.EMPTY)); } else { assertThat(timestampRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertThat(eventIngestedRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + + // both @timestamp and event.ingested have the same resolution in this test DateFieldMapper.Resolution resolution = dateType.equals("date") ? 
DateFieldMapper.Resolution.MILLISECONDS : DateFieldMapper.Resolution.NANOSECONDS; + assertThat(timestampRange.getMin(), greaterThanOrEqualTo(resolution.convert(Instant.parse("2020-11-26T00:00:00Z")))); assertThat(timestampRange.getMin(), lessThanOrEqualTo(resolution.convert(Instant.parse("2020-11-27T00:00:00Z")))); + + assertThat(eventIngestedRange.getMin(), greaterThanOrEqualTo(resolution.convert(Instant.parse("2020-11-26T00:00:00Z")))); + assertThat(eventIngestedRange.getMin(), lessThanOrEqualTo(resolution.convert(Instant.parse("2020-11-27T00:00:00Z")))); } } else { assertThat(timestampRange, sameInstance(IndexLongFieldRange.UNKNOWN)); + assertThat(eventIngestedRange, sameInstance(IndexLongFieldRange.UNKNOWN)); } } diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/cache/common/CacheFileTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/cache/common/CacheFileTests.java index 92ec94963c0c6..372dddc6eca71 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/cache/common/CacheFileTests.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/cache/common/CacheFileTests.java @@ -14,11 +14,11 @@ import org.elasticsearch.blobcache.BlobCacheTestUtils; import org.elasticsearch.blobcache.common.ByteRange; import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.filesystem.FileSystemNatives; import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.PathUtilsForTesting; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.nativeaccess.NativeAccess; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.searchablesnapshots.cache.common.CacheFile.EvictionListener; @@ -393,15 +393,7 @@ public void testFSyncFailure() throws Exception { } } - private static void assumeLinux64bitsOrWindows() { - assumeTrue( - "This test uses native methods implemented only for Windows & Linux 64bits", - Constants.WINDOWS || Constants.LINUX && Constants.JRE_IS_64BIT - ); - } - public void testCacheFileCreatedAsSparseFile() throws Exception { - assumeLinux64bitsOrWindows(); final long fourKb = 4096L; final long oneMb = 1 << 20; @@ -420,7 +412,7 @@ public void testCacheFileCreatedAsSparseFile() throws Exception { final FileChannel fileChannel = cacheFile.getChannel(); assertTrue(Files.exists(file)); - OptionalLong sizeOnDisk = FileSystemNatives.allocatedSizeInBytes(file); + OptionalLong sizeOnDisk = NativeAccess.instance().allocatedSizeInBytes(file); assertTrue(sizeOnDisk.isPresent()); assertThat(sizeOnDisk.getAsLong(), equalTo(0L)); @@ -430,7 +422,7 @@ public void testCacheFileCreatedAsSparseFile() throws Exception { fill(fileChannel, Math.toIntExact(cacheFile.getLength() - 1L), Math.toIntExact(cacheFile.getLength())); fileChannel.force(false); - sizeOnDisk = FileSystemNatives.allocatedSizeInBytes(file); + sizeOnDisk = NativeAccess.instance().allocatedSizeInBytes(file); assertTrue(sizeOnDisk.isPresent()); assertThat( "Cache file should be sparse and not fully allocated on disk", @@ -445,7 +437,7 @@ public void testCacheFileCreatedAsSparseFile() throws Exception { fill(fileChannel, 0, Math.toIntExact(cacheFile.getLength())); fileChannel.force(false); - sizeOnDisk = FileSystemNatives.allocatedSizeInBytes(file); + sizeOnDisk = 
NativeAccess.instance().allocatedSizeInBytes(file); assertTrue(sizeOnDisk.isPresent()); assertThat( "Cache file should be fully allocated on disk (maybe more given cluster/block size)", diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/AutoConfigureNode.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/AutoConfigureNode.java index 29828fba085d8..3994fb50c7fc6 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/AutoConfigureNode.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/AutoConfigureNode.java @@ -114,7 +114,8 @@ */ public class AutoConfigureNode extends EnvironmentAwareCommand { - public static final String AUTO_CONFIG_ALT_DN = "CN=Elasticsearch security auto-configuration HTTP CA"; + public static final String AUTO_CONFIG_HTTP_ALT_DN = "CN=Elasticsearch security auto-configuration HTTP CA"; + public static final String AUTO_CONFIG_TRANSPORT_ALT_DN = "CN=Elasticsearch security auto-configuration transport CA"; // the transport keystore is also used as a truststore private static final String SIGNATURE_ALGORITHM = "SHA256withRSA"; private static final String TRANSPORT_AUTOGENERATED_KEYSTORE_NAME = "transport"; @@ -272,7 +273,8 @@ public void execute(Terminal terminal, OptionSet options, Environment env, Proce final List transportAddresses; final String cnValue = NODE_NAME_SETTING.exists(env.settings()) ? NODE_NAME_SETTING.get(env.settings()) : System.getenv("HOSTNAME"); final X500Principal certificatePrincipal = new X500Principal("CN=" + cnValue); - final X500Principal caPrincipal = new X500Principal(AUTO_CONFIG_ALT_DN); + final X500Principal httpCaPrincipal = new X500Principal(AUTO_CONFIG_HTTP_ALT_DN); + final X500Principal transportCaPrincipal = new X500Principal(AUTO_CONFIG_TRANSPORT_ALT_DN); if (inEnrollmentMode) { // this is an enrolling node, get HTTP CA key/certificate and transport layer key/certificate from another node @@ -402,7 +404,7 @@ public void execute(Terminal terminal, OptionSet options, Environment env, Proce final KeyPair transportCaKeyPair = CertGenUtils.generateKeyPair(TRANSPORT_CA_KEY_SIZE); final PrivateKey transportCaKey = transportCaKeyPair.getPrivate(); transportCaCert = CertGenUtils.generateSignedCertificate( - caPrincipal, + transportCaPrincipal, null, transportCaKeyPair, null, @@ -429,7 +431,7 @@ public void execute(Terminal terminal, OptionSet options, Environment env, Proce httpCaKey = httpCaKeyPair.getPrivate(); // self-signed CA httpCaCert = CertGenUtils.generateSignedCertificate( - caPrincipal, + httpCaPrincipal, null, httpCaKeyPair, null, diff --git a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/AutoConfigureNodeTests.java b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/AutoConfigureNodeTests.java index d1dbe9d037756..129d85d0818b2 100644 --- a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/AutoConfigureNodeTests.java +++ b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/AutoConfigureNodeTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.ssl.KeyStoreUtil; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Tuple; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.http.HttpTransportSettings; @@ -32,6 +33,8 @@ import java.util.List; 
import static java.nio.file.StandardOpenOption.CREATE_NEW; +import static org.elasticsearch.xpack.security.cli.AutoConfigureNode.AUTO_CONFIG_HTTP_ALT_DN; +import static org.elasticsearch.xpack.security.cli.AutoConfigureNode.AUTO_CONFIG_TRANSPORT_ALT_DN; import static org.elasticsearch.xpack.security.cli.AutoConfigureNode.anyRemoteHostNodeAddress; import static org.elasticsearch.xpack.security.cli.AutoConfigureNode.removePreviousAutoconfiguration; import static org.hamcrest.Matchers.equalTo; @@ -131,6 +134,21 @@ public void testRemovePreviousAutoconfigurationRetainsUserAdded() throws Excepti assertEquals(file1, removePreviousAutoconfiguration(file2)); } + public void testSubjectAndIssuerForGeneratedCertificates() throws Exception { + // test no publish settings + Path tempDir = createTempDir(); + try { + Files.createDirectory(tempDir.resolve("config")); + // empty yml file, it just has to exist + Files.write(tempDir.resolve("config").resolve("elasticsearch.yml"), List.of(), CREATE_NEW); + Tuple generatedCerts = runAutoConfigAndReturnCertificates(tempDir, Settings.EMPTY); + assertThat(checkSubjectAndIssuerDN(generatedCerts.v1(), "CN=dummy.test.hostname", AUTO_CONFIG_HTTP_ALT_DN), is(true)); + assertThat(checkSubjectAndIssuerDN(generatedCerts.v2(), "CN=dummy.test.hostname", AUTO_CONFIG_TRANSPORT_ALT_DN), is(true)); + } finally { + deleteDirectory(tempDir); + } + } + public void testGeneratedHTTPCertificateSANs() throws Exception { // test no publish settings Path tempDir = createTempDir(); @@ -262,6 +280,14 @@ private boolean checkGeneralNameSan(X509Certificate certificate, String generalN return false; } + private boolean checkSubjectAndIssuerDN(X509Certificate certificate, String subjectName, String issuerName) throws Exception { + if (certificate.getSubjectX500Principal().getName().equals(subjectName) + && certificate.getIssuerX500Principal().getName().equals(issuerName)) { + return true; + } + return false; + } + private void verifyExtendedKeyUsage(X509Certificate httpCertificate) throws Exception { List extendedKeyUsage = httpCertificate.getExtendedKeyUsage(); assertEquals("Only one extended key usage expected for HTTP certificate.", 1, extendedKeyUsage.size()); @@ -270,6 +296,11 @@ private void verifyExtendedKeyUsage(X509Certificate httpCertificate) throws Exce } private X509Certificate runAutoConfigAndReturnHTTPCertificate(Path configDir, Settings settings) throws Exception { + Tuple generatedCertificates = runAutoConfigAndReturnCertificates(configDir, settings); + return generatedCertificates.v1(); + } + + private Tuple runAutoConfigAndReturnCertificates(Path configDir, Settings settings) throws Exception { final Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", configDir).put(settings).build()); // runs the command to auto-generate the config files and the keystore new AutoConfigureNode(false).execute(MockTerminal.create(), new OptionParser().parse(), env, null); @@ -278,16 +309,28 @@ private X509Certificate runAutoConfigAndReturnHTTPCertificate(Path configDir, Se nodeKeystore.decrypt(new char[0]); // the keystore is always bootstrapped with an empty password SecureString httpKeystorePassword = nodeKeystore.getString("xpack.security.http.ssl.keystore.secure_password"); + SecureString transportKeystorePassword = nodeKeystore.getString("xpack.security.transport.ssl.keystore.secure_password"); final Settings newSettings = Settings.builder().loadFromPath(env.configFile().resolve("elasticsearch.yml")).build(); final String httpKeystorePath = 
newSettings.get("xpack.security.http.ssl.keystore.path"); + final String transportKeystorePath = newSettings.get("xpack.security.transport.ssl.keystore.path"); KeyStore httpKeystore = KeyStoreUtil.readKeyStore( configDir.resolve("config").resolve(httpKeystorePath), "PKCS12", httpKeystorePassword.getChars() ); - return (X509Certificate) httpKeystore.getCertificate("http"); + + KeyStore transportKeystore = KeyStoreUtil.readKeyStore( + configDir.resolve("config").resolve(transportKeystorePath), + "PKCS12", + transportKeystorePassword.getChars() + ); + + X509Certificate httpCertificate = (X509Certificate) httpKeystore.getCertificate("http"); + X509Certificate transportCertificate = (X509Certificate) transportKeystore.getCertificate("transport"); + + return new Tuple<>(httpCertificate, transportCertificate); } private void deleteDirectory(Path directory) throws IOException { diff --git a/x-pack/plugin/security/licenses/nimbus-jose-jwt-LICENSE.txt b/x-pack/plugin/security/licenses/nimbus-jose-jwt-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/x-pack/plugin/security/licenses/nimbus-jose-jwt-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/x-pack/plugin/core/licenses/nimbus-jose-jwt-NOTICE.txt b/x-pack/plugin/security/licenses/nimbus-jose-jwt-NOTICE.txt similarity index 100% rename from x-pack/plugin/core/licenses/nimbus-jose-jwt-NOTICE.txt rename to x-pack/plugin/security/licenses/nimbus-jose-jwt-NOTICE.txt diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index 3dd8d780d6f82..ffa4d1082c7e6 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -57,6 +57,7 @@ public class Constants { "cluster:admin/persistent/update_status", "cluster:admin/reindex/rethrottle", "cluster:admin/repository/_cleanup", + "cluster:admin/repository/analyze", "cluster:admin/repository/delete", "cluster:admin/repository/get", "cluster:admin/repository/put", @@ -277,7 +278,10 @@ public class Constants { "cluster:admin/xpack/security/remote_cluster_credentials/reload", "cluster:admin/xpack/security/role/delete", "cluster:admin/xpack/security/role/get", + "cluster:admin/xpack/security/role/query", "cluster:admin/xpack/security/role/put", + "cluster:admin/xpack/security/role/bulk_put", + "cluster:admin/xpack/security/role/bulk_delete", "cluster:admin/xpack/security/role_mapping/delete", "cluster:admin/xpack/security/role_mapping/get", "cluster:admin/xpack/security/role_mapping/put", diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/OperatorPrivilegesIT.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/OperatorPrivilegesIT.java index 6889c81664173..9e680688edeaa 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/OperatorPrivilegesIT.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/OperatorPrivilegesIT.java @@ -8,6 +8,7 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; @@ -325,6 +326,13 @@ public void testNonOperatorUserWillFailToCallDesiredNodesAPI() throws IOExceptio assertThat(responseException.getResponse().getStatusLine().getStatusCode(), equalTo(403)); } + public void testNonOperatorUserCanCallAnalyzeRepositoryAPI() throws IOException { + createSnapshotRepo("testAnalysisRepo"); + var request = new Request("POST", "/_snapshot/testAnalysisRepo/_analyze"); + Response response = client().performRequest(request); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + } + private void createSnapshotRepo(String repoName) throws IOException { Request request = new Request("PUT", "/_snapshot/" + repoName); request.setJsonEntity( diff --git a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/LicenseDLSFLSRoleIT.java 
b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/LicenseDLSFLSRoleIT.java new file mode 100644 index 0000000000000..f81bab4866bdf --- /dev/null +++ b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/LicenseDLSFLSRoleIT.java @@ -0,0 +1,196 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.local.model.User; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.junit.ClassRule; + +import java.io.IOException; +import java.util.Comparator; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.security.QueryRoleIT.assertQuery; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.iterableWithSize; + +/** + * This class tests that roles with DLS and FLS are disabled when queried when the license doesn't allow such features. 
+ */ +public final class LicenseDLSFLSRoleIT extends ESRestTestCase { + + protected static final String REST_USER = "security_test_user"; + private static final SecureString REST_PASSWORD = new SecureString("security-test-password".toCharArray()); + private static final String ADMIN_USER = "admin_user"; + private static final SecureString ADMIN_PASSWORD = new SecureString("admin-password".toCharArray()); + protected static final String READ_SECURITY_USER = "read_security_user"; + private static final SecureString READ_SECURITY_PASSWORD = new SecureString("read-security-password".toCharArray()); + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .nodes(1) + .distribution(DistributionType.DEFAULT) + // start as "trial" + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.security.enabled", "true") + .setting("xpack.security.http.ssl.enabled", "false") + .setting("xpack.security.transport.ssl.enabled", "false") + .rolesFile(Resource.fromClasspath("roles.yml")) + .user(ADMIN_USER, ADMIN_PASSWORD.toString(), User.ROOT_USER_ROLE, true) + .user(REST_USER, REST_PASSWORD.toString(), "security_test_role", false) + .user(READ_SECURITY_USER, READ_SECURITY_PASSWORD.toString(), "read_security_user_role", false) + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + @Override + protected Settings restAdminSettings() { + String token = basicAuthHeaderValue(ADMIN_USER, ADMIN_PASSWORD); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue(REST_USER, REST_PASSWORD); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + + @SuppressWarnings("unchecked") + public void testQueryDLSFLSRolesShowAsDisabled() throws Exception { + // auto-generated "trial" + waitForLicense(adminClient(), "trial"); + // neither DLS nor FLS role + { + RoleDescriptor.IndicesPrivileges[] indicesPrivileges = new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("no-dls-nor-fls*").privileges("read").build() }; + createRoleWithIndicesPrivileges(adminClient(), "role_with_neither", indicesPrivileges); + } + // role with DLS + { + RoleDescriptor.IndicesPrivileges[] indicesPrivileges = new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("read").query("{\"match_all\":{}}").build() }; + createRoleWithIndicesPrivileges(adminClient(), "role_with_DLS", indicesPrivileges); + } + // role with FLS + { + RoleDescriptor.IndicesPrivileges[] indicesPrivileges = new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder() + .indices("*") + .privileges("read") + .grantedFields("granted_field1", "granted*") + .build() }; + createRoleWithIndicesPrivileges(adminClient(), "role_with_FLS", indicesPrivileges); + } + // role with DLS and FLS + { + RoleDescriptor.IndicesPrivileges[] indicesPrivileges = new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder() + .indices("*") + .privileges("read") + .grantedFields("granted_field1", "granted*") + .build(), + RoleDescriptor.IndicesPrivileges.builder() + .indices("*") + .privileges("read") + .query("{\"match\": {\"category\": \"click\"}}") + .build() }; + createRoleWithIndicesPrivileges(adminClient(), "role_with_FLS_and_DLS", indicesPrivileges); + } + assertQuery(client(), "", 
4, roles -> { + roles.sort(Comparator.comparing(o -> ((String) o.get("name")))); + assertThat(roles, iterableWithSize(4)); + assertThat(roles.get(0).get("name"), equalTo("role_with_DLS")); + assertRoleEnabled(roles.get(0), true); + assertThat(roles.get(1).get("name"), equalTo("role_with_FLS")); + assertRoleEnabled(roles.get(1), true); + assertThat(roles.get(2).get("name"), equalTo("role_with_FLS_and_DLS")); + assertRoleEnabled(roles.get(2), true); + assertThat(roles.get(3).get("name"), equalTo("role_with_neither")); + assertRoleEnabled(roles.get(3), true); + }); + // start "basic" license + Request request = new Request("POST", "/_license/start_basic?acknowledge=true"); + Response response = adminClient().performRequest(request); + assertOK(response); + Map responseMap = responseAsMap(response); + assertTrue(((Boolean) responseMap.get("basic_was_started"))); + assertTrue(((Boolean) responseMap.get("acknowledged"))); + waitForLicense(adminClient(), "basic"); + // now the same roles show up as disabled ("enabled" is "false") + assertQuery(client(), "", 4, roles -> { + roles.sort(Comparator.comparing(o -> ((String) o.get("name")))); + assertThat(roles, iterableWithSize(4)); + assertThat(roles.get(0).get("name"), equalTo("role_with_DLS")); + assertRoleEnabled(roles.get(0), false); + assertThat(roles.get(1).get("name"), equalTo("role_with_FLS")); + assertRoleEnabled(roles.get(1), false); + assertThat(roles.get(2).get("name"), equalTo("role_with_FLS_and_DLS")); + assertRoleEnabled(roles.get(2), false); + // role with neither DLS nor FLS is still enabled + assertThat(roles.get(3).get("name"), equalTo("role_with_neither")); + assertRoleEnabled(roles.get(3), true); + }); + } + + @SuppressWarnings("unchecked") + private void createRoleWithIndicesPrivileges(RestClient adminClient, String name, RoleDescriptor.IndicesPrivileges[] indicesPrivileges) + throws IOException { + Request request = new Request("POST", "/_security/role/" + name); + Map requestMap = new HashMap<>(); + requestMap.put(RoleDescriptor.Fields.INDICES.getPreferredName(), indicesPrivileges); + BytesReference source = BytesReference.bytes(jsonBuilder().map(requestMap)); + request.setJsonEntity(source.utf8ToString()); + Response response = adminClient.performRequest(request); + assertOK(response); + Map responseMap = responseAsMap(response); + assertTrue((Boolean) ((Map) responseMap.get("role")).get("created")); + } + + @SuppressWarnings("unchecked") + private static void assertRoleEnabled(Map roleMap, boolean enabled) { + assertTrue(roleMap.containsKey("transient_metadata")); + assertThat(roleMap.get("transient_metadata"), instanceOf(Map.class)); + assertThat(((Map) roleMap.get("transient_metadata")).get("enabled"), equalTo(enabled)); + } + + @SuppressWarnings("unchecked") + private static void waitForLicense(RestClient adminClient, String type) throws Exception { + final Request request = new Request("GET", "_license"); + assertBusy(() -> { + Response response; + try { + response = adminClient.performRequest(request); + } catch (ResponseException e) { + throw new AssertionError("license not yet installed", e); + } + assertOK(response); + Map responseMap = responseAsMap(response); + assertTrue(responseMap.containsKey("license")); + assertThat(((Map) responseMap.get("license")).get("status"), equalTo("active")); + assertThat(((Map) responseMap.get("license")).get("type"), equalTo(type)); + }); + } +} diff --git a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/QueryRoleIT.java 
b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/QueryRoleIT.java new file mode 100644 index 0000000000000..1588749b9a331 --- /dev/null +++ b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/QueryRoleIT.java @@ -0,0 +1,653 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security; + +import org.apache.http.HttpHeaders; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.ApplicationResourcePrivileges; +import org.elasticsearch.xpack.security.support.SecurityMigrations; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.security.action.UpdateIndexMigrationVersionAction.MIGRATION_VERSION_CUSTOM_DATA_KEY; +import static org.elasticsearch.xpack.core.security.action.UpdateIndexMigrationVersionAction.MIGRATION_VERSION_CUSTOM_KEY; +import static org.elasticsearch.xpack.core.security.test.TestRestrictedIndices.INTERNAL_SECURITY_MAIN_INDEX_7; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.Matchers.emptyIterable; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.iterableWithSize; + +public final class QueryRoleIT extends SecurityInBasicRestTestCase { + + private static final String READ_SECURITY_USER_AUTH_HEADER = "Basic cmVhZF9zZWN1cml0eV91c2VyOnJlYWQtc2VjdXJpdHktcGFzc3dvcmQ="; + + public void testSimpleQueryAllRoles() throws IOException { + assertQuery("", 0, roles -> assertThat(roles, emptyIterable())); + RoleDescriptor createdRole = createRandomRole(); + assertQuery("", 1, roles -> { + assertThat(roles, iterableWithSize(1)); + assertRoleMap(roles.get(0), createdRole); + }); + assertQuery(""" + {"query":{"match_all":{}},"from":1}""", 1, roles -> assertThat(roles, emptyIterable())); + } + + public void testDisallowedFields() throws Exception { + if (randomBoolean()) { + createRandomRole(); + } + // query on some disallowed field + { + Request request = new Request(randomFrom("POST", "GET"), "/_security/_query/role"); + request.setOptions(request.getOptions().toBuilder().addHeader(HttpHeaders.AUTHORIZATION, READ_SECURITY_USER_AUTH_HEADER)); + request.setJsonEntity(""" + {"query":{"prefix":{"password":"whatever"}}}"""); + ResponseException e = 
expectThrows(ResponseException.class, () -> client().performRequest(request)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(400)); + assertThat(e.getMessage(), containsString("Field [password] is not allowed for querying or aggregation")); + } + // query on the _id field + { + Request request = new Request(randomFrom("POST", "GET"), "/_security/_query/role"); + request.setOptions(request.getOptions().toBuilder().addHeader(HttpHeaders.AUTHORIZATION, READ_SECURITY_USER_AUTH_HEADER)); + request.setJsonEntity(""" + {"query":{"term":{"_id":"role-test"}}}"""); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(400)); + assertThat(e.getMessage(), containsString("Field [_id] is not allowed for querying or aggregation")); + } + // sort on disallowed field + { + Request request = new Request(randomFrom("POST", "GET"), "/_security/_query/role"); + request.setOptions(request.getOptions().toBuilder().addHeader(HttpHeaders.AUTHORIZATION, READ_SECURITY_USER_AUTH_HEADER)); + request.setJsonEntity(""" + {"query":{"bool":{"must_not":[{"wildcard":{"applications.application":"a*9"}}]}},"sort":["api_key_hash"]}"""); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(400)); + assertThat(e.getMessage(), containsString("Field [api_key_hash] is not allowed for querying or aggregation")); + } + } + + public void testDisallowedQueryType() throws Exception { + if (randomBoolean()) { + createRandomRole(); + } + // query using some disallowed query type + { + Request request = new Request(randomFrom("POST", "GET"), "/_security/_query/role"); + request.setOptions(request.getOptions().toBuilder().addHeader(HttpHeaders.AUTHORIZATION, READ_SECURITY_USER_AUTH_HEADER)); + request.setJsonEntity(""" + {"query":{"match_phrase":{"description":{"query":"whatever"}}}}"""); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(400)); + assertThat(e.getMessage(), containsString("Query type [match_phrase] is not currently supported in this context")); + } + // query using some disallowed query type inside the (allowed) boolean query type + { + Request request = new Request(randomFrom("POST", "GET"), "/_security/_query/role"); + request.setOptions(request.getOptions().toBuilder().addHeader(HttpHeaders.AUTHORIZATION, READ_SECURITY_USER_AUTH_HEADER)); + request.setJsonEntity(""" + {"query":{"bool":{"must_not":[{"more_like_this":{"fields":["description"],"like":"hollywood"}}]}}}"""); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(400)); + assertThat(e.getMessage(), containsString("Query type [more_like_this] is not currently supported in this context")); + } + } + + public void testSimpleMetadataSearch() throws Exception { + int nroles = randomIntBetween(1, 3); + for (int i = 0; i < nroles; i++) { + createRandomRole(); + } + RoleDescriptor matchesOnMetadataValue = createRole( + "matchesOnMetadataValue", + randomBoolean() ? null : randomAlphaOfLength(8), + Map.of("matchSimpleKey", "matchSimpleValue"), + randomApplicationPrivileges() + ); + RoleDescriptor matchesOnMetadataKey = createRole( + "matchesOnMetadataKey", + randomBoolean() ? 
null : randomAlphaOfLength(8), + Map.of("matchSimpleKey", "other"), + randomApplicationPrivileges() + ); + createRole( + "other2", + randomBoolean() ? null : randomAlphaOfLength(8), + Map.of("other", "matchSimpleValue"), + randomApplicationPrivileges() + ); + waitForMigrationCompletion(adminClient(), SecurityMigrations.ROLE_METADATA_FLATTENED_MIGRATION_VERSION); + assertQuery(""" + {"query":{"term":{"metadata.matchSimpleKey":"matchSimpleValue"}}}""", 1, roles -> { + assertThat(roles, iterableWithSize(1)); + assertRoleMap(roles.get(0), matchesOnMetadataValue); + }); + assertQuery(""" + {"query":{"exists":{"field":"metadata.matchSimpleKey"}}}""", 2, roles -> { + assertThat(roles, iterableWithSize(2)); + roles.sort(Comparator.comparing(o -> ((String) o.get("name")))); + assertRoleMap(roles.get(0), matchesOnMetadataKey); + assertRoleMap(roles.get(1), matchesOnMetadataValue); + }); + } + + public void testSearchMultipleMetadataFields() throws Exception { + createRole( + "noMetadataRole", + randomBoolean() ? null : randomAlphaOfLength(8), + randomBoolean() ? null : Map.of(), + randomApplicationPrivileges() + ); + RoleDescriptor role1 = createRole( + "1" + randomAlphaOfLength(4), + randomBoolean() ? null : randomAlphaOfLength(8), + Map.of("simpleField1", "matchThis", "simpleField2", "butNotThis"), + randomApplicationPrivileges() + ); + createRole( + "2" + randomAlphaOfLength(4), + randomBoolean() ? null : randomAlphaOfLength(8), + Map.of("simpleField2", "butNotThis"), + randomApplicationPrivileges() + ); + RoleDescriptor role3 = createRole( + "3" + randomAlphaOfLength(4), + randomBoolean() ? null : randomAlphaOfLength(8), + Map.of("listField1", List.of("matchThis", "butNotThis"), "listField2", List.of("butNotThisToo")), + randomApplicationPrivileges() + ); + createRole( + "4" + randomAlphaOfLength(4), + randomBoolean() ? null : randomAlphaOfLength(8), + Map.of("listField2", List.of("butNotThisToo", "andAlsoNotThis")), + randomApplicationPrivileges() + ); + RoleDescriptor role5 = createRole( + "5" + randomAlphaOfLength(4), + randomBoolean() ? null : randomAlphaOfLength(8), + Map.of("listField1", List.of("maybeThis", List.of("matchThis")), "listField2", List.of("butNotThis")), + randomApplicationPrivileges() + ); + RoleDescriptor role6 = createRole( + "6" + randomAlphaOfLength(4), + randomBoolean() ? null : randomAlphaOfLength(8), + Map.of("mapField1", Map.of("innerField", "matchThis")), + randomApplicationPrivileges() + ); + createRole( + "7" + randomAlphaOfLength(4), + randomBoolean() ? null : randomAlphaOfLength(8), + Map.of("mapField1", Map.of("innerField", "butNotThis")), + randomApplicationPrivileges() + ); + RoleDescriptor role8 = createRole( + "8" + randomAlphaOfLength(4), + randomBoolean() ? 
null : randomAlphaOfLength(8), + Map.of("mapField1", Map.of("innerField", "butNotThis", "innerField2", Map.of("deeperInnerField", "matchThis"))), + randomApplicationPrivileges() + ); + waitForMigrationCompletion(adminClient(), SecurityMigrations.ROLE_METADATA_FLATTENED_MIGRATION_VERSION); + Consumer>> matcher = roles -> { + assertThat(roles, iterableWithSize(5)); + roles.sort(Comparator.comparing(o -> ((String) o.get("name")))); + assertRoleMap(roles.get(0), role1); + assertRoleMap(roles.get(1), role3); + assertRoleMap(roles.get(2), role5); + assertRoleMap(roles.get(3), role6); + assertRoleMap(roles.get(4), role8); + }; + assertQuery(""" + {"query":{"prefix":{"metadata":"match"}}}""", 5, matcher); + assertQuery(""" + {"query":{"simple_query_string":{"fields":["meta*"],"query":"matchThis"}}}""", 5, matcher); + } + + @SuppressWarnings("unchecked") + public void testSimpleSort() throws IOException { + // some other non-matching roles + int nOtherRoles = randomIntBetween(1, 5); + for (int i = 0; i < nOtherRoles; i++) { + createRandomRole(); + } + // some matching roles (at least 2, for sorting) + int nMatchingRoles = randomIntBetween(2, 5); + for (int i = 0; i < nMatchingRoles; i++) { + ApplicationResourcePrivileges[] applicationResourcePrivileges = randomArray( + 1, + 5, + ApplicationResourcePrivileges[]::new, + this::randomApplicationResourcePrivileges + ); + { + int matchingApplicationIndex = randomIntBetween(0, applicationResourcePrivileges.length - 1); + // make sure the "application" matches the filter query below ("a*9") + applicationResourcePrivileges[matchingApplicationIndex] = RoleDescriptor.ApplicationResourcePrivileges.builder() + .application("a" + randomAlphaOfLength(4) + "9") + .resources(applicationResourcePrivileges[matchingApplicationIndex].getResources()) + .privileges(applicationResourcePrivileges[matchingApplicationIndex].getPrivileges()) + .build(); + } + { + int matchingApplicationIndex = randomIntBetween(0, applicationResourcePrivileges.length - 1); + int matchingResourcesIndex = randomIntBetween( + 0, + applicationResourcePrivileges[matchingApplicationIndex].getResources().length - 1 + ); + // make sure the "resources" matches the terms query below ("99") + applicationResourcePrivileges[matchingApplicationIndex] = RoleDescriptor.ApplicationResourcePrivileges.builder() + .application(applicationResourcePrivileges[matchingApplicationIndex].getApplication()) + .resources(applicationResourcePrivileges[matchingApplicationIndex].getResources()[matchingResourcesIndex] = "99") + .privileges(applicationResourcePrivileges[matchingApplicationIndex].getPrivileges()) + .build(); + } + createRole( + randomAlphaOfLength(4) + i, + randomBoolean() ? null : randomAlphaOfLength(8), + randomBoolean() ? 
null : randomMetadata(), + applicationResourcePrivileges + ); + } + assertQuery(""" + {"query":{"bool":{"filter":[{"wildcard":{"applications.application":"a*9"}}]}},"sort":["name"]}""", nMatchingRoles, roles -> { + assertThat(roles, iterableWithSize(nMatchingRoles)); + // assert sorting on name + for (int i = 0; i < nMatchingRoles; i++) { + assertThat(roles.get(i).get("_sort"), instanceOf(List.class)); + assertThat(((List) roles.get(i).get("_sort")), iterableWithSize(1)); + assertThat(((List) roles.get(i).get("_sort")).get(0), equalTo(roles.get(i).get("name"))); + } + // assert the ascending sort order + for (int i = 1; i < nMatchingRoles; i++) { + int compareNames = roles.get(i - 1).get("name").toString().compareTo(roles.get(i).get("name").toString()); + assertThat(compareNames < 0, is(true)); + } + }); + assertQuery( + """ + {"query":{"bool":{"must":[{"terms":{"applications.resources":["99"]}}]}},"sort":["applications.privileges"]}""", + nMatchingRoles, + roles -> { + assertThat(roles, iterableWithSize(nMatchingRoles)); + // assert sorting on best "applications.privileges" + for (int i = 0; i < nMatchingRoles; i++) { + assertThat(roles.get(i).get("_sort"), instanceOf(List.class)); + assertThat(((List) roles.get(i).get("_sort")), iterableWithSize(1)); + assertThat(((List) roles.get(i).get("_sort")).get(0), equalTo(getPrivilegeNameUsedForSorting(roles.get(i)))); + } + // assert the ascending sort order + for (int i = 1; i < nMatchingRoles; i++) { + int comparePrivileges = getPrivilegeNameUsedForSorting(roles.get(i - 1)).compareTo( + getPrivilegeNameUsedForSorting(roles.get(i)) + ); + assertThat(comparePrivileges < 0, is(true)); + } + } + ); + } + + @SuppressWarnings("unchecked") + public void testSortWithPagination() throws IOException { + int roleIdx = 0; + // some non-matching roles + int nOtherRoles = randomIntBetween(0, 5); + for (int i = 0; i < nOtherRoles; i++) { + createRole( + Strings.format("role_%03d", roleIdx++), + randomBoolean() ? null : randomDescription(), + randomBoolean() ? null : randomMetadata(), + randomApplicationPrivileges() + ); + } + // first matching role + RoleDescriptor firstMatchingRole = createRole( + Strings.format("role_%03d", roleIdx++), + "some ZZZZmatchZZZZ descr", + randomBoolean() ? null : randomMetadata(), + randomApplicationPrivileges() + ); + nOtherRoles = randomIntBetween(0, 5); + for (int i = 0; i < nOtherRoles; i++) { + createRole( + Strings.format("role_%03d", roleIdx++), + randomBoolean() ? null : randomDescription(), + randomBoolean() ? null : randomMetadata(), + randomApplicationPrivileges() + ); + } + // second matching role + RoleDescriptor secondMatchingRole = createRole( + Strings.format("role_%03d", roleIdx++), + "other ZZZZmatchZZZZ meh", + randomBoolean() ? null : randomMetadata(), + randomApplicationPrivileges() + ); + nOtherRoles = randomIntBetween(0, 5); + for (int i = 0; i < nOtherRoles; i++) { + createRole( + Strings.format("role_%03d", roleIdx++), + randomBoolean() ? null : randomDescription(), + randomBoolean() ? null : randomMetadata(), + randomApplicationPrivileges() + ); + } + // third matching role + RoleDescriptor thirdMatchingRole = createRole( + Strings.format("role_%03d", roleIdx++), + "me ZZZZmatchZZZZ go", + randomBoolean() ? null : randomMetadata(), + randomApplicationPrivileges() + ); + nOtherRoles = randomIntBetween(0, 5); + for (int i = 0; i < nOtherRoles; i++) { + createRole( + Strings.format("role_%03d", roleIdx++), + randomBoolean() ? null : randomDescription(), + randomBoolean() ? 
null : randomMetadata(), + randomApplicationPrivileges() + ); + } + String queryTemplate = """ + {"query":{"match":{"description":{"query":"ZZZZmatchZZZZ"}}}, + "size":1, + "sort":[{"name":{"order":"desc"}},{"applications.resources":{"order":"asc"}}] + %s + }"""; + AtomicReference searchAfter = new AtomicReference<>(""); + Consumer> searchAfterChain = roleMap -> { + assertThat(roleMap.get("_sort"), instanceOf(List.class)); + assertThat(((List) roleMap.get("_sort")), iterableWithSize(2)); + String firstSortValue = ((List) roleMap.get("_sort")).get(0); + assertThat(firstSortValue, equalTo(roleMap.get("name"))); + String secondSortValue = ((List) roleMap.get("_sort")).get(1); + searchAfter.set( + ",\"search_after\":[\"" + + firstSortValue + + "\"," + + (secondSortValue != null ? ("\"" + secondSortValue + "\"") : "null") + + "]" + ); + }; + assertQuery(Strings.format(queryTemplate, searchAfter.get()), 3, roles -> { + assertThat(roles, iterableWithSize(1)); + assertRoleMap(roles.get(0), thirdMatchingRole); + searchAfterChain.accept(roles.get(0)); + }); + assertQuery(Strings.format(queryTemplate, searchAfter.get()), 3, roles -> { + assertThat(roles, iterableWithSize(1)); + assertRoleMap(roles.get(0), secondMatchingRole); + searchAfterChain.accept(roles.get(0)); + }); + assertQuery(Strings.format(queryTemplate, searchAfter.get()), 3, roles -> { + assertThat(roles, iterableWithSize(1)); + assertRoleMap(roles.get(0), firstMatchingRole); + searchAfterChain.accept(roles.get(0)); + }); + // no more results + assertQuery(Strings.format(queryTemplate, searchAfter.get()), 3, roles -> assertThat(roles, emptyIterable())); + } + + @SuppressWarnings("unchecked") + private String getPrivilegeNameUsedForSorting(Map roleMap) { + String bestPrivilege = null; + List> applications = (List>) roleMap.get("applications"); + if (applications == null) { + return bestPrivilege; + } + for (Map application : applications) { + List privileges = (List) application.get("privileges"); + if (privileges != null) { + for (String privilege : privileges) { + if (bestPrivilege == null) { + bestPrivilege = privilege; + } else if (privilege.compareTo(bestPrivilege) < 0) { + bestPrivilege = privilege; + } + } + } + } + return bestPrivilege; + } + + private RoleDescriptor createRandomRole() throws IOException { + return createRole( + randomUUID(), + randomBoolean() ? null : randomDescription(), + randomBoolean() ? null : randomMetadata(), + randomApplicationPrivileges() + ); + } + + private ApplicationResourcePrivileges[] randomApplicationPrivileges() { + ApplicationResourcePrivileges[] applicationResourcePrivileges = randomArray( + 0, + 3, + ApplicationResourcePrivileges[]::new, + this::randomApplicationResourcePrivileges + ); + return applicationResourcePrivileges.length == 0 && randomBoolean() ? null : applicationResourcePrivileges; + } + + @SuppressWarnings("unchecked") + private RoleDescriptor createRole( + String roleName, + String description, + Map metadata, + ApplicationResourcePrivileges... 
applicationResourcePrivileges + ) throws IOException { + Request request = new Request("POST", "/_security/role/" + roleName); + Map requestMap = new HashMap<>(); + if (description != null) { + requestMap.put(RoleDescriptor.Fields.DESCRIPTION.getPreferredName(), description); + } + if (metadata != null) { + requestMap.put(RoleDescriptor.Fields.METADATA.getPreferredName(), metadata); + } + if (applicationResourcePrivileges != null) { + requestMap.put(RoleDescriptor.Fields.APPLICATIONS.getPreferredName(), applicationResourcePrivileges); + } + BytesReference source = BytesReference.bytes(jsonBuilder().map(requestMap)); + request.setJsonEntity(source.utf8ToString()); + Response response = adminClient().performRequest(request); + assertOK(response); + Map responseMap = responseAsMap(response); + assertTrue((Boolean) ((Map) responseMap.get("role")).get("created")); + return new RoleDescriptor( + roleName, + null, + null, + applicationResourcePrivileges, + null, + null, + metadata, + null, + null, + null, + null, + description + ); + } + + private void assertQuery(String body, int total, Consumer>> roleVerifier) throws IOException { + assertQuery(client(), body, total, roleVerifier); + } + + private static Request queryRoleRequestWithAuth() { + Request request = new Request(randomFrom("POST", "GET"), "/_security/_query/role"); + request.setOptions(request.getOptions().toBuilder().addHeader(HttpHeaders.AUTHORIZATION, READ_SECURITY_USER_AUTH_HEADER)); + return request; + } + + public static void assertQuery(RestClient client, String body, int total, Consumer>> roleVerifier) + throws IOException { + Request request = queryRoleRequestWithAuth(); + request.setJsonEntity(body); + Response response = client.performRequest(request); + assertOK(response); + Map responseMap = responseAsMap(response); + assertThat(responseMap.get("total"), is(total)); + @SuppressWarnings("unchecked") + List> roles = new ArrayList<>((List>) responseMap.get("roles")); + assertThat(roles.size(), is(responseMap.get("count"))); + roleVerifier.accept(roles); + } + + @SuppressWarnings("unchecked") + private void assertRoleMap(Map roleMap, RoleDescriptor roleDescriptor) { + assertThat(roleMap.get("name"), equalTo(roleDescriptor.getName())); + if (Strings.isNullOrEmpty(roleDescriptor.getDescription())) { + assertThat(roleMap.get("description"), nullValue()); + } else { + assertThat(roleMap.get("description"), equalTo(roleDescriptor.getDescription())); + } + // "applications" is always present + assertThat(roleMap.get("applications"), instanceOf(Iterable.class)); + if (roleDescriptor.getApplicationPrivileges().length == 0) { + assertThat((Iterable) roleMap.get("applications"), emptyIterable()); + } else { + assertThat( + (Iterable>) roleMap.get("applications"), + iterableWithSize(roleDescriptor.getApplicationPrivileges().length) + ); + Iterator> responseIterator = ((Iterable>) roleMap.get("applications")).iterator(); + Iterator descriptorIterator = Arrays.asList(roleDescriptor.getApplicationPrivileges()) + .iterator(); + while (responseIterator.hasNext()) { + assertTrue(descriptorIterator.hasNext()); + Map responsePrivilege = responseIterator.next(); + ApplicationResourcePrivileges descriptorPrivilege = descriptorIterator.next(); + assertThat(responsePrivilege.get("application"), equalTo(descriptorPrivilege.getApplication())); + assertThat(responsePrivilege.get("privileges"), equalTo(Arrays.asList(descriptorPrivilege.getPrivileges()))); + assertThat(responsePrivilege.get("resources"), 
equalTo(Arrays.asList(descriptorPrivilege.getResources()))); + } + assertFalse(descriptorIterator.hasNext()); + } + // in this test suite all roles are always enabled + assertTrue(roleMap.containsKey("transient_metadata")); + assertThat(roleMap.get("transient_metadata"), Matchers.instanceOf(Map.class)); + assertThat(((Map) roleMap.get("transient_metadata")).get("enabled"), equalTo(true)); + } + + private Map randomMetadata() { + return randomMetadata(3); + } + + private Map randomMetadata(int maxLevel) { + int size = randomIntBetween(0, 5); + Map metadata = new HashMap<>(size); + for (int i = 0; i < size; i++) { + switch (randomFrom(1, 2, 3, 4, 5)) { + case 1: + metadata.put(randomAlphaOfLength(4), randomAlphaOfLength(4)); + break; + case 2: + metadata.put(randomAlphaOfLength(4), randomInt()); + break; + case 3: + metadata.put(randomAlphaOfLength(4), randomList(0, 3, () -> randomAlphaOfLength(4))); + break; + case 4: + metadata.put(randomAlphaOfLength(4), randomList(0, 3, () -> randomInt(4))); + break; + case 5: + if (maxLevel > 0) { + metadata.put(randomAlphaOfLength(4), randomMetadata(maxLevel - 1)); + } + break; + } + } + return metadata; + } + + private ApplicationResourcePrivileges randomApplicationResourcePrivileges() { + String applicationName; + if (randomBoolean()) { + applicationName = "*"; + } else { + applicationName = randomAlphaOfLength(1).toLowerCase(Locale.ROOT) + randomAlphaOfLengthBetween(2, 10); + } + Supplier privilegeNameSupplier = () -> randomAlphaOfLength(1).toLowerCase(Locale.ROOT) + randomAlphaOfLengthBetween(2, 8); + int size = randomIntBetween(1, 5); + List resources = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + if (randomBoolean()) { + String suffix = randomBoolean() ? "*" : randomAlphaOfLengthBetween(4, 9); + resources.add(randomAlphaOfLengthBetween(2, 5) + "/" + suffix); + } else { + resources.add(randomAlphaOfLength(1).toLowerCase(Locale.ROOT) + randomAlphaOfLengthBetween(2, 8)); + } + } + return RoleDescriptor.ApplicationResourcePrivileges.builder() + .application(applicationName) + .resources(resources) + .privileges(randomList(1, 3, privilegeNameSupplier)) + .build(); + } + + private String randomDescription() { + StringBuilder randomDescriptionBuilder = new StringBuilder(); + int nParts = randomIntBetween(1, 5); + for (int i = 0; i < nParts; i++) { + randomDescriptionBuilder.append(randomAlphaOfLengthBetween(1, 5)); + } + return randomDescriptionBuilder.toString(); + } + + @SuppressWarnings("unchecked") + public static void waitForMigrationCompletion(RestClient adminClient, Integer migrationVersion) throws Exception { + final Request request = new Request("GET", "_cluster/state/metadata/" + INTERNAL_SECURITY_MAIN_INDEX_7); + assertBusy(() -> { + Response response = adminClient.performRequest(request); + assertOK(response); + Map responseMap = responseAsMap(response); + Map indicesMetadataMap = (Map) ((Map) responseMap.get("metadata")).get( + "indices" + ); + assertTrue(indicesMetadataMap.containsKey(INTERNAL_SECURITY_MAIN_INDEX_7)); + assertTrue( + ((Map) indicesMetadataMap.get(INTERNAL_SECURITY_MAIN_INDEX_7)).containsKey(MIGRATION_VERSION_CUSTOM_KEY) + ); + if (migrationVersion != null) { + assertTrue( + ((Map) ((Map) indicesMetadataMap.get(INTERNAL_SECURITY_MAIN_INDEX_7)).get( + MIGRATION_VERSION_CUSTOM_KEY + )).containsKey(MIGRATION_VERSION_CUSTOM_DATA_KEY) + ); + Integer versionInteger = Integer.parseInt( + (String) ((Map) ((Map) indicesMetadataMap.get(INTERNAL_SECURITY_MAIN_INDEX_7)).get( + MIGRATION_VERSION_CUSTOM_KEY + 
)).get(MIGRATION_VERSION_CUSTOM_DATA_KEY) + ); + assertThat(versionInteger, greaterThanOrEqualTo(migrationVersion)); + } + }); + } +} diff --git a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/QueryUserIT.java b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/QueryUserIT.java index 223c07a1e9dec..56af8aa16360a 100644 --- a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/QueryUserIT.java +++ b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/QueryUserIT.java @@ -35,7 +35,7 @@ public class QueryUserIT extends SecurityInBasicRestTestCase { - private static final String READ_USERS_USER_AUTH_HEADER = "Basic cmVhZF91c2Vyc191c2VyOnJlYWQtdXNlcnMtcGFzc3dvcmQ="; + private static final String READ_SECURITY_USER_AUTH_HEADER = "Basic cmVhZF9zZWN1cml0eV91c2VyOnJlYWQtc2VjdXJpdHktcGFzc3dvcmQ="; private static final String TEST_USER_NO_READ_USERS_AUTH_HEADER = "Basic c2VjdXJpdHlfdGVzdF91c2VyOnNlY3VyaXR5LXRlc3QtcGFzc3dvcmQ="; private static final Set reservedUsers = Set.of( @@ -57,7 +57,7 @@ private Request queryUserRequestWithAuth(boolean withProfileId) { randomFrom("POST", "GET"), "/_security/_query/user" + (withProfileId ? "?with_profile_uid=true" : randomFrom("", "?with_profile_uid=false")) ); - request.setOptions(request.getOptions().toBuilder().addHeader(HttpHeaders.AUTHORIZATION, READ_USERS_USER_AUTH_HEADER)); + request.setOptions(request.getOptions().toBuilder().addHeader(HttpHeaders.AUTHORIZATION, READ_SECURITY_USER_AUTH_HEADER)); return request; } @@ -321,7 +321,7 @@ public void testSort() throws IOException { final String invalidSortName = randomFrom("email", "full_name"); assertQueryError( - READ_USERS_USER_AUTH_HEADER, + READ_SECURITY_USER_AUTH_HEADER, 400, Strings.format("{\"sort\":[\"%s\"]}", invalidSortName), Strings.format("sorting is not supported for field [%s]", invalidSortName) @@ -338,7 +338,7 @@ private String getReservedUsernameAndAssertExists() throws IOException { putUserRequest.setJsonEntity("{\"enabled\": true}"); } - request.setOptions(request.getOptions().toBuilder().addHeader(HttpHeaders.AUTHORIZATION, READ_USERS_USER_AUTH_HEADER)); + request.setOptions(request.getOptions().toBuilder().addHeader(HttpHeaders.AUTHORIZATION, READ_SECURITY_USER_AUTH_HEADER)); final Response response = client().performRequest(request); assertOK(response); final Map responseMap = responseAsMap(response); @@ -363,7 +363,7 @@ private List> collectUsers(Request request, int total) throw } private void assertQueryError(int statusCode, String body) { - assertQueryError(READ_USERS_USER_AUTH_HEADER, statusCode, body); + assertQueryError(READ_SECURITY_USER_AUTH_HEADER, statusCode, body); } private void assertQueryError(String authHeader, int statusCode, String body) { diff --git a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityInBasicRestTestCase.java b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityInBasicRestTestCase.java index b9dfa4c7fcfd7..7cb8c09545bb1 100644 --- a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityInBasicRestTestCase.java +++ b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityInBasicRestTestCase.java @@ -23,8 +23,8 @@ public abstract class SecurityInBasicRestTestCase extends 
ESRestTestCase { private static final SecureString REST_PASSWORD = new SecureString("security-test-password".toCharArray()); protected static final String TEST_USER_AUTH_HEADER = "Basic c2VjdXJpdHlfdGVzdF91c2VyOnNlY3VyaXR5LXRlc3QtcGFzc3dvcmQ="; - protected static final String READ_USERS_USER = "read_users_user"; - private static final SecureString READ_USERS_PASSWORD = new SecureString("read-users-password".toCharArray()); + protected static final String READ_SECURITY_USER = "read_security_user"; + private static final SecureString READ_SECURITY_PASSWORD = new SecureString("read-security-password".toCharArray()); private static final String ADMIN_USER = "admin_user"; private static final SecureString ADMIN_PASSWORD = new SecureString("admin-password".toCharArray()); @@ -53,7 +53,7 @@ public abstract class SecurityInBasicRestTestCase extends ESRestTestCase { .user(REST_USER, REST_PASSWORD.toString(), "security_test_role", false) .user(API_KEY_USER, API_KEY_USER_PASSWORD.toString(), "api_key_user_role", false) .user(API_KEY_ADMIN_USER, API_KEY_ADMIN_USER_PASSWORD.toString(), "api_key_admin_role", false) - .user(READ_USERS_USER, READ_USERS_PASSWORD.toString(), "read_users_user_role", false) + .user(READ_SECURITY_USER, READ_SECURITY_PASSWORD.toString(), "read_security_user_role", false) .build(); @Override @@ -72,5 +72,4 @@ protected Settings restClientSettings() { String token = basicAuthHeaderValue(REST_USER, REST_PASSWORD); return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); } - } diff --git a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/resources/roles.yml b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/resources/roles.yml index 15c291274bcdb..ee7d8f7c6214f 100644 --- a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/resources/roles.yml +++ b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/resources/roles.yml @@ -18,8 +18,8 @@ api_key_user_role: cluster: - manage_own_api_key -# Used to perform query user operations -read_users_user_role: +# Used to perform query user and role operations +read_security_user_role: cluster: - read_security diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityOnTrialLicenseRestTestCase.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityOnTrialLicenseRestTestCase.java index bdbd5c659c479..1abb9bbb067dc 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityOnTrialLicenseRestTestCase.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityOnTrialLicenseRestTestCase.java @@ -7,9 +7,15 @@ package org.elasticsearch.xpack.security; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; import org.elasticsearch.test.TestSecurityClient; import org.elasticsearch.test.cluster.ElasticsearchCluster; @@ -24,9 +30,13 @@ import java.io.IOException; import java.util.Collection; +import java.util.HashMap; import java.util.List; +import 
java.util.Map; import static org.elasticsearch.test.cluster.local.model.User.ROOT_USER_ROLE; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; public abstract class SecurityOnTrialLicenseRestTestCase extends ESRestTestCase { private TestSecurityClient securityClient; @@ -128,4 +138,67 @@ protected ApiKey getApiKey(String id) throws IOException { final TestSecurityClient client = getSecurityClient(); return client.getApiKey(id); } + + protected void upsertRole(String roleDescriptor, String roleName) throws IOException { + Request createRoleRequest = roleRequest(roleDescriptor, roleName); + Response createRoleResponse = adminClient().performRequest(createRoleRequest); + assertOK(createRoleResponse); + } + + protected Request roleRequest(String roleDescriptor, String roleName) { + Request createRoleRequest; + if (randomBoolean()) { + createRoleRequest = new Request(randomFrom(HttpPut.METHOD_NAME, HttpPost.METHOD_NAME), "/_security/role/" + roleName); + createRoleRequest.setJsonEntity(roleDescriptor); + } else { + createRoleRequest = new Request(HttpPost.METHOD_NAME, "/_security/role"); + createRoleRequest.setJsonEntity(Strings.format(""" + {"roles": {"%s": %s}} + """, roleName, roleDescriptor)); + } + return createRoleRequest; + } + + @SuppressWarnings("unchecked") + protected void assertSendRequestThrowsError(Request request, String expectedError) throws IOException { + String errorMessage; + if (request.getEndpoint().endsWith("/role")) { + Map response = responseAsMap(adminClient().performRequest(request)); + + Map errors = (Map) response.get("errors"); + Map failedItems = (Map) errors.get("details"); + assertEquals(failedItems.size(), 1); + Map error = (Map) failedItems.values().stream().findFirst().orElseThrow(); + errorMessage = (String) error.get("reason"); + } else { + ResponseException e = expectThrows(ResponseException.class, () -> adminClient().performRequest(request)); + assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); + errorMessage = e.getMessage(); + } + assertThat(errorMessage, containsString(expectedError)); + } + + protected void fetchRoleAndAssertEqualsExpected(final String roleName, final RoleDescriptor expectedRoleDescriptor) throws IOException { + final Response getRoleResponse = adminClient().performRequest(new Request("GET", "/_security/role/" + roleName)); + assertOK(getRoleResponse); + final Map actual = responseAsParser(getRoleResponse).map( + HashMap::new, + p -> RoleDescriptor.parserBuilder().allowDescription(true).build().parse(expectedRoleDescriptor.getName(), p) + ); + assertThat(actual, equalTo(Map.of(expectedRoleDescriptor.getName(), expectedRoleDescriptor))); + } + + protected Map upsertRoles(String roleDescriptorsByName) throws IOException { + Request request = rolesRequest(roleDescriptorsByName); + Response response = adminClient().performRequest(request); + assertOK(response); + return responseAsMap(response); + } + + protected Request rolesRequest(String roleDescriptorsByName) { + Request rolesRequest; + rolesRequest = new Request(HttpPost.METHOD_NAME, "/_security/role"); + rolesRequest.setJsonEntity(org.elasticsearch.core.Strings.format(roleDescriptorsByName)); + return rolesRequest; + } } diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/BulkDeleteRoleRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/BulkDeleteRoleRestIT.java new file mode 100644 index 
0000000000000..c0d673694a0e7 --- /dev/null +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/BulkDeleteRoleRestIT.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security.role; + +import org.apache.http.client.methods.HttpDelete; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.core.Strings; +import org.elasticsearch.xpack.security.SecurityOnTrialLicenseRestTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.not; + +public class BulkDeleteRoleRestIT extends SecurityOnTrialLicenseRestTestCase { + @SuppressWarnings("unchecked") + public void testDeleteValidExistingRoles() throws Exception { + Map responseMap = upsertRoles(""" + {"roles": {"test1": {"cluster": ["all"],"indices": [{"names": ["*"],"privileges": ["all"]}]}, "test2": + {"cluster": ["all"],"indices": [{"names": ["*"],"privileges": ["read"]}]}, "test3": + {"cluster": ["all"],"indices": [{"names": ["*"],"privileges": ["write"]}]}}}"""); + assertThat(responseMap, not(hasKey("errors"))); + + List rolesToDelete = List.of("test1", "test3"); + Map response = deleteRoles(rolesToDelete); + List deleted = (List) response.get("deleted"); + assertThat(deleted, equalTo(rolesToDelete)); + + assertRolesDeleted(rolesToDelete); + assertRolesNotDeleted(List.of("test2")); + } + + @SuppressWarnings("unchecked") + public void testTryDeleteNonExistingRoles() throws Exception { + Map responseMap = upsertRoles(""" + {"roles": {"test1": {"cluster": ["all"],"indices": [{"names": ["*"],"privileges": ["all"]}]}}}"""); + assertThat(responseMap, not(hasKey("errors"))); + + List rolesToDelete = List.of("test1", "test2", "test3"); + + Map response = deleteRoles(rolesToDelete); + List deleted = (List) response.get("deleted"); + + List notFound = (List) response.get("not_found"); + + assertThat(deleted, equalTo(List.of("test1"))); + assertThat(notFound, equalTo(List.of("test2", "test3"))); + + assertRolesDeleted(rolesToDelete); + } + + @SuppressWarnings("unchecked") + public void testTryDeleteReservedRoleName() throws Exception { + Map responseMap = upsertRoles(""" + {"roles": {"test1": {"cluster": ["all"],"indices": [{"names": ["*"],"privileges": ["all"]}]}}}"""); + assertThat(responseMap, not(hasKey("errors"))); + + Map response = deleteRoles(List.of("superuser", "test1")); + + List deleted = (List) response.get("deleted"); + assertThat(deleted, equalTo(List.of("test1"))); + + Map errors = (Map) response.get("errors"); + assertThat((Integer) errors.get("count"), equalTo(1)); + Map errorDetails = (Map) ((Map) errors.get("details")).get("superuser"); + + assertThat( + errorDetails, + equalTo(Map.of("type", "illegal_argument_exception", "reason", "role [superuser] is reserved and cannot be deleted")) + ); + + assertRolesDeleted(List.of("test1")); + assertRolesNotDeleted(List.of("superuser")); + } + + protected Map deleteRoles(List roles) throws IOException { + Request request = new Request(HttpDelete.METHOD_NAME, "/_security/role"); + 
request.setJsonEntity(Strings.format(""" + {"names": [%s]}""", String.join(",", roles.stream().map(role -> "\"" + role + "\"").toList()))); + + Response response = adminClient().performRequest(request); + assertOK(response); + return responseAsMap(response); + } + + protected void assertRolesDeleted(List roleNames) { + for (String roleName : roleNames) { + ResponseException exception = assertThrows( + ResponseException.class, + () -> adminClient().performRequest(new Request("GET", "/_security/role/" + roleName)) + ); + assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(404)); + } + } + + protected void assertRolesNotDeleted(List roleNames) throws IOException { + for (String roleName : roleNames) { + Response response = adminClient().performRequest(new Request("GET", "/_security/role/" + roleName)); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + } + } +} diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/BulkPutRoleRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/BulkPutRoleRestIT.java new file mode 100644 index 0000000000000..88b952f33394e --- /dev/null +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/BulkPutRoleRestIT.java @@ -0,0 +1,315 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security.role; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.security.SecurityOnTrialLicenseRestTestCase; + +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.not; + +public class BulkPutRoleRestIT extends SecurityOnTrialLicenseRestTestCase { + public void testPutManyValidRoles() throws Exception { + Map responseMap = upsertRoles(""" + {"roles": {"test1": {"cluster": ["all"],"indices": [{"names": ["*"],"privileges": ["all"]}]}, "test2": + {"cluster": ["all"],"indices": [{"names": ["*"],"privileges": ["read"]}]}, "test3": + {"cluster": ["all"],"indices": [{"names": ["*"],"privileges": ["write"]}]}}}"""); + assertThat(responseMap, not(hasKey("errors"))); + fetchRoleAndAssertEqualsExpected( + "test1", + new RoleDescriptor( + "test1", + new String[] { "all" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("all").build() }, + null, + null, + null, + null, + null, + null, + null, + null, + null + ) + ); + fetchRoleAndAssertEqualsExpected( + "test2", + new RoleDescriptor( + "test2", + new String[] { "all" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("read").build() }, + null, + null, + null, + null, + null, + null, + null, + null, + null + ) + ); + fetchRoleAndAssertEqualsExpected( + "test3", + new RoleDescriptor( + "test3", + new String[] { "all" }, + new RoleDescriptor.IndicesPrivileges[] { + 
RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("write").build() }, + null, + null, + null, + null, + null, + null, + null, + null, + null + ) + ); + } + + @SuppressWarnings("unchecked") + public void testPutMixedValidInvalidRoles() throws Exception { + Map responseMap = upsertRoles(""" + {"roles": {"test1": {"cluster": ["all"],"indices": [{"names": ["*"],"privileges": ["all"]}]}, "test2": + {"cluster": ["bad_privilege"],"indices": [{"names": ["*"],"privileges": ["read"]}]}, "test3": + {"cluster": ["all"],"indices": [{"names": ["*"],"privileges": ["write"]}]}}}"""); + + assertThat(responseMap, hasKey("errors")); + + List created = (List) responseMap.get("created"); + assertThat(created, hasSize(2)); + assertThat(created, contains("test1", "test3")); + + Map errors = (Map) responseMap.get("errors"); + Map failedItems = (Map) errors.get("details"); + assertEquals(failedItems.size(), 1); + + for (var entry : failedItems.entrySet()) { + Map error = (Map) entry.getValue(); + assertThat((String) error.get("reason"), containsString("unknown cluster privilege [bad_privilege]")); + } + + fetchRoleAndAssertEqualsExpected( + "test1", + new RoleDescriptor( + "test1", + new String[] { "all" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("all").build() }, + null, + null, + null, + null, + null, + null, + null, + null, + null + ) + ); + + fetchRoleAndAssertEqualsExpected( + "test3", + new RoleDescriptor( + "test3", + new String[] { "all" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("write").build() }, + null, + null, + null, + null, + null, + null, + null, + null, + null + ) + ); + + final ResponseException e = expectThrows( + ResponseException.class, + () -> adminClient().performRequest(new Request("GET", "/_security/role/test2")) + ); + assertEquals(404, e.getResponse().getStatusLine().getStatusCode()); + } + + @SuppressWarnings("unchecked") + public void testPutNoValidRoles() throws Exception { + Map responseMap = upsertRoles(""" + {"roles": {"test1": {"cluster": ["bad_privilege"],"indices": [{"names": ["*"],"privileges": ["all"]}]}, "test2": + {"cluster": ["bad_privilege"],"indices": [{"names": ["*"],"privileges": ["read"]}]}, "test3": + {"cluster": ["bad_privilege"],"indices": [{"names": ["*"],"privileges": ["write"]}]}}}"""); + + assertThat(responseMap, hasKey("errors")); + Map errors = (Map) responseMap.get("errors"); + Map failedItems = (Map) errors.get("details"); + assertEquals(failedItems.size(), 3); + + for (var entry : failedItems.entrySet()) { + Map error = (Map) entry.getValue(); + assertThat((String) error.get("reason"), containsString("unknown cluster privilege [bad_privilege]")); + } + + for (String name : List.of("test1", "test2", "test3")) { + final ResponseException e = expectThrows( + ResponseException.class, + () -> adminClient().performRequest(new Request("GET", "/_security/role/" + name)) + ); + assertEquals(404, e.getResponse().getStatusLine().getStatusCode()); + } + } + + @SuppressWarnings("unchecked") + public void testBulkUpdates() throws Exception { + String request = """ + {"roles": {"test1": {"cluster": ["all"],"indices": [{"names": ["*"],"privileges": ["all"]}]}, "test2": + {"cluster": ["all"],"indices": [{"names": ["*"],"privileges": ["read"]}], "description": "something"}, "test3": + {"cluster": ["all"],"indices": [{"names": ["*"],"privileges": ["write"]}], "remote_indices":[{"names":["logs-*"], + 
"privileges":["read"],"clusters":["my_cluster*","other_cluster"]}]}}}"""; + { + Map responseMap = upsertRoles(request); + assertThat(responseMap, not(hasKey("errors"))); + + List> items = (List>) responseMap.get("created"); + assertEquals(3, items.size()); + + fetchRoleAndAssertEqualsExpected( + "test1", + new RoleDescriptor( + "test1", + new String[] { "all" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("all").build() }, + null, + null, + null, + null, + null, + null, + null, + null, + null + ) + ); + fetchRoleAndAssertEqualsExpected( + "test2", + new RoleDescriptor( + "test2", + new String[] { "all" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("read").build() }, + null, + null, + null, + null, + null, + null, + null, + null, + "something" + ) + ); + fetchRoleAndAssertEqualsExpected( + "test3", + new RoleDescriptor( + "test3", + new String[] { "all" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("write").build() }, + null, + null, + null, + null, + null, + new RoleDescriptor.RemoteIndicesPrivileges[] { + RoleDescriptor.RemoteIndicesPrivileges.builder("my_cluster*", "other_cluster") + .indices("logs-*") + .privileges("read") + .build() }, + null, + null, + null + ) + ); + } + { + Map responseMap = upsertRoles(request); + assertThat(responseMap, not(hasKey("errors"))); + + List> items = (List>) responseMap.get("noop"); + assertEquals(3, items.size()); + } + { + request = """ + {"roles": {"test1": {}, "test2": + {"cluster": ["all"],"indices": [{"names": ["*"],"privileges": ["all"]}]}, "test3": + {"cluster": ["all"],"indices": [{"names": ["*"],"privileges": ["all"]}]}}}"""; + + Map responseMap = upsertRoles(request); + assertThat(responseMap, not(hasKey("errors"))); + List> items = (List>) responseMap.get("updated"); + assertEquals(3, items.size()); + + assertThat(responseMap, not(hasKey("errors"))); + + fetchRoleAndAssertEqualsExpected( + "test1", + new RoleDescriptor("test1", null, null, null, null, null, null, null, null, null, null, null) + ); + fetchRoleAndAssertEqualsExpected( + "test2", + new RoleDescriptor( + "test2", + new String[] { "all" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("all").build() }, + null, + null, + null, + null, + null, + null, + null, + null, + null + ) + ); + fetchRoleAndAssertEqualsExpected( + "test3", + new RoleDescriptor( + "test3", + new String[] { "all" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("all").build() }, + null, + null, + null, + null, + null, + null, + null, + null, + null + ) + ); + } + } +} diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithDescriptionRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithDescriptionRestIT.java index 95a650737d452..33c78f2dd6324 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithDescriptionRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithDescriptionRestIT.java @@ -7,22 +7,13 @@ package org.elasticsearch.xpack.security.role; -import org.apache.http.client.methods.HttpPost; -import 
org.apache.http.client.methods.HttpPut; import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.client.ResponseException; import org.elasticsearch.core.Strings; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.support.Validation; import org.elasticsearch.xpack.security.SecurityOnTrialLicenseRestTestCase; import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; public class RoleWithDescriptionRestIT extends SecurityOnTrialLicenseRestTestCase { @@ -30,15 +21,13 @@ public void testCreateOrUpdateRoleWithDescription() throws Exception { final String roleName = "role_with_description"; final String initialRoleDescription = randomAlphaOfLengthBetween(0, 10); { - Request createRoleRequest = new Request(HttpPut.METHOD_NAME, "/_security/role/" + roleName); - createRoleRequest.setJsonEntity(Strings.format(""" + upsertRole(Strings.format(""" { "description": "%s", "cluster": ["all"], "indices": [{"names": ["*"], "privileges": ["all"]}] - }""", initialRoleDescription)); - Response createResponse = adminClient().performRequest(createRoleRequest); - assertOK(createResponse); + }""", initialRoleDescription), roleName); + fetchRoleAndAssertEqualsExpected( roleName, new RoleDescriptor( @@ -60,15 +49,12 @@ public void testCreateOrUpdateRoleWithDescription() throws Exception { } { final String newRoleDescription = randomValueOtherThan(initialRoleDescription, () -> randomAlphaOfLengthBetween(0, 10)); - Request updateRoleRequest = new Request(HttpPost.METHOD_NAME, "/_security/role/" + roleName); - updateRoleRequest.setJsonEntity(Strings.format(""" + upsertRole(Strings.format(""" { "description": "%s", "cluster": ["all"], "indices": [{"names": ["index-*"], "privileges": ["all"]}] - }""", newRoleDescription)); - Response updateResponse = adminClient().performRequest(updateRoleRequest); - assertOK(updateResponse); + }""", newRoleDescription), roleName); fetchRoleAndAssertEqualsExpected( roleName, @@ -91,56 +77,37 @@ public void testCreateOrUpdateRoleWithDescription() throws Exception { } } - public void testCreateRoleWithInvalidDescriptionFails() { - Request createRoleRequest = new Request(HttpPut.METHOD_NAME, "/_security/role/role_with_large_description"); - createRoleRequest.setJsonEntity(Strings.format(""" + public void testCreateRoleWithInvalidDescriptionFails() throws IOException { + Request request = roleRequest(Strings.format(""" { "description": "%s", "cluster": ["all"], "indices": [{"names": ["*"], "privileges": ["all"]}] - }""", randomAlphaOfLength(Validation.Roles.MAX_DESCRIPTION_LENGTH + randomIntBetween(1, 5)))); + }""", randomAlphaOfLength(Validation.Roles.MAX_DESCRIPTION_LENGTH + randomIntBetween(1, 5))), "role_with_large_description"); - ResponseException e = expectThrows(ResponseException.class, () -> adminClient().performRequest(createRoleRequest)); - assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); - assertThat( - e.getMessage(), - containsString("Role description must be less than " + Validation.Roles.MAX_DESCRIPTION_LENGTH + " characters.") + assertSendRequestThrowsError( + request, + "Role description must be less than " + Validation.Roles.MAX_DESCRIPTION_LENGTH + " characters." 
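+            // assertSendRequestThrowsError is a shared helper; like the inline assertions it replaces,
+            // it is expected to check for a 400 response whose message contains this validation error.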
); } public void testUpdateRoleWithInvalidDescriptionFails() throws IOException { - Request createRoleRequest = new Request(HttpPut.METHOD_NAME, "/_security/role/my_role"); - createRoleRequest.setJsonEntity(""" + upsertRole(""" { "cluster": ["all"], "indices": [{"names": ["*"], "privileges": ["all"]}] - }"""); - Response createRoleResponse = adminClient().performRequest(createRoleRequest); - assertOK(createRoleResponse); + }""", "my_role"); - Request updateRoleRequest = new Request(HttpPost.METHOD_NAME, "/_security/role/my_role"); - updateRoleRequest.setJsonEntity(Strings.format(""" + Request updateRoleRequest = roleRequest(Strings.format(""" { "description": "%s", "cluster": ["all"], "indices": [{"names": ["index-*"], "privileges": ["all"]}] - }""", randomAlphaOfLength(Validation.Roles.MAX_DESCRIPTION_LENGTH + randomIntBetween(1, 5)))); - - ResponseException e = expectThrows(ResponseException.class, () -> adminClient().performRequest(updateRoleRequest)); - assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); - assertThat( - e.getMessage(), - containsString("Role description must be less than " + Validation.Roles.MAX_DESCRIPTION_LENGTH + " characters.") - ); - } + }""", randomAlphaOfLength(Validation.Roles.MAX_DESCRIPTION_LENGTH + randomIntBetween(1, 5))), "my_role"); - private void fetchRoleAndAssertEqualsExpected(final String roleName, final RoleDescriptor expectedRoleDescriptor) throws IOException { - final Response getRoleResponse = adminClient().performRequest(new Request("GET", "/_security/role/" + roleName)); - assertOK(getRoleResponse); - final Map actual = responseAsParser(getRoleResponse).map( - HashMap::new, - p -> RoleDescriptor.parserBuilder().allowDescription(true).build().parse(expectedRoleDescriptor.getName(), p) + assertSendRequestThrowsError( + updateRoleRequest, + "Role description must be less than " + Validation.Roles.MAX_DESCRIPTION_LENGTH + " characters." 
); - assertThat(actual, equalTo(Map.of(expectedRoleDescriptor.getName(), expectedRoleDescriptor))); } } diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithRemoteIndicesPrivilegesRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithRemoteIndicesPrivilegesRestIT.java index aa5967ea7277a..93dc6c3761482 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithRemoteIndicesPrivilegesRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithRemoteIndicesPrivilegesRestIT.java @@ -50,8 +50,7 @@ public void cleanup() throws IOException { } public void testRemoteIndexPrivileges() throws IOException { - var putRoleRequest = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE); - putRoleRequest.setJsonEntity(""" + upsertRole(""" { "remote_indices": [ { @@ -64,9 +63,7 @@ public void testRemoteIndexPrivileges() throws IOException { } } ] - }"""); - final Response putRoleResponse1 = adminClient().performRequest(putRoleRequest); - assertOK(putRoleResponse1); + }""", REMOTE_SEARCH_ROLE); final Response getRoleResponse = adminClient().performRequest(new Request("GET", "/_security/role/" + REMOTE_SEARCH_ROLE)); assertOK(getRoleResponse); @@ -106,8 +103,7 @@ public void testRemoteIndexPrivileges() throws IOException { assertThat(e.getMessage(), containsString("action [" + TransportSearchAction.TYPE.name() + "] is unauthorized for user")); // Add local privileges and check local authorization works - putRoleRequest = new Request("PUT", "_security/role/" + REMOTE_SEARCH_ROLE); - putRoleRequest.setJsonEntity(""" + upsertRole(""" { "cluster": ["all"], "indices": [ @@ -127,9 +123,8 @@ public void testRemoteIndexPrivileges() throws IOException { } } ] - }"""); - final Response putRoleResponse2 = adminClient().performRequest(putRoleRequest); - assertOK(putRoleResponse2); + }""", REMOTE_SEARCH_ROLE); + final Response searchResponse = client().performRequest(searchRequest); assertOK(searchResponse); @@ -171,8 +166,7 @@ public void testRemoteIndexPrivileges() throws IOException { } public void testGetUserPrivileges() throws IOException { - final var putRoleRequest = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE); - putRoleRequest.setJsonEntity(""" + upsertRole(""" { "remote_indices": [ { @@ -191,9 +185,7 @@ public void testGetUserPrivileges() throws IOException { "clusters": ["remote-a", "*"] } ] - }"""); - final Response putRoleResponse1 = adminClient().performRequest(putRoleRequest); - assertOK(putRoleResponse1); + }""", REMOTE_SEARCH_ROLE); final Response getUserPrivilegesResponse1 = executeAsRemoteSearchUser(new Request("GET", "/_security/user/_privileges")); assertOK(getUserPrivilegesResponse1); @@ -222,8 +214,7 @@ public void testGetUserPrivileges() throws IOException { ] }"""))); - final var putRoleRequest2 = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE); - putRoleRequest2.setJsonEntity(""" + upsertRole(""" { "cluster": ["all"], "indices": [ @@ -245,9 +236,7 @@ public void testGetUserPrivileges() throws IOException { "clusters": ["remote-c"] } ] - }"""); - final Response putRoleResponse2 = adminClient().performRequest(putRoleRequest2); - assertOK(putRoleResponse2); + }""", REMOTE_SEARCH_ROLE); final Response getUserPrivilegesResponse2 = executeAsRemoteSearchUser(new Request("GET", 
"/_security/user/_privileges")); assertOK(getUserPrivilegesResponse2); @@ -282,8 +271,7 @@ public void testGetUserPrivileges() throws IOException { } public void testGetUserPrivilegesWithMultipleFlsDlsDefinitionsPreservesGroupPerIndexPrivilege() throws IOException { - final var putRoleRequest = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE); - putRoleRequest.setJsonEntity(""" + upsertRole(""" { "remote_indices": [ { @@ -305,9 +293,7 @@ public void testGetUserPrivilegesWithMultipleFlsDlsDefinitionsPreservesGroupPerI } } ] - }"""); - final Response putRoleResponse1 = adminClient().performRequest(putRoleRequest); - assertOK(putRoleResponse1); + }""", REMOTE_SEARCH_ROLE); final Response getUserPrivilegesResponse1 = executeAsRemoteSearchUser(new Request("GET", "/_security/user/_privileges")); assertOK(getUserPrivilegesResponse1); diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithWorkflowsRestrictionRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithWorkflowsRestrictionRestIT.java index d2fc27fb3fcae..979fe87ec4bb5 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithWorkflowsRestrictionRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithWorkflowsRestrictionRestIT.java @@ -7,10 +7,7 @@ package org.elasticsearch.xpack.security.role; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.xpack.security.SecurityOnTrialLicenseRestTestCase; @@ -21,8 +18,7 @@ public class RoleWithWorkflowsRestrictionRestIT extends SecurityOnTrialLicenseRestTestCase { public void testCreateRoleWithWorkflowsRestrictionFail() { - Request createRoleRequest = new Request(HttpPut.METHOD_NAME, "/_security/role/role_with_restriction"); - createRoleRequest.setJsonEntity(""" + Request request = roleRequest(""" { "cluster": ["all"], "indices": [ @@ -34,16 +30,15 @@ public void testCreateRoleWithWorkflowsRestrictionFail() { "restriction":{ "workflows": ["foo", "bar"] } - }"""); + }""", "role_with_restriction"); - ResponseException e = expectThrows(ResponseException.class, () -> adminClient().performRequest(createRoleRequest)); + ResponseException e = expectThrows(ResponseException.class, () -> adminClient().performRequest(request)); assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); assertThat(e.getMessage(), containsString("failed to parse role [role_with_restriction]. 
unexpected field [restriction]")); } public void testUpdateRoleWithWorkflowsRestrictionFail() throws IOException { - Request createRoleRequest = new Request(HttpPut.METHOD_NAME, "/_security/role/my_role"); - createRoleRequest.setJsonEntity(""" + upsertRole(""" { "cluster": ["all"], "indices": [ @@ -52,12 +47,9 @@ public void testUpdateRoleWithWorkflowsRestrictionFail() throws IOException { "privileges": ["all"] } ] - }"""); - Response createRoleResponse = adminClient().performRequest(createRoleRequest); - assertOK(createRoleResponse); + }""", "my_role"); - Request updateRoleRequest = new Request(HttpPost.METHOD_NAME, "/_security/role/my_role"); - updateRoleRequest.setJsonEntity(""" + Request updateRoleRequest = roleRequest(""" { "cluster": ["all"], "indices": [ @@ -69,7 +61,7 @@ public void testUpdateRoleWithWorkflowsRestrictionFail() throws IOException { "restriction":{ "workflows": ["foo", "bar"] } - }"""); + }""", "my_role"); ResponseException e = expectThrows(ResponseException.class, () -> adminClient().performRequest(updateRoleRequest)); assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java index 1be8f543ebcb3..704d8b75d9ed3 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java @@ -884,7 +884,7 @@ public void testKnnSearch() throws Exception { // Since there's no kNN search action at the transport layer, we just emulate // how the action works (it builds a kNN query under the hood) float[] queryVector = new float[] { 0.0f, 0.0f, 0.0f }; - KnnVectorQueryBuilder query = new KnnVectorQueryBuilder("vector", queryVector, 50, null); + KnnVectorQueryBuilder query = new KnnVectorQueryBuilder("vector", queryVector, 50, 50, null); if (randomBoolean()) { query.addFilterQuery(new WildcardQueryBuilder("other", "value*")); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java index 849f5d1a48c5e..bffa53b1f4da6 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java @@ -441,7 +441,7 @@ public void testKnnSearch() throws IOException { // Since there's no kNN search action at the transport layer, we just emulate // how the action works (it builds a kNN query under the hood) float[] queryVector = new float[] { 0.0f, 0.0f, 0.0f }; - KnnVectorQueryBuilder query = new KnnVectorQueryBuilder("vector", queryVector, 10, null); + KnnVectorQueryBuilder query = new KnnVectorQueryBuilder("vector", queryVector, 10, 10, null); // user1 has access to vector field, so the query should match with the document: assertResponse( @@ -475,7 +475,7 @@ public void testKnnSearch() throws IOException { } ); // user1 can access field1, so the filtered query should match with the document: - KnnVectorQueryBuilder filterQuery1 = new KnnVectorQueryBuilder("vector", queryVector, 10, null).addFilterQuery( + KnnVectorQueryBuilder 
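+        // Note: the constructor call continuing below passes one extra int (presumably k in addition to
+        // num_candidates) to match the updated KnnVectorQueryBuilder signature applied throughout this file.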
filterQuery1 = new KnnVectorQueryBuilder("vector", queryVector, 10, 10, null).addFilterQuery( QueryBuilders.matchQuery("field1", "value1") ); assertHitCount( @@ -486,7 +486,7 @@ public void testKnnSearch() throws IOException { ); // user1 cannot access field2, so the filtered query should not match with the document: - KnnVectorQueryBuilder filterQuery2 = new KnnVectorQueryBuilder("vector", queryVector, 10, null).addFilterQuery( + KnnVectorQueryBuilder filterQuery2 = new KnnVectorQueryBuilder("vector", queryVector, 10, 10, null).addFilterQuery( QueryBuilders.matchQuery("field2", "value2") ); assertHitCount( diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java index 2eb45021a5bfe..07bdd83c9a144 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java @@ -15,6 +15,7 @@ import org.elasticsearch.client.RestClientBuilder; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.settings.MockSecureSettings; @@ -26,7 +27,9 @@ import org.elasticsearch.license.LicenseSettings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.xpack.core.security.test.TestRestrictedIndices; import org.elasticsearch.xpack.security.LocalStateSecurity; +import org.elasticsearch.xpack.security.support.SecurityMigrations; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -41,10 +44,9 @@ import java.util.concurrent.CountDownLatch; import java.util.stream.Collectors; -import static org.elasticsearch.persistent.PersistentTasksCustomMetadata.getTaskWithId; import static org.elasticsearch.test.SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; -import static org.elasticsearch.xpack.core.security.support.SecurityMigrationTaskParams.TASK_NAME; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.getMigrationVersionFromIndexMetadata; import static org.hamcrest.Matchers.hasItem; /** @@ -90,7 +92,12 @@ public void tearDown() throws Exception { } private boolean isMigrationComplete(ClusterState state) { - return getTaskWithId(state, TASK_NAME) == null; + IndexMetadata indexMetadata = state.metadata().index(TestRestrictedIndices.INTERNAL_SECURITY_MAIN_INDEX_7); + if (indexMetadata == null) { + // If index doesn't exist, no migration needed + return true; + } + return getMigrationVersionFromIndexMetadata(indexMetadata) == SecurityMigrations.MIGRATIONS_BY_VERSION.lastKey(); } private void awaitSecurityMigration() { diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java index 2ced54a513146..435706dce7019 100644 --- 
a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java @@ -52,7 +52,6 @@ import org.elasticsearch.xpack.core.security.action.user.AuthenticateResponse; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Realm; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtAuthenticationToken; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import org.elasticsearch.xpack.security.LocalStateSecurity; import org.elasticsearch.xpack.security.Security; diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreCacheTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreCacheTests.java index 3094a10b1572d..d11ca70744b7b 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreCacheTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreCacheTests.java @@ -116,7 +116,6 @@ public void configureApplicationPrivileges() { assertEquals(6, putPrivilegesResponse.created().values().stream().mapToInt(List::size).sum()); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/109894") public void testGetPrivilegesUsesCache() { final Client client = client(); @@ -205,7 +204,6 @@ public void testPopulationOfCacheWhenLoadingPrivilegesForAllApplications() { assertEquals(1, new GetPrivilegesRequestBuilder(client).application("app-1").privileges("write").get().privileges().length); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/109895") public void testSuffixWildcard() { final Client client = client(); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4TransportCloseNotifyIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4TransportCloseNotifyIT.java new file mode 100644 index 0000000000000..b949d3ea8371a --- /dev/null +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4TransportCloseNotifyIT.java @@ -0,0 +1,163 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security.transport.netty4; + +import io.netty.bootstrap.Bootstrap; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpObjectAggregator; +import io.netty.handler.codec.http.HttpRequestEncoder; +import io.netty.handler.codec.http.HttpResponseDecoder; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.SslHandler; +import io.netty.handler.ssl.util.InsecureTrustManagerFactory; + +import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; +import org.elasticsearch.action.support.CancellableActionTestPlugin; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.SecurityIntegTestCase; + +import java.util.Collection; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; + +import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForNodePEMFiles; +import static org.elasticsearch.test.TaskAssertions.assertAllTasksHaveFinished; +import static org.elasticsearch.test.rest.ESRestTestCase.basicAuthHeaderValue; + +public class SecurityNetty4TransportCloseNotifyIT extends SecurityIntegTestCase { + + @Override + protected boolean addMockHttpTransport() { + return false; + } + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + final Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)); + addSSLSettingsForNodePEMFiles(builder, "xpack.security.http.", randomBoolean()); + return builder.put("xpack.security.http.ssl.enabled", true).build(); + } + + @Override + protected Collection> nodePlugins() { + return CollectionUtils.appendToCopy(super.nodePlugins(), CancellableActionTestPlugin.class); + } + + private static Bootstrap setupNettyClient(String node, Consumer responseHandler) throws Exception { + var sslCtx = SslContextBuilder.forClient().trustManager(InsecureTrustManagerFactory.INSTANCE).build(); + var httpServer = internalCluster().getInstance(HttpServerTransport.class, node); + var remoteAddr = randomFrom(httpServer.boundAddress().boundAddresses()); + return new Bootstrap().group(new NioEventLoopGroup(1)) + .channel(NioSocketChannel.class) + .remoteAddress(remoteAddr.getAddress(), remoteAddr.getPort()) + .handler(new ChannelInitializer() { + @Override + protected void initChannel(SocketChannel ch) { + var p = ch.pipeline(); + p.addLast(sslCtx.newHandler(ch.alloc())); + p.addLast(new HttpRequestEncoder()); + p.addLast(new HttpResponseDecoder()); + p.addLast(new HttpObjectAggregator(4096)); + p.addLast(new SimpleChannelInboundHandler() { + @Override + protected void channelRead0(ChannelHandlerContext ctx, FullHttpResponse msg) { + responseHandler.accept(msg); + } + }); + } + }); + } + + /** + * Ensures that receiving close_notify on server 
will close connection. + * Simulates normal connection flow where client and server exchange a few requests and responses. + * After an exchange client sends close_notify and expects the server to close connection. + */ + public void testSendCloseNotifyAfterHttpGetRequests() throws Exception { + final var nReq = randomIntBetween(0, 10); // nothing particular about number 10 + final var responsesReceivedLatch = new CountDownLatch(nReq); + final var client = setupNettyClient(internalCluster().getRandomNodeName(), response -> { + assertEquals(200, response.status().code()); + responsesReceivedLatch.countDown(); + }); + try { + var channel = client.connect().sync().channel(); + + // send some HTTP GET requests before closing a channel + for (int i = 0; i < nReq; i++) { + channel.write(newHttpGetReq("/")); + if (randomBoolean()) { + channel.flush(); + } + } + channel.flush(); + safeAwait(responsesReceivedLatch); + + // send close_notify alert and wait for channel closure + var sslHandler = channel.pipeline().get(SslHandler.class); + sslHandler.closeOutbound(); + try { + assertTrue("server must close connection", channel.closeFuture().await(SAFE_AWAIT_TIMEOUT.millis())); + } finally { + channel.close().sync(); + } + } finally { + client.config().group().shutdownGracefully().sync(); + } + } + + /** + * Ensures that receiving close_notify will close connection and cancel running action. + */ + public void testSendCloseNotifyCancelAction() throws Exception { + var node = internalCluster().getRandomNodeName(); + var indexName = "close-notify-cancel"; + createIndex(indexName); + ensureGreen(indexName); + var gotResponse = new AtomicBoolean(false); + var client = setupNettyClient(node, resp -> gotResponse.set(true)); + var actionName = ClusterStateAction.NAME; + try (var capturingAction = CancellableActionTestPlugin.capturingActionOnNode(actionName, node)) { + var channel = client.connect().sync().channel(); + var req = newHttpGetReq("/_cluster/state"); + channel.writeAndFlush(req); + var ssl = channel.pipeline().get(SslHandler.class); + capturingAction.captureAndCancel(ssl::closeOutbound); + try { + assertTrue("server must close connection", channel.closeFuture().await(SAFE_AWAIT_TIMEOUT.millis())); + assertAllTasksHaveFinished(actionName); + assertFalse("must cancel action before http response", gotResponse.get()); + } finally { + channel.close().sync(); + } + } finally { + client.config().group().shutdownGracefully().sync(); + } + } + + private DefaultFullHttpRequest newHttpGetReq(String uri) { + var req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, uri); + req.headers().add(HttpHeaderNames.AUTHORIZATION, basicAuthHeaderValue(nodeClientUsername(), nodeClientPassword())); + return req; + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 404b9b85e2b24..11c688e9ee5eb 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -160,6 +160,7 @@ import org.elasticsearch.xpack.core.security.action.profile.SuggestProfilesAction; import org.elasticsearch.xpack.core.security.action.profile.UpdateProfileDataAction; import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheAction; +import org.elasticsearch.xpack.core.security.action.role.BulkPutRoleRequestBuilderFactory; import 
org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheAction; import org.elasticsearch.xpack.core.security.action.role.DeleteRoleAction; import org.elasticsearch.xpack.core.security.action.role.GetRolesAction; @@ -253,10 +254,13 @@ import org.elasticsearch.xpack.security.action.profile.TransportSuggestProfilesAction; import org.elasticsearch.xpack.security.action.profile.TransportUpdateProfileDataAction; import org.elasticsearch.xpack.security.action.realm.TransportClearRealmCacheAction; +import org.elasticsearch.xpack.security.action.role.TransportBulkDeleteRolesAction; +import org.elasticsearch.xpack.security.action.role.TransportBulkPutRolesAction; import org.elasticsearch.xpack.security.action.role.TransportClearRolesCacheAction; import org.elasticsearch.xpack.security.action.role.TransportDeleteRoleAction; import org.elasticsearch.xpack.security.action.role.TransportGetRolesAction; import org.elasticsearch.xpack.security.action.role.TransportPutRoleAction; +import org.elasticsearch.xpack.security.action.role.TransportQueryRoleAction; import org.elasticsearch.xpack.security.action.rolemapping.ReservedRoleMappingAction; import org.elasticsearch.xpack.security.action.rolemapping.TransportDeleteRoleMappingAction; import org.elasticsearch.xpack.security.action.rolemapping.TransportGetRoleMappingsAction; @@ -370,10 +374,13 @@ import org.elasticsearch.xpack.security.rest.action.profile.RestSuggestProfilesAction; import org.elasticsearch.xpack.security.rest.action.profile.RestUpdateProfileDataAction; import org.elasticsearch.xpack.security.rest.action.realm.RestClearRealmCacheAction; +import org.elasticsearch.xpack.security.rest.action.role.RestBulkDeleteRolesAction; +import org.elasticsearch.xpack.security.rest.action.role.RestBulkPutRolesAction; import org.elasticsearch.xpack.security.rest.action.role.RestClearRolesCacheAction; import org.elasticsearch.xpack.security.rest.action.role.RestDeleteRoleAction; import org.elasticsearch.xpack.security.rest.action.role.RestGetRolesAction; import org.elasticsearch.xpack.security.rest.action.role.RestPutRoleAction; +import org.elasticsearch.xpack.security.rest.action.role.RestQueryRoleAction; import org.elasticsearch.xpack.security.rest.action.rolemapping.RestDeleteRoleMappingAction; import org.elasticsearch.xpack.security.rest.action.rolemapping.RestGetRoleMappingsAction; import org.elasticsearch.xpack.security.rest.action.rolemapping.RestPutRoleMappingAction; @@ -601,6 +608,7 @@ public class Security extends Plugin private final SetOnce scriptServiceReference = new SetOnce<>(); private final SetOnce operatorOnlyRegistry = new SetOnce<>(); private final SetOnce putRoleRequestBuilderFactory = new SetOnce<>(); + private final SetOnce bulkPutRoleRequestBuilderFactory = new SetOnce<>(); private final SetOnce createApiKeyRequestBuilderFactory = new SetOnce<>(); private final SetOnce updateApiKeyRequestTranslator = new SetOnce<>(); private final SetOnce bulkUpdateApiKeyRequestTranslator = new SetOnce<>(); @@ -788,7 +796,8 @@ Collection createComponents( this.persistentTasksService.set(persistentTasksService); systemIndices.getMainIndexManager().addStateListener((oldState, newState) -> { - if (clusterService.state().nodes().isLocalNodeElectedMaster()) { + // Only consider applying migrations if it's the master node and the security index exists + if (clusterService.state().nodes().isLocalNodeElectedMaster() && newState.indexExists()) { applyPendingSecurityMigrations(newState); } }); @@ -910,19 +919,14 @@ Collection createComponents( 
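As a point of reference for the new endpoints wired up in this class (RestBulkPutRolesAction, RestBulkDeleteRolesAction and their transport actions), the sketch below shows roughly how a javaRestTest such as BulkPutRoleRestIT above can drive them through the low-level REST client. The routes are assumed here to be POST /_security/role and DELETE /_security/role, since the REST handler sources are not part of this excerpt; the request bodies and the created/updated/noop/errors response fields match what the tests above assert.

    // Illustrative sketch only, reusing the adminClient()/responseAsMap() helpers from the
    // SecurityOnTrialLicenseRestTestCase changes shown earlier; the routes are an assumption.
    private Map<String, Object> bulkUpsertDemoRoles() throws IOException {
        Request bulkPut = new Request("POST", "/_security/role");
        bulkPut.setJsonEntity("""
            {"roles": {
              "demo-read":  {"cluster": ["monitor"], "indices": [{"names": ["logs-*"], "privileges": ["read"]}]},
              "demo-write": {"cluster": ["monitor"], "indices": [{"names": ["logs-*"], "privileges": ["write"]}]}}}""");
        Response response = adminClient().performRequest(bulkPut);
        assertOK(response);
        // The body lists role names under "created", "updated" or "noop"; rejected roles are
        // reported per name under "errors" -> "details" with a "reason" string.
        return responseAsMap(response);
    }

    private Map<String, Object> bulkDeleteDemoRoles() throws IOException {
        Request bulkDelete = new Request("DELETE", "/_security/role");
        bulkDelete.setJsonEntity("""
            {"names": ["demo-read", "demo-write"]}""");
        return responseAsMap(adminClient().performRequest(bulkDelete));
    }
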
dlsBitsetCache.set(new DocumentSubsetBitsetCache(settings, threadPool)); final FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(settings); - final NativeRolesStore nativeRolesStore = new NativeRolesStore( - settings, - client, - getLicenseState(), - systemIndices.getMainIndexManager(), - clusterService, - featureService - ); RoleDescriptor.setFieldPermissionsCache(fieldPermissionsCache); // Need to set to default if it wasn't set by an extension if (putRoleRequestBuilderFactory.get() == null) { putRoleRequestBuilderFactory.set(new PutRoleRequestBuilderFactory.Default()); } + if (bulkPutRoleRequestBuilderFactory.get() == null) { + bulkPutRoleRequestBuilderFactory.set(new BulkPutRoleRequestBuilderFactory.Default()); + } if (createApiKeyRequestBuilderFactory.get() == null) { createApiKeyRequestBuilderFactory.set(new CreateApiKeyRequestBuilderFactory.Default()); } @@ -950,7 +954,7 @@ Collection createComponents( this.fileRolesStore.set( new FileRolesStore(settings, environment, resourceWatcherService, getLicenseState(), xContentRegistry, fileRoleValidator.get()) ); - final ReservedRoleNameChecker reservedRoleNameChecker = reservedRoleNameCheckerFactory.get().create(fileRolesStore.get()::exists); + ReservedRoleNameChecker reservedRoleNameChecker = reservedRoleNameCheckerFactory.get().create(fileRolesStore.get()::exists); components.add(new PluginComponentBinding<>(ReservedRoleNameChecker.class, reservedRoleNameChecker)); final Map, ActionListener>>> customRoleProviders = new LinkedHashMap<>(); @@ -963,6 +967,17 @@ Collection createComponents( } } + final NativeRolesStore nativeRolesStore = new NativeRolesStore( + settings, + client, + getLicenseState(), + systemIndices.getMainIndexManager(), + clusterService, + featureService, + reservedRoleNameChecker, + xContentRegistry + ); + final ApiKeyService apiKeyService = new ApiKeyService( settings, Clock.systemUTC(), @@ -1203,43 +1218,53 @@ Collection createComponents( } private void applyPendingSecurityMigrations(SecurityIndexManager.State newState) { + // If no migrations have been applied and the security index is on the latest version (new index), all migrations can be skipped + if (newState.migrationsVersion == 0 && newState.createdOnLatestVersion) { + submitPersistentMigrationTask(SecurityMigrations.MIGRATIONS_BY_VERSION.lastKey(), false); + return; + } + Map.Entry nextMigration = SecurityMigrations.MIGRATIONS_BY_VERSION.higherEntry( newState.migrationsVersion ); - if (nextMigration == null) { - return; - } - // Check if next migration that has not been applied is eligible to run on the current cluster - if (systemIndices.getMainIndexManager().isEligibleSecurityMigration(nextMigration.getValue()) == false) { + if (nextMigration == null || systemIndices.getMainIndexManager().isEligibleSecurityMigration(nextMigration.getValue()) == false) { // Reset retry counter if all eligible migrations have been applied successfully nodeLocalMigrationRetryCount.set(0); } else if (nodeLocalMigrationRetryCount.get() > MAX_SECURITY_MIGRATION_RETRY_COUNT) { logger.warn("Security migration failed [" + nodeLocalMigrationRetryCount.get() + "] times, restart node to retry again."); } else if (systemIndices.getMainIndexManager().isReadyForSecurityMigration(nextMigration.getValue())) { - nodeLocalMigrationRetryCount.incrementAndGet(); - persistentTasksService.get() - .sendStartRequest( - SecurityMigrationTaskParams.TASK_NAME, - SecurityMigrationTaskParams.TASK_NAME, - new SecurityMigrationTaskParams(newState.migrationsVersion), - null, - 
ActionListener.wrap((response) -> { - logger.debug("Security migration task submitted"); - }, (exception) -> { - // Do nothing if the task is already in progress - if (ExceptionsHelper.unwrapCause(exception) instanceof ResourceAlreadyExistsException) { - // Do not count ResourceAlreadyExistsException as failure - nodeLocalMigrationRetryCount.decrementAndGet(); - } else { - logger.warn("Submit security migration task failed: " + exception.getCause()); - } - }) - ); + submitPersistentMigrationTask(newState.migrationsVersion); } } + private void submitPersistentMigrationTask(int migrationsVersion) { + submitPersistentMigrationTask(migrationsVersion, true); + } + + private void submitPersistentMigrationTask(int migrationsVersion, boolean securityMigrationNeeded) { + nodeLocalMigrationRetryCount.incrementAndGet(); + persistentTasksService.get() + .sendStartRequest( + SecurityMigrationTaskParams.TASK_NAME, + SecurityMigrationTaskParams.TASK_NAME, + new SecurityMigrationTaskParams(migrationsVersion, securityMigrationNeeded), + null, + ActionListener.wrap((response) -> { + logger.debug("Security migration task submitted"); + }, (exception) -> { + // Do nothing if the task is already in progress + if (ExceptionsHelper.unwrapCause(exception) instanceof ResourceAlreadyExistsException) { + // Do not count ResourceAlreadyExistsException as failure + nodeLocalMigrationRetryCount.decrementAndGet(); + } else { + logger.warn("Submit security migration task failed: " + exception.getCause()); + } + }) + ); + } + private AuthorizationEngine getAuthorizationEngine() { return findValueFromExtensions("authorization engine", extension -> extension.getAuthorizationEngine(settings)); } @@ -1514,7 +1539,10 @@ public void onIndexModule(IndexModule module) { new ActionHandler<>(PutUserAction.INSTANCE, TransportPutUserAction.class), new ActionHandler<>(DeleteUserAction.INSTANCE, TransportDeleteUserAction.class), new ActionHandler<>(GetRolesAction.INSTANCE, TransportGetRolesAction.class), + new ActionHandler<>(ActionTypes.QUERY_ROLE_ACTION, TransportQueryRoleAction.class), new ActionHandler<>(PutRoleAction.INSTANCE, TransportPutRoleAction.class), + new ActionHandler<>(ActionTypes.BULK_PUT_ROLES, TransportBulkPutRolesAction.class), + new ActionHandler<>(ActionTypes.BULK_DELETE_ROLES, TransportBulkDeleteRolesAction.class), new ActionHandler<>(DeleteRoleAction.INSTANCE, TransportDeleteRoleAction.class), new ActionHandler<>(TransportChangePasswordAction.TYPE, TransportChangePasswordAction.class), new ActionHandler<>(AuthenticateAction.INSTANCE, TransportAuthenticateAction.class), @@ -1608,6 +1636,9 @@ public List getRestHandlers( new RestPutUserAction(settings, getLicenseState()), new RestDeleteUserAction(settings, getLicenseState()), new RestGetRolesAction(settings, getLicenseState()), + new RestQueryRoleAction(settings, getLicenseState()), + new RestBulkPutRolesAction(settings, getLicenseState(), bulkPutRoleRequestBuilderFactory.get()), + new RestBulkDeleteRolesAction(settings, getLicenseState()), new RestPutRoleAction(settings, getLicenseState(), putRoleRequestBuilderFactory.get()), new RestDeleteRoleAction(settings, getLicenseState()), new RestChangePasswordAction(settings, securityContext.get(), getLicenseState()), @@ -2246,6 +2277,7 @@ public void loadExtensions(ExtensionLoader loader) { securityExtensions.addAll(loader.loadExtensions(SecurityExtension.class)); loadSingletonExtensionAndSetOnce(loader, operatorOnlyRegistry, OperatorOnlyRegistry.class); loadSingletonExtensionAndSetOnce(loader, 
putRoleRequestBuilderFactory, PutRoleRequestBuilderFactory.class); + // TODO add bulkPutRoleRequestBuilderFactory loading here when available loadSingletonExtensionAndSetOnce(loader, getBuiltinPrivilegesResponseTranslator, GetBuiltinPrivilegesResponseTranslator.class); loadSingletonExtensionAndSetOnce(loader, updateApiKeyRequestTranslator, UpdateApiKeyRequestTranslator.class); loadSingletonExtensionAndSetOnce(loader, bulkUpdateApiKeyRequestTranslator, BulkUpdateApiKeyRequestTranslator.class); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGrantAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGrantAction.java index 667b513555594..fffcb476abaa4 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGrantAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportGrantAction.java @@ -7,24 +7,33 @@ package org.elasticsearch.xpack.security.action; +import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.Grant; import org.elasticsearch.xpack.core.security.action.GrantRequest; import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.authc.support.BearerToken; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.security.authc.AuthenticationService; +import org.elasticsearch.xpack.security.authc.jwt.JwtAuthenticationToken; import org.elasticsearch.xpack.security.authz.AuthorizationService; +import static org.elasticsearch.xpack.core.security.action.Grant.ACCESS_TOKEN_GRANT_TYPE; +import static org.elasticsearch.xpack.core.security.action.Grant.PASSWORD_GRANT_TYPE; + public abstract class TransportGrantAction extends TransportAction< Request, Response> { @@ -50,7 +59,7 @@ public TransportGrantAction( @Override public final void doExecute(Task task, Request request, ActionListener listener) { try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - final AuthenticationToken authenticationToken = request.getGrant().getAuthenticationToken(); + final AuthenticationToken authenticationToken = getAuthenticationToken(request.getGrant()); assert authenticationToken != null : "authentication token must not be null"; final String runAsUsername = request.getGrant().getRunAsUsername(); @@ -109,4 +118,30 @@ protected abstract void doExecuteWithGrantAuthentication( Authentication authentication, ActionListener listener ); + + public static AuthenticationToken getAuthenticationToken(Grant grant) { + assert grant.validate(null) == null 
: "grant is invalid"; + return switch (grant.getType()) { + case PASSWORD_GRANT_TYPE -> new UsernamePasswordToken(grant.getUsername(), grant.getPassword()); + case ACCESS_TOKEN_GRANT_TYPE -> { + SecureString clientAuthentication = grant.getClientAuthentication() != null + ? grant.getClientAuthentication().value() + : null; + AuthenticationToken token = JwtAuthenticationToken.tryParseJwt(grant.getAccessToken(), clientAuthentication); + if (token != null) { + yield token; + } + if (clientAuthentication != null) { + clientAuthentication.close(); + throw new ElasticsearchSecurityException( + "[client_authentication] not supported with the supplied access_token type", + RestStatus.BAD_REQUEST + ); + } + // here we effectively assume it's an ES access token (from the {@code TokenService}) + yield new BearerToken(grant.getAccessToken()); + } + default -> throw new ElasticsearchSecurityException("the grant type [{}] is not supported", grant.getType()); + }; + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportBulkDeleteRolesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportBulkDeleteRolesAction.java new file mode 100644 index 0000000000000..1bd9e6e108e45 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportBulkDeleteRolesAction.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.security.action.role; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.ActionTypes; +import org.elasticsearch.xpack.core.security.action.role.BulkDeleteRolesRequest; +import org.elasticsearch.xpack.core.security.action.role.BulkRolesResponse; +import org.elasticsearch.xpack.security.authz.store.NativeRolesStore; + +public class TransportBulkDeleteRolesAction extends TransportAction { + + private final NativeRolesStore rolesStore; + + @Inject + public TransportBulkDeleteRolesAction(ActionFilters actionFilters, NativeRolesStore rolesStore, TransportService transportService) { + super(ActionTypes.BULK_DELETE_ROLES.name(), actionFilters, transportService.getTaskManager()); + this.rolesStore = rolesStore; + } + + @Override + protected void doExecute(Task task, BulkDeleteRolesRequest request, ActionListener listener) { + rolesStore.deleteRoles(request.getRoleNames(), request.getRefreshPolicy(), listener); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportBulkPutRolesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportBulkPutRolesAction.java new file mode 100644 index 0000000000000..19972e90bdbbe --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportBulkPutRolesAction.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.security.action.role; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.ActionTypes; +import org.elasticsearch.xpack.core.security.action.role.BulkPutRolesRequest; +import org.elasticsearch.xpack.core.security.action.role.BulkRolesResponse; +import org.elasticsearch.xpack.security.authz.store.NativeRolesStore; + +public class TransportBulkPutRolesAction extends TransportAction { + private final NativeRolesStore rolesStore; + + @Inject + public TransportBulkPutRolesAction(ActionFilters actionFilters, NativeRolesStore rolesStore, TransportService transportService) { + super(ActionTypes.BULK_PUT_ROLES.name(), actionFilters, transportService.getTaskManager()); + this.rolesStore = rolesStore; + } + + @Override + protected void doExecute(Task task, final BulkPutRolesRequest request, final ActionListener listener) { + rolesStore.putRoles(request.getRefreshPolicy(), request.getRoles(), listener); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleAction.java index 87b9bb72884be..c22e460728dca 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleAction.java @@ -6,75 +6,36 @@ */ package org.elasticsearch.xpack.security.action.role; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.security.action.role.PutRoleAction; import org.elasticsearch.xpack.core.security.action.role.PutRoleRequest; import org.elasticsearch.xpack.core.security.action.role.PutRoleResponse; -import org.elasticsearch.xpack.core.security.authz.support.DLSRoleQueryValidator; -import org.elasticsearch.xpack.security.authz.ReservedRoleNameChecker; import org.elasticsearch.xpack.security.authz.store.NativeRolesStore; -import static org.elasticsearch.action.ValidateActions.addValidationError; - public class TransportPutRoleAction extends TransportAction { private final NativeRolesStore rolesStore; - private final NamedXContentRegistry xContentRegistry; - private final ReservedRoleNameChecker reservedRoleNameChecker; @Inject - public TransportPutRoleAction( - ActionFilters actionFilters, - NativeRolesStore rolesStore, - TransportService transportService, - NamedXContentRegistry xContentRegistry, - ReservedRoleNameChecker reservedRoleNameChecker - ) { + public TransportPutRoleAction(ActionFilters actionFilters, NativeRolesStore rolesStore, TransportService transportService) { 
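+        // Validation that used to happen in this action (reserved role name check, DLS query validation via
+        // DLSRoleQueryValidator) is now handled inside NativeRolesStore, which Security.createComponents
+        // constructs with the ReservedRoleNameChecker and NamedXContentRegistry, so the single-role and
+        // bulk-role write paths share the same checks.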
super(PutRoleAction.NAME, actionFilters, transportService.getTaskManager()); this.rolesStore = rolesStore; - this.xContentRegistry = xContentRegistry; - this.reservedRoleNameChecker = reservedRoleNameChecker; } @Override protected void doExecute(Task task, final PutRoleRequest request, final ActionListener listener) { - final Exception validationException = validateRequest(request); - if (validationException != null) { - listener.onFailure(validationException); - } else { - rolesStore.putRole(request, request.roleDescriptor(), listener.safeMap(created -> { - if (created) { - logger.info("added role [{}]", request.name()); - } else { - logger.info("updated role [{}]", request.name()); - } - return new PutRoleResponse(created); - })); - } - } - - private Exception validateRequest(final PutRoleRequest request) { - // TODO we can remove this -- `execute()` already calls `request.validate()` before `doExecute()` - ActionRequestValidationException validationException = request.validate(); - if (validationException != null) { - return validationException; - } - if (reservedRoleNameChecker.isReserved(request.name())) { - throw addValidationError("Role [" + request.name() + "] is reserved and may not be used.", null); - } - try { - DLSRoleQueryValidator.validateQueryField(request.roleDescriptor().getIndicesPrivileges(), xContentRegistry); - } catch (ElasticsearchException | IllegalArgumentException e) { - return e; - } - return null; + rolesStore.putRole(request.getRefreshPolicy(), request.roleDescriptor(), listener.safeMap(created -> { + if (created) { + logger.info("added role [{}]", request.name()); + } else { + logger.info("updated role [{}]", request.name()); + } + return new PutRoleResponse(created); + })); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportQueryRoleAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportQueryRoleAction.java new file mode 100644 index 0000000000000..d6bdfea29a676 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportQueryRoleAction.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security.action.role; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.ActionTypes; +import org.elasticsearch.xpack.core.security.action.role.QueryRoleRequest; +import org.elasticsearch.xpack.core.security.action.role.QueryRoleResponse; +import org.elasticsearch.xpack.security.authz.store.NativeRolesStore; +import org.elasticsearch.xpack.security.support.FieldNameTranslators; +import org.elasticsearch.xpack.security.support.RoleBoolQueryBuilder; + +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.xpack.security.support.FieldNameTranslators.ROLE_FIELD_NAME_TRANSLATORS; + +public class TransportQueryRoleAction extends TransportAction { + + public static final String ROLE_NAME_RUNTIME_MAPPING_FIELD = "runtime_role_name"; + private static final Map ROLE_NAME_RUNTIME_MAPPING = Map.of( + ROLE_NAME_RUNTIME_MAPPING_FIELD, + Map.of("type", "keyword", "script", Map.of("source", "emit(params._fields['_id'].value.substring(5));")) + ); + + private final NativeRolesStore nativeRolesStore; + + @Inject + public TransportQueryRoleAction(ActionFilters actionFilters, NativeRolesStore nativeRolesStore, TransportService transportService) { + super(ActionTypes.QUERY_ROLE_ACTION.name(), actionFilters, transportService.getTaskManager()); + this.nativeRolesStore = nativeRolesStore; + } + + @Override + protected void doExecute(Task task, QueryRoleRequest request, ActionListener listener) { + SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.searchSource().version(false).fetchSource(true).trackTotalHits(true); + if (request.getFrom() != null) { + searchSourceBuilder.from(request.getFrom()); + } + if (request.getSize() != null) { + searchSourceBuilder.size(request.getSize()); + } + if (request.getSearchAfterBuilder() != null) { + searchSourceBuilder.searchAfter(request.getSearchAfterBuilder().getSortValues()); + } + AtomicBoolean accessesMetadata = new AtomicBoolean(false); + AtomicBoolean accessesRoleName = new AtomicBoolean(false); + searchSourceBuilder.query(RoleBoolQueryBuilder.build(request.getQueryBuilder(), indexFieldName -> { + if (indexFieldName.startsWith(FieldNameTranslators.FLATTENED_METADATA_INDEX_FIELD_NAME)) { + accessesMetadata.set(true); + } else if (indexFieldName.equals(ROLE_NAME_RUNTIME_MAPPING_FIELD)) { + accessesRoleName.set(true); + } + })); + if (request.getFieldSortBuilders() != null) { + ROLE_FIELD_NAME_TRANSLATORS.translateFieldSortBuilders(request.getFieldSortBuilders(), searchSourceBuilder, indexFieldName -> { + if (indexFieldName.startsWith(FieldNameTranslators.FLATTENED_METADATA_INDEX_FIELD_NAME)) { + accessesMetadata.set(true); + } else if (indexFieldName.equals(ROLE_NAME_RUNTIME_MAPPING_FIELD)) { + accessesRoleName.set(true); + } + }); + } + if (accessesMetadata.get() && nativeRolesStore.isMetadataSearchable() == false) { + listener.onFailure( + new ElasticsearchStatusException( + "Cannot query or sort role metadata until automatic migration completed", + RestStatus.SERVICE_UNAVAILABLE + ) + ); + return; + } + // 
only add the query-level runtime field to the search request if it's actually referring to the role name + if (accessesRoleName.get()) { + searchSourceBuilder.runtimeMappings(ROLE_NAME_RUNTIME_MAPPING); + } + nativeRolesStore.queryRoleDescriptors( + searchSourceBuilder, + ActionListener.wrap( + queryRoleResults -> listener.onResponse(new QueryRoleResponse(queryRoleResults.total(), queryRoleResults.items())), + listener::onFailure + ) + ); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java index 01104806c4a1c..bc5cc4a5e6b3f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java @@ -44,6 +44,7 @@ import org.elasticsearch.xcontent.json.JsonStringEncoder; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.core.security.action.ActionTypes; import org.elasticsearch.xpack.core.security.action.Grant; import org.elasticsearch.xpack.core.security.action.apikey.AbstractCreateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.BaseSingleUpdateApiKeyRequest; @@ -72,6 +73,8 @@ import org.elasticsearch.xpack.core.security.action.profile.SetProfileEnabledRequest; import org.elasticsearch.xpack.core.security.action.profile.UpdateProfileDataAction; import org.elasticsearch.xpack.core.security.action.profile.UpdateProfileDataRequest; +import org.elasticsearch.xpack.core.security.action.role.BulkDeleteRolesRequest; +import org.elasticsearch.xpack.core.security.action.role.BulkPutRolesRequest; import org.elasticsearch.xpack.core.security.action.role.DeleteRoleAction; import org.elasticsearch.xpack.core.security.action.role.DeleteRoleRequest; import org.elasticsearch.xpack.core.security.action.role.PutRoleAction; @@ -291,6 +294,8 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener { PutUserAction.NAME, PutRoleAction.NAME, PutRoleMappingAction.NAME, + ActionTypes.BULK_PUT_ROLES.name(), + ActionTypes.BULK_DELETE_ROLES.name(), TransportSetEnabledAction.TYPE.name(), TransportChangePasswordAction.TYPE.name(), CreateApiKeyAction.NAME, @@ -731,6 +736,11 @@ public void accessGranted( } else if (msg instanceof PutRoleRequest) { assert PutRoleAction.NAME.equals(action); securityChangeLogEntryBuilder(requestId).withRequestBody((PutRoleRequest) msg).build(); + } else if (msg instanceof BulkPutRolesRequest bulkPutRolesRequest) { + assert ActionTypes.BULK_PUT_ROLES.name().equals(action); + for (RoleDescriptor roleDescriptor : bulkPutRolesRequest.getRoles()) { + securityChangeLogEntryBuilder(requestId).withRequestBody(roleDescriptor.getName(), roleDescriptor).build(); + } } else if (msg instanceof PutRoleMappingRequest) { assert PutRoleMappingAction.NAME.equals(action); securityChangeLogEntryBuilder(requestId).withRequestBody((PutRoleMappingRequest) msg).build(); @@ -755,6 +765,11 @@ public void accessGranted( } else if (msg instanceof DeleteRoleRequest) { assert DeleteRoleAction.NAME.equals(action); securityChangeLogEntryBuilder(requestId).withRequestBody((DeleteRoleRequest) msg).build(); + } else if (msg instanceof BulkDeleteRolesRequest bulkDeleteRolesRequest) { + assert ActionTypes.BULK_DELETE_ROLES.name().equals(action); + for
(String roleName : bulkDeleteRolesRequest.getRoleNames()) { + securityChangeLogEntryBuilder(requestId).withDeleteRole(roleName).build(); + } } else if (msg instanceof DeleteRoleMappingRequest) { assert DeleteRoleMappingAction.NAME.equals(action); securityChangeLogEntryBuilder(requestId).withRequestBody((DeleteRoleMappingRequest) msg).build(); @@ -1160,15 +1175,19 @@ LogEntryBuilder withRequestBody(ChangePasswordRequest changePasswordRequest) thr } LogEntryBuilder withRequestBody(PutRoleRequest putRoleRequest) throws IOException { + return withRequestBody(putRoleRequest.name(), putRoleRequest.roleDescriptor()); + } + + LogEntryBuilder withRequestBody(String roleName, RoleDescriptor roleDescriptor) throws IOException { logEntry.with(EVENT_ACTION_FIELD_NAME, "put_role"); XContentBuilder builder = JsonXContent.contentBuilder().humanReadable(true); builder.startObject() .startObject("role") - .field("name", putRoleRequest.name()) + .field("name", roleName) // the "role_descriptor" nested structure, where the "name" is left out, is closer to the event structure // for creating API Keys .field("role_descriptor"); - withRoleDescriptor(builder, putRoleRequest.roleDescriptor()); + withRoleDescriptor(builder, roleDescriptor); builder.endObject() // role .endObject(); logEntry.with(PUT_CONFIG_FIELD_NAME, Strings.toString(builder)); @@ -1350,7 +1369,7 @@ private static void withRoleDescriptor(XContentBuilder builder, RoleDescriptor r withIndicesPrivileges(builder, indicesPrivileges); } builder.endArray(); - // the toXContent method of the {@code RoleDescriptor.ApplicationResourcePrivileges) does a good job + // the toXContent method of the {@code RoleDescriptor.ApplicationResourcePrivileges} does a good job builder.xContentList(RoleDescriptor.Fields.APPLICATIONS.getPreferredName(), roleDescriptor.getApplicationPrivileges()); builder.array(RoleDescriptor.Fields.RUN_AS.getPreferredName(), roleDescriptor.getRunAs()); if (roleDescriptor.getMetadata() != null && false == roleDescriptor.getMetadata().isEmpty()) { @@ -1401,15 +1420,7 @@ LogEntryBuilder withRequestBody(DeleteUserRequest deleteUserRequest) throws IOEx } LogEntryBuilder withRequestBody(DeleteRoleRequest deleteRoleRequest) throws IOException { - logEntry.with(EVENT_ACTION_FIELD_NAME, "delete_role"); - XContentBuilder builder = JsonXContent.contentBuilder().humanReadable(true); - builder.startObject() - .startObject("role") - .field("name", deleteRoleRequest.name()) - .endObject() // role - .endObject(); - logEntry.with(DELETE_CONFIG_FIELD_NAME, Strings.toString(builder)); - return this; + return withDeleteRole(deleteRoleRequest.name()); } LogEntryBuilder withRequestBody(DeleteRoleMappingRequest deleteRoleMappingRequest) throws IOException { @@ -1532,6 +1543,18 @@ LogEntryBuilder withRequestBody(SetProfileEnabledRequest setProfileEnabledReques return this; } + LogEntryBuilder withDeleteRole(String roleName) throws IOException { + logEntry.with(EVENT_ACTION_FIELD_NAME, "delete_role"); + XContentBuilder builder = JsonXContent.contentBuilder().humanReadable(true); + builder.startObject() + .startObject("role") + .field("name", roleName) + .endObject() // role + .endObject(); + logEntry.with(DELETE_CONFIG_FIELD_NAME, Strings.toString(builder)); + return this; + } + static void withGrant(XContentBuilder builder, Grant grant) throws IOException { builder.startObject("grant").field("type", grant.getType()); if (grant.getUsername() != null) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkSetLoader.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkSetLoader.java index 0266fc7488e29..063cc85ea0187 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkSetLoader.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkSetLoader.java @@ -22,7 +22,6 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import org.elasticsearch.xpack.core.ssl.SSLService; import java.io.IOException; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkValidateUtil.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkValidateUtil.java index cc07b7dfa8381..89391f91a2731 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkValidateUtil.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwkValidateUtil.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import java.nio.charset.StandardCharsets; import java.security.PublicKey; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtAuthenticationToken.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticationToken.java similarity index 98% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtAuthenticationToken.java rename to x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticationToken.java index ebfaae72b9df2..cfef9aed5967a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtAuthenticationToken.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticationToken.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.xpack.core.security.authc.jwt; +package org.elasticsearch.xpack.security.authc.jwt; import com.nimbusds.jwt.JWTClaimsSet; import com.nimbusds.jwt.SignedJWT; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticator.java index b06aba1c9d87a..2345add07ba51 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticator.java @@ -19,7 +19,6 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.xpack.core.security.authc.RealmConfig; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtAuthenticationToken; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import org.elasticsearch.xpack.core.ssl.SSLService; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java index 30a7e438e70b0..7613e7b3972af 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java @@ -31,9 +31,7 @@ import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtAuthenticationToken; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import org.elasticsearch.xpack.core.security.authc.support.CachingRealm; import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.core.security.support.CacheIteratorHelper; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtSignatureValidator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtSignatureValidator.java index e183ee7d73ac2..b1ee1b77998ec 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtSignatureValidator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtSignatureValidator.java @@ -35,14 +35,13 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import org.elasticsearch.xpack.core.ssl.SSLService; import java.util.Arrays; import java.util.List; import java.util.stream.Stream; -import static org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil.toStringRedactSignature; +import static org.elasticsearch.xpack.security.authc.jwt.JwtUtil.toStringRedactSignature; public interface JwtSignatureValidator extends Releasable { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtUtil.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtil.java similarity index 99% rename from 
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtUtil.java rename to x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtil.java index d70b76f8bc574..928ecd7fa265d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtUtil.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtil.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.core.security.authc.jwt; +package org.elasticsearch.xpack.security.authc.jwt; import com.nimbusds.jose.JWSObject; import com.nimbusds.jose.jwk.JWK; @@ -47,6 +47,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import org.elasticsearch.xpack.core.ssl.SSLService; import java.io.InputStream; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java index e637bda19d886..0f34850b861b7 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java @@ -91,9 +91,9 @@ import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings; import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.authc.jwt.JwtUtil; import java.io.IOException; import java.net.URI; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java index 5bd837c7d817c..a2d2b21b489ea 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java @@ -11,8 +11,12 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.DelegatingActionListener; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetResponse; @@ -21,9 +25,11 @@ import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.MultiSearchResponse; -import org.elasticsearch.action.search.MultiSearchResponse.Item; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.TransportSearchAction; import 
org.elasticsearch.action.support.ContextPreservingActionListener; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; @@ -36,26 +42,37 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.security.ScrollHelper; +import org.elasticsearch.xpack.core.security.action.role.BulkRolesResponse; import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheAction; import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheRequest; import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheResponse; import org.elasticsearch.xpack.core.security.action.role.DeleteRoleRequest; -import org.elasticsearch.xpack.core.security.action.role.PutRoleRequest; +import org.elasticsearch.xpack.core.security.action.role.QueryRoleResponse; +import org.elasticsearch.xpack.core.security.action.role.QueryRoleResponse.QueryRoleResult; +import org.elasticsearch.xpack.core.security.action.role.RoleDescriptorRequestValidator; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import org.elasticsearch.xpack.core.security.authz.store.RoleRetrievalResult; +import org.elasticsearch.xpack.core.security.authz.support.DLSRoleQueryValidator; import org.elasticsearch.xpack.core.security.support.NativeRealmValidationUtil; +import org.elasticsearch.xpack.security.authz.ReservedRoleNameChecker; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -64,6 +81,7 @@ import java.util.function.Supplier; import static org.elasticsearch.TransportVersions.ROLE_REMOTE_CLUSTER_PRIVS; +import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.index.query.QueryBuilders.existsQuery; import static org.elasticsearch.search.SearchService.DEFAULT_KEEPALIVE_SETTING; import static org.elasticsearch.transport.RemoteClusterPortSettings.TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY; @@ -74,6 +92,7 @@ import static org.elasticsearch.xpack.core.security.authz.RoleDescriptor.ROLE_TYPE; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.PRIMARY_SHARDS; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.SEARCH_SHARDS; +import static org.elasticsearch.xpack.security.support.SecurityMigrations.ROLE_METADATA_FLATTENED_MIGRATION_VERSION; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MAIN_ALIAS; import static 
org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_ROLES_METADATA_FLATTENED; @@ -106,6 +125,12 @@ public class NativeRolesStore implements BiConsumer, ActionListener< .allowDescription(true) .build(); + private static final Set UPDATE_ROLES_REFRESH_CACHE_RESULTS = Set.of( + DocWriteResponse.Result.CREATED, + DocWriteResponse.Result.UPDATED, + DocWriteResponse.Result.DELETED + ); + private final Settings settings; private final Client client; private final XPackLicenseState licenseState; @@ -117,13 +142,19 @@ public class NativeRolesStore implements BiConsumer, ActionListener< private final FeatureService featureService; + private final ReservedRoleNameChecker reservedRoleNameChecker; + + private final NamedXContentRegistry xContentRegistry; + public NativeRolesStore( Settings settings, Client client, XPackLicenseState licenseState, SecurityIndexManager securityIndex, ClusterService clusterService, - FeatureService featureService + FeatureService featureService, + ReservedRoleNameChecker reservedRoleNameChecker, + NamedXContentRegistry xContentRegistry ) { this.settings = settings; this.client = client; @@ -131,6 +162,8 @@ public NativeRolesStore( this.securityIndex = securityIndex; this.clusterService = clusterService; this.featureService = featureService; + this.reservedRoleNameChecker = reservedRoleNameChecker; + this.xContentRegistry = xContentRegistry; this.enabled = settings.getAsBoolean(NATIVE_ROLES_ENABLED, true); } @@ -217,6 +250,54 @@ public void getRoleDescriptors(Set names, final ActionListener listener) { + SearchRequest searchRequest = new SearchRequest(new String[] { SECURITY_MAIN_ALIAS }, searchSourceBuilder); + SecurityIndexManager frozenSecurityIndex = securityIndex.defensiveCopy(); + if (frozenSecurityIndex.indexExists() == false) { + logger.debug("security index does not exist"); + listener.onResponse(QueryRoleResult.EMPTY); + } else if (frozenSecurityIndex.isAvailable(SEARCH_SHARDS) == false) { + listener.onFailure(frozenSecurityIndex.getUnavailableReason(SEARCH_SHARDS)); + } else { + securityIndex.checkIndexVersionThenExecute( + listener::onFailure, + () -> executeAsyncWithOrigin( + client, + SECURITY_ORIGIN, + TransportSearchAction.TYPE, + searchRequest, + ActionListener.wrap(searchResponse -> { + long total = searchResponse.getHits().getTotalHits().value; + if (total == 0) { + logger.debug("No roles found for query [{}]", searchRequest.source().query()); + listener.onResponse(QueryRoleResult.EMPTY); + return; + } + SearchHit[] hits = searchResponse.getHits().getHits(); + List items = Arrays.stream(hits).map(hit -> { + RoleDescriptor roleDescriptor = transformRole(hit.getId(), hit.getSourceRef(), logger, licenseState); + if (roleDescriptor == null) { + return null; + } + return new QueryRoleResponse.Item(roleDescriptor, hit.getSortValues()); + }).filter(Objects::nonNull).toList(); + listener.onResponse(new QueryRoleResult(total, items)); + }, listener::onFailure) + ) + ); + } + } + public void deleteRole(final DeleteRoleRequest deleteRoleRequest, final ActionListener listener) { if (enabled == false) { listener.onFailure(new IllegalStateException("Native role management is disabled")); @@ -230,7 +311,7 @@ public void deleteRole(final DeleteRoleRequest deleteRoleRequest, final ActionLi listener.onFailure(frozenSecurityIndex.getUnavailableReason(PRIMARY_SHARDS)); } else { securityIndex.checkIndexVersionThenExecute(listener::onFailure, () -> { - DeleteRequest request = client.prepareDelete(SECURITY_MAIN_ALIAS, 
getIdForRole(deleteRoleRequest.name())).request(); + DeleteRequest request = createRoleDeleteRequest(deleteRoleRequest.name()); request.setRefreshPolicy(deleteRoleRequest.getRefreshPolicy()); executeAsyncWithOrigin( client.threadPool().getThreadContext(), @@ -258,89 +339,310 @@ public void onFailure(Exception e) { } } - public void putRole(final PutRoleRequest request, final RoleDescriptor role, final ActionListener listener) { + public void deleteRoles( + final List roleNames, + WriteRequest.RefreshPolicy refreshPolicy, + final ActionListener listener + ) { if (enabled == false) { listener.onFailure(new IllegalStateException("Native role management is disabled")); return; } + BulkRequest bulkRequest = new BulkRequest().setRefreshPolicy(refreshPolicy); + Map validationErrorByRoleName = new HashMap<>(); + + for (String roleName : roleNames) { + if (reservedRoleNameChecker.isReserved(roleName)) { + validationErrorByRoleName.put( + roleName, + new IllegalArgumentException("role [" + roleName + "] is reserved and cannot be deleted") + ); + } else { + bulkRequest.add(createRoleDeleteRequest(roleName)); + } + } + + if (bulkRequest.numberOfActions() == 0) { + bulkResponseWithOnlyValidationErrors(roleNames, validationErrorByRoleName, listener); + return; + } + + final SecurityIndexManager frozenSecurityIndex = securityIndex.defensiveCopy(); + if (frozenSecurityIndex.indexExists() == false) { + logger.debug("security index does not exist"); + listener.onResponse(new BulkRolesResponse(List.of())); + } else if (frozenSecurityIndex.isAvailable(PRIMARY_SHARDS) == false) { + listener.onFailure(frozenSecurityIndex.getUnavailableReason(PRIMARY_SHARDS)); + } else { + securityIndex.checkIndexVersionThenExecute( + listener::onFailure, + () -> executeAsyncWithOrigin( + client.threadPool().getThreadContext(), + SECURITY_ORIGIN, + bulkRequest, + new ActionListener() { + @Override + public void onResponse(BulkResponse bulkResponse) { + bulkResponseAndRefreshRolesCache(roleNames, bulkResponse, validationErrorByRoleName, listener); + } + + @Override + public void onFailure(Exception e) { + logger.error(() -> "failed to delete roles", e); + listener.onFailure(e); + } + }, + client::bulk + ) + ); + } + } + + private void bulkResponseAndRefreshRolesCache( + List roleNames, + BulkResponse bulkResponse, + Map validationErrorByRoleName, + ActionListener listener + ) { + Iterator bulkItemResponses = bulkResponse.iterator(); + BulkRolesResponse.Builder bulkPutRolesResponseBuilder = new BulkRolesResponse.Builder(); + List rolesToRefreshInCache = new ArrayList<>(roleNames.size()); + roleNames.stream().map(roleName -> { + if (validationErrorByRoleName.containsKey(roleName)) { + return BulkRolesResponse.Item.failure(roleName, validationErrorByRoleName.get(roleName)); + } + BulkItemResponse resp = bulkItemResponses.next(); + if (resp.isFailed()) { + return BulkRolesResponse.Item.failure(roleName, resp.getFailure().getCause()); + } + if (UPDATE_ROLES_REFRESH_CACHE_RESULTS.contains(resp.getResponse().getResult())) { + rolesToRefreshInCache.add(roleName); + } + return BulkRolesResponse.Item.success(roleName, resp.getResponse().getResult()); + }).forEach(bulkPutRolesResponseBuilder::addItem); + + clearRoleCache(rolesToRefreshInCache.toArray(String[]::new), ActionListener.wrap(res -> { + listener.onResponse(bulkPutRolesResponseBuilder.build()); + }, listener::onFailure), bulkResponse); + } + + private void bulkResponseWithOnlyValidationErrors( + List roleNames, + Map validationErrorByRoleName, + ActionListener listener + ) { + 
BulkRolesResponse.Builder bulkRolesResponseBuilder = new BulkRolesResponse.Builder(); + roleNames.stream() + .map(roleName -> BulkRolesResponse.Item.failure(roleName, validationErrorByRoleName.get(roleName))) + .forEach(bulkRolesResponseBuilder::addItem); + + listener.onResponse(bulkRolesResponseBuilder.build()); + } + + private void executeAsyncRolesBulkRequest(BulkRequest bulkRequest, ActionListener listener) { + securityIndex.checkIndexVersionThenExecute( + listener::onFailure, + () -> executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, bulkRequest, listener, client::bulk) + ); + } + + private Exception validateRoleDescriptor(RoleDescriptor role) { + ActionRequestValidationException validationException = null; + validationException = RoleDescriptorRequestValidator.validate(role, validationException); + + if (reservedRoleNameChecker.isReserved(role.getName())) { + throw addValidationError("Role [" + role.getName() + "] is reserved and may not be used.", validationException); + } + if (role.isUsingDocumentOrFieldLevelSecurity() && DOCUMENT_LEVEL_SECURITY_FEATURE.checkWithoutTracking(licenseState) == false) { - listener.onFailure(LicenseUtils.newComplianceException("field and document level security")); + return LicenseUtils.newComplianceException("field and document level security"); } else if (role.hasRemoteIndicesPrivileges() && clusterService.state().getMinTransportVersion().before(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY)) { - listener.onFailure( - new IllegalStateException( - "all nodes must have version [" - + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion() - + "] or higher to support remote indices privileges" - ) + return new IllegalStateException( + "all nodes must have version [" + + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion() + + "] or higher to support remote indices privileges" ); } else if (role.hasRemoteClusterPermissions() && clusterService.state().getMinTransportVersion().before(ROLE_REMOTE_CLUSTER_PRIVS)) { - listener.onFailure( - new IllegalStateException( - "all nodes must have version [" + ROLE_REMOTE_CLUSTER_PRIVS + "] or higher to support remote cluster privileges" - ) + return new IllegalStateException( + "all nodes must have version [" + ROLE_REMOTE_CLUSTER_PRIVS + "] or higher to support remote cluster privileges" ); } else if (role.hasDescription() && clusterService.state().getMinTransportVersion().before(TransportVersions.SECURITY_ROLE_DESCRIPTION)) { - listener.onFailure( - new IllegalStateException( - "all nodes must have version [" - + TransportVersions.SECURITY_ROLE_DESCRIPTION.toReleaseVersion() - + "] or higher to support specifying role description" - ) + return new IllegalStateException( + "all nodes must have version [" + + TransportVersions.SECURITY_ROLE_DESCRIPTION.toReleaseVersion() + + "] or higher to support specifying role description" ); - } else { - innerPutRole(request, role, listener); } + try { + DLSRoleQueryValidator.validateQueryField(role.getIndicesPrivileges(), xContentRegistry); + } catch (ElasticsearchException | IllegalArgumentException e) { + return e; + } + + return validationException; } - // pkg-private for testing - void innerPutRole(final PutRoleRequest request, final RoleDescriptor role, final ActionListener listener) { - final String roleName = role.getName(); - assert NativeRealmValidationUtil.validateRoleName(roleName, false) == null : "Role name was invalid or reserved: " + roleName; - assert false == role.hasRestriction() : "restriction 
is not supported for native roles"; + public void putRole(final WriteRequest.RefreshPolicy refreshPolicy, final RoleDescriptor role, final ActionListener listener) { + if (enabled == false) { + listener.onFailure(new IllegalStateException("Native role management is disabled")); + return; + } + Exception validationException = validateRoleDescriptor(role); + + if (validationException != null) { + listener.onFailure(validationException); + return; + } - securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { - final XContentBuilder xContentBuilder; + try { + IndexRequest indexRequest = createRoleIndexRequest(role); + indexRequest.setRefreshPolicy(refreshPolicy); + securityIndex.prepareIndexIfNeededThenExecute( + listener::onFailure, + () -> executeAsyncWithOrigin( + client.threadPool().getThreadContext(), + SECURITY_ORIGIN, + indexRequest, + new ActionListener() { + @Override + public void onResponse(DocWriteResponse indexResponse) { + final boolean created = indexResponse.getResult() == DocWriteResponse.Result.CREATED; + logger.trace("Created role: [{}]", indexRequest); + clearRoleCache(role.getName(), listener, created); + } + + @Override + public void onFailure(Exception e) { + logger.error(() -> "failed to put role [" + role.getName() + "]", e); + listener.onFailure(e); + } + }, + client::index + ) + ); + } catch (IOException exception) { + listener.onFailure(exception); + } + } + + public void putRoles( + final WriteRequest.RefreshPolicy refreshPolicy, + final List roles, + final ActionListener listener + ) { + if (enabled == false) { + listener.onFailure(new IllegalStateException("Native role management is disabled")); + return; + } + BulkRequest bulkRequest = new BulkRequest().setRefreshPolicy(refreshPolicy); + Map validationErrorByRoleName = new HashMap<>(); + + for (RoleDescriptor role : roles) { + Exception validationException; try { - xContentBuilder = role.toXContent( - jsonBuilder(), - ToXContent.EMPTY_PARAMS, - true, - featureService.clusterHasFeature(clusterService.state(), SECURITY_ROLES_METADATA_FLATTENED) - ); - } catch (IOException e) { - listener.onFailure(e); - return; + validationException = validateRoleDescriptor(role); + } catch (Exception e) { + validationException = e; } - final IndexRequest indexRequest = client.prepareIndex(SECURITY_MAIN_ALIAS) - .setId(getIdForRole(roleName)) - .setSource(xContentBuilder) - .setRefreshPolicy(request.getRefreshPolicy()) - .request(); - executeAsyncWithOrigin( + + if (validationException != null) { + validationErrorByRoleName.put(role.getName(), validationException); + } else { + try { + bulkRequest.add(createRoleUpsertRequest(role)); + } catch (IOException ioException) { + listener.onFailure(ioException); + } + } + } + + List roleNames = roles.stream().map(RoleDescriptor::getName).toList(); + + if (bulkRequest.numberOfActions() == 0) { + bulkResponseWithOnlyValidationErrors(roleNames, validationErrorByRoleName, listener); + return; + } + + securityIndex.prepareIndexIfNeededThenExecute( + listener::onFailure, + () -> executeAsyncWithOrigin( client.threadPool().getThreadContext(), SECURITY_ORIGIN, - indexRequest, - new ActionListener() { + bulkRequest, + new ActionListener() { @Override - public void onResponse(DocWriteResponse indexResponse) { - final boolean created = indexResponse.getResult() == DocWriteResponse.Result.CREATED; - logger.trace("Created role: [{}]", indexRequest); - clearRoleCache(roleName, listener, created); + public void onResponse(BulkResponse bulkResponse) { + 
bulkResponseAndRefreshRolesCache(roleNames, bulkResponse, validationErrorByRoleName, listener); } @Override public void onFailure(Exception e) { - logger.error(() -> "failed to put role [" + roleName + "]", e); + logger.error(() -> "failed to put roles", e); listener.onFailure(e); } }, - client::index - ); - }); + client::bulk + ) + ); + } + + private IndexRequest createRoleIndexRequest(final RoleDescriptor role) throws IOException { + return client.prepareIndex(SECURITY_MAIN_ALIAS) + .setId(getIdForRole(role.getName())) + .setSource(createRoleXContentBuilder(role)) + .request(); + } + + private UpdateRequest createRoleUpsertRequest(final RoleDescriptor role) throws IOException { + return client.prepareUpdate(SECURITY_MAIN_ALIAS, getIdForRole(role.getName())) + .setDoc(createRoleXContentBuilder(role)) + .setDocAsUpsert(true) + .request(); + } + + private DeleteRequest createRoleDeleteRequest(final String roleName) { + return client.prepareDelete(SECURITY_MAIN_ALIAS, getIdForRole(roleName)).request(); + } + + // Package private for testing + XContentBuilder createRoleXContentBuilder(RoleDescriptor role) throws IOException { + assert NativeRealmValidationUtil.validateRoleName(role.getName(), false) == null + : "Role name was invalid or reserved: " + role.getName(); + assert false == role.hasRestriction() : "restriction is not supported for native roles"; + + XContentBuilder builder = jsonBuilder().startObject(); + role.innerToXContent(builder, ToXContent.EMPTY_PARAMS, true); + + if (featureService.clusterHasFeature(clusterService.state(), SECURITY_ROLES_METADATA_FLATTENED)) { + builder.field(RoleDescriptor.Fields.METADATA_FLATTENED.getPreferredName(), role.getMetadata()); + } + + // When role descriptor XContent is generated for the security index all empty fields need to have default values to make sure + // existing values are overwritten if not present since the request to update could be an UpdateRequest + // (update provided fields in existing document or create document) or IndexRequest (replace and reindex document) + if (role.hasConfigurableClusterPrivileges() == false) { + builder.startObject(RoleDescriptor.Fields.GLOBAL.getPreferredName()).endObject(); + } + + if (role.hasRemoteIndicesPrivileges() == false) { + builder.field(RoleDescriptor.Fields.REMOTE_INDICES.getPreferredName(), RoleDescriptor.RemoteIndicesPrivileges.NONE); + } + + if (role.hasRemoteClusterPermissions() == false + && clusterService.state().getMinTransportVersion().onOrAfter(ROLE_REMOTE_CLUSTER_PRIVS)) { + builder.array(RoleDescriptor.Fields.REMOTE_CLUSTER.getPreferredName(), RemoteClusterPermissions.NONE); + } + if (role.hasDescription() == false + && clusterService.state().getMinTransportVersion().onOrAfter(TransportVersions.SECURITY_ROLE_DESCRIPTION)) { + builder.field(RoleDescriptor.Fields.DESCRIPTION.getPreferredName(), ""); + } + + builder.endObject(); + return builder; } public void usageStats(ActionListener> listener) { @@ -415,7 +717,7 @@ public void usageStats(ActionListener> listener) { new DelegatingActionListener>(listener) { @Override public void onResponse(MultiSearchResponse items) { - Item[] responses = items.getResponses(); + MultiSearchResponse.Item[] responses = items.getResponses(); if (responses[0].isFailure()) { usageStats.put("size", 0); } else { @@ -498,7 +800,11 @@ private void executeGetRoleRequest(String role, ActionListener list } private void clearRoleCache(final String role, ActionListener listener, Response response) { - ClearRolesCacheRequest request = new 
ClearRolesCacheRequest().names(role); + clearRoleCache(new String[] { role }, listener, response); + } + + private void clearRoleCache(final String[] roles, ActionListener listener, Response response) { + ClearRolesCacheRequest request = new ClearRolesCacheRequest().names(roles); executeAsyncWithOrigin(client, SECURITY_ORIGIN, ClearRolesCacheAction.INSTANCE, request, new ActionListener<>() { @Override public void onResponse(ClearRolesCacheResponse nodes) { @@ -507,9 +813,9 @@ public void onResponse(ClearRolesCacheResponse nodes) { @Override public void onFailure(Exception e) { - logger.error(() -> "unable to clear cache for role [" + role + "]", e); + logger.error(() -> "unable to clear cache for roles [" + Arrays.toString(roles) + "]", e); ElasticsearchException exception = new ElasticsearchException( - "clearing the cache for [" + role + "] failed. please clear the role cache manually", + "clearing the cache for [" + Arrays.toString(roles) + "] failed. please clear the role cache manually", e ); listener.onFailure(exception); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/DefaultOperatorOnlyRegistry.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/DefaultOperatorOnlyRegistry.java index 39813a2af5dfd..02dc32c4f3f63 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/DefaultOperatorOnlyRegistry.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/DefaultOperatorOnlyRegistry.java @@ -39,8 +39,6 @@ public class DefaultOperatorOnlyRegistry implements OperatorOnlyRegistry { // Autoscaling does not publish its actions to core, literal strings are needed. "cluster:admin/autoscaling/put_autoscaling_policy", "cluster:admin/autoscaling/delete_autoscaling_policy", - // Repository analysis is not mentioned in core, a literal string is needed. - "cluster:admin/repository/analyze", // Node shutdown APIs are operator only "cluster:admin/shutdown/create", "cluster:admin/shutdown/get", diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestBulkDeleteRolesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestBulkDeleteRolesAction.java new file mode 100644 index 0000000000000..683faf5cfa914 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestBulkDeleteRolesAction.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.security.rest.action.role; + +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xpack.core.security.action.ActionTypes; +import org.elasticsearch.xpack.core.security.action.role.BulkDeleteRolesRequest; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Rest endpoint to bulk delete roles to the security index + */ +public class RestBulkDeleteRolesAction extends NativeRoleBaseRestHandler { + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "bulk_delete_roles_request", + a -> new BulkDeleteRolesRequest((List) a[0]) + ); + + static { + PARSER.declareStringArray(constructorArg(), new ParseField("names")); + } + + public RestBulkDeleteRolesAction(Settings settings, XPackLicenseState licenseState) { + super(settings, licenseState); + } + + @Override + public List routes() { + return List.of(Route.builder(DELETE, "/_security/role").build()); + } + + @Override + public String getName() { + return "security_bulk_delete_roles_action"; + } + + @Override + protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + BulkDeleteRolesRequest bulkDeleteRolesRequest = PARSER.parse(request.contentParser(), null); + if (request.param("refresh") != null) { + bulkDeleteRolesRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.parse(request.param("refresh"))); + } + return channel -> client.execute(ActionTypes.BULK_DELETE_ROLES, bulkDeleteRolesRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestBulkPutRolesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestBulkPutRolesAction.java new file mode 100644 index 0000000000000..f132da09c4ec0 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestBulkPutRolesAction.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
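For illustration only, not part of the patch: a minimal sketch of how the bulk role deletion shown above might be invoked programmatically, assuming the BulkDeleteRolesRequest, ActionTypes.BULK_DELETE_ROLES and BulkRolesResponse types introduced in this change; the client and role names are placeholders.

import java.util.List;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.xpack.core.security.action.ActionTypes;
import org.elasticsearch.xpack.core.security.action.role.BulkDeleteRolesRequest;
import org.elasticsearch.xpack.core.security.action.role.BulkRolesResponse;

public class BulkDeleteRolesExample {
    // Sketch only: deletes two native roles in one request, mirroring what RestBulkDeleteRolesAction
    // does when it receives DELETE /_security/role with a {"names": [...]} body.
    static void deleteRoles(NodeClient client) {
        BulkDeleteRolesRequest request = new BulkDeleteRolesRequest(List.of("app_reader", "app_writer"));
        request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
        client.execute(ActionTypes.BULK_DELETE_ROLES, request, ActionListener.wrap(
            (BulkRolesResponse response) -> {
                // Each role is reported as its own item; reserved role names come back as per-item
                // failures rather than failing the whole request (see validationErrorByRoleName in
                // NativeRolesStore.deleteRoles).
            },
            e -> { /* transport-level failure */ }
        ));
    }
}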
+ */ +package org.elasticsearch.xpack.security.rest.action.role; + +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.security.action.role.BulkPutRoleRequestBuilder; +import org.elasticsearch.xpack.core.security.action.role.BulkPutRoleRequestBuilderFactory; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.POST; + +/** + * Rest endpoint to bulk add a Roles to the security index + */ +public class RestBulkPutRolesAction extends NativeRoleBaseRestHandler { + + private final BulkPutRoleRequestBuilderFactory builderFactory; + + public RestBulkPutRolesAction(Settings settings, XPackLicenseState licenseState, BulkPutRoleRequestBuilderFactory builderFactory) { + super(settings, licenseState); + this.builderFactory = builderFactory; + } + + @Override + public List routes() { + return List.of(Route.builder(POST, "/_security/role").build()); + } + + @Override + public String getName() { + return "security_bulk_put_roles_action"; + } + + @Override + protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + final BulkPutRoleRequestBuilder requestBuilder = builderFactory.create(client) + .content(request.requiredContent(), request.getXContentType()); + + if (request.param("refresh") != null) { + requestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.parse(request.param("refresh"))); + } + + return channel -> requestBuilder.execute(new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestQueryRoleAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestQueryRoleAction.java new file mode 100644 index 0000000000000..c2dc7166bd3b6 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestQueryRoleAction.java @@ -0,0 +1,106 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security.rest.action.role; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.search.searchafter.SearchAfterBuilder; +import org.elasticsearch.search.sort.FieldSortBuilder; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.security.action.ActionTypes; +import org.elasticsearch.xpack.core.security.action.role.QueryRoleRequest; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.index.query.AbstractQueryBuilder.parseTopLevelQuery; +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public final class RestQueryRoleAction extends NativeRoleBaseRestHandler { + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "query_role_request_payload", + a -> new Payload((QueryBuilder) a[0], (Integer) a[1], (Integer) a[2], (List) a[3], (SearchAfterBuilder) a[4]) + ); + + static { + PARSER.declareObject(optionalConstructorArg(), (p, c) -> parseTopLevelQuery(p), new ParseField("query")); + PARSER.declareInt(optionalConstructorArg(), new ParseField("from")); + PARSER.declareInt(optionalConstructorArg(), new ParseField("size")); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return new FieldSortBuilder(p.text()); + } else if (p.currentToken() == XContentParser.Token.START_OBJECT) { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, p.nextToken(), p); + final FieldSortBuilder fieldSortBuilder = FieldSortBuilder.fromXContent(p, p.currentName()); + XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, p.nextToken(), p); + return fieldSortBuilder; + } else { + throw new IllegalArgumentException("malformed sort object"); + } + }, new ParseField("sort")); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> SearchAfterBuilder.fromXContent(p), + new ParseField("search_after"), + ObjectParser.ValueType.VALUE_ARRAY + ); + } + + public RestQueryRoleAction(Settings settings, XPackLicenseState licenseState) { + super(settings, licenseState); + } + + @Override + public String getName() { + return "xpack_security_query_role"; + } + + @Override + public List routes() { + return List.of(new Route(GET, "/_security/_query/role"), new Route(POST, "/_security/_query/role")); + } + + @Override + protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + final QueryRoleRequest queryRoleRequest; + if (request.hasContentOrSourceParam()) { + RestQueryRoleAction.Payload payload = PARSER.parse(request.contentOrSourceParamParser(), null); + queryRoleRequest = new QueryRoleRequest( + payload.queryBuilder, + payload.from, + payload.size, + 
payload.fieldSortBuilders, + payload.searchAfterBuilder + ); + } else { + queryRoleRequest = new QueryRoleRequest(null, null, null, null, null); + } + return channel -> client.execute(ActionTypes.QUERY_ROLE_ACTION, queryRoleRequest, new RestToXContentListener<>(channel)); + } + + private record Payload( + @Nullable QueryBuilder queryBuilder, + @Nullable Integer from, + @Nullable Integer size, + @Nullable List fieldSortBuilders, + @Nullable SearchAfterBuilder searchAfterBuilder + ) {} +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/FieldNameTranslators.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/FieldNameTranslators.java index 6d0b076fd9bf1..31689f22608c6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/FieldNameTranslators.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/FieldNameTranslators.java @@ -38,9 +38,12 @@ import java.util.function.Consumer; import static org.elasticsearch.xpack.security.action.apikey.TransportQueryApiKeyAction.API_KEY_TYPE_RUNTIME_MAPPING_FIELD; +import static org.elasticsearch.xpack.security.action.role.TransportQueryRoleAction.ROLE_NAME_RUNTIME_MAPPING_FIELD; public final class FieldNameTranslators { + public static final String FLATTENED_METADATA_INDEX_FIELD_NAME = "metadata_flattened"; + public static final FieldNameTranslators API_KEY_FIELD_NAME_TRANSLATORS = new FieldNameTranslators( List.of( new SimpleFieldNameTranslator("creator.principal", "username"), @@ -53,7 +56,7 @@ public final class FieldNameTranslators { new SimpleFieldNameTranslator("invalidation_time", "invalidation"), // allows querying on any non-wildcard sub-fields under the "metadata." prefix // also allows querying on the "metadata" field itself (including by specifying patterns) - new FlattenedFieldNameTranslator("metadata_flattened", "metadata") + new FlattenedFieldNameTranslator(FLATTENED_METADATA_INDEX_FIELD_NAME, "metadata") ) ); @@ -68,6 +71,19 @@ public final class FieldNameTranslators { ) ); + public static final FieldNameTranslators ROLE_FIELD_NAME_TRANSLATORS = new FieldNameTranslators( + List.of( + new SimpleFieldNameTranslator(ROLE_NAME_RUNTIME_MAPPING_FIELD, "name"), + idemFieldNameTranslator("description"), + idemFieldNameTranslator("applications.application"), + idemFieldNameTranslator("applications.resources"), + idemFieldNameTranslator("applications.privileges"), + // allows querying on any non-wildcard sub-fields under the "metadata." prefix + // also allows querying on the "metadata" field itself (including by specifying patterns) + new FlattenedFieldNameTranslator(FLATTENED_METADATA_INDEX_FIELD_NAME, "metadata") + ) + ); + private final List fieldNameTranslators; private FieldNameTranslators(List fieldNameTranslators) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/RoleBoolQueryBuilder.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/RoleBoolQueryBuilder.java new file mode 100644 index 0000000000000..1d5b93fbb1917 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/RoleBoolQueryBuilder.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security.support; + +import org.apache.lucene.search.Query; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +import java.io.IOException; +import java.util.Set; +import java.util.function.Consumer; + +import static org.elasticsearch.xpack.security.support.FieldNameTranslators.ROLE_FIELD_NAME_TRANSLATORS; + +public class RoleBoolQueryBuilder extends BoolQueryBuilder { + + // Field names allowed at the index level + private static final Set FIELDS_ALLOWED_TO_QUERY = Set.of("_id", "type"); + + private RoleBoolQueryBuilder() {} + + /** + * Build a bool query that is specialised for querying roles from the security index. + * The method processes the given QueryBuilder to ensure: + * * Only fields from an allowlist are queried + * * Only query types from an allowlist are used + * * Field names used in the Query DSL get translated into corresponding names used at the index level. + * * Not exposing any other types of documents stored in the same security index + * + * @param queryBuilder This represents the query parsed directly from the user input. It is validated + * and transformed (see above). + * @param fieldNameVisitor This {@code Consumer} is invoked with all the (index-level) field names referred to in the passed-in query. + * @return A specialised query builder for roles that is safe to run on the security index. + */ + public static RoleBoolQueryBuilder build(QueryBuilder queryBuilder, @Nullable Consumer fieldNameVisitor) { + final RoleBoolQueryBuilder finalQuery = new RoleBoolQueryBuilder(); + if (queryBuilder != null) { + QueryBuilder processedQuery = ROLE_FIELD_NAME_TRANSLATORS.translateQueryBuilderFields(queryBuilder, fieldNameVisitor); + finalQuery.must(processedQuery); + } + finalQuery.filter(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), RoleDescriptor.ROLE_TYPE)); + if (fieldNameVisitor != null) { + fieldNameVisitor.accept(RoleDescriptor.Fields.TYPE.getPreferredName()); + } + return finalQuery; + } + + @Override + protected Query doToQuery(SearchExecutionContext context) throws IOException { + context.setAllowedFields(RoleBoolQueryBuilder::isIndexFieldNameAllowed); + return super.doToQuery(context); + } + + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { + if (queryRewriteContext instanceof SearchExecutionContext) { + ((SearchExecutionContext) queryRewriteContext).setAllowedFields(RoleBoolQueryBuilder::isIndexFieldNameAllowed); + } + return super.doRewrite(queryRewriteContext); + } + + static boolean isIndexFieldNameAllowed(String fieldName) { + return FIELDS_ALLOWED_TO_QUERY.contains(fieldName) || ROLE_FIELD_NAME_TRANSLATORS.isIndexFieldSupported(fieldName); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java index 1ac22bfd21883..61314e14c8bec 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java +++ 
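For illustration only, not part of the patch: a minimal sketch of how RoleBoolQueryBuilder.build, added above, wraps a caller-supplied query, assuming the field translations registered in ROLE_FIELD_NAME_TRANSLATORS ("name" maps to the runtime_role_name runtime field, "metadata.*" maps under metadata_flattened); the concrete field values are made up.

import java.util.HashSet;
import java.util.Set;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.xpack.security.support.RoleBoolQueryBuilder;

public class RoleQueryExample {
    static void buildRoleQuery() {
        // Caller-facing field names as accepted by the query role API.
        QueryBuilder userQuery = QueryBuilders.boolQuery()
            .must(QueryBuilders.termQuery("name", "my_role"))
            .must(QueryBuilders.existsQuery("metadata.environment"));
        Set<String> referencedIndexFields = new HashSet<>();
        RoleBoolQueryBuilder roleQuery = RoleBoolQueryBuilder.build(userQuery, referencedIndexFields::add);
        // roleQuery additionally filters on type=role, so only role documents in the security index
        // can match, and referencedIndexFields now holds the translated index-level field names that
        // TransportQueryRoleAction inspects to decide whether the role-name runtime mapping or the
        // metadata-migration check is needed.
    }
}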
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java @@ -38,6 +38,7 @@ import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.rest.RestStatus; @@ -55,6 +56,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_FORMAT_SETTING; +import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_INDEX_VERSION_CREATED; import static org.elasticsearch.indices.SystemIndexDescriptor.VERSION_META_KEY; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; import static org.elasticsearch.xpack.core.security.action.UpdateIndexMigrationVersionAction.MIGRATION_VERSION_CUSTOM_DATA_KEY; @@ -185,6 +187,14 @@ public boolean isStateRecovered() { return this.state != State.UNRECOVERED_STATE; } + public boolean isMigrationsVersionAtLeast(Integer expectedMigrationsVersion) { + return indexExists() && this.state.migrationsVersion.compareTo(expectedMigrationsVersion) >= 0; + } + + public boolean isCreatedOnLatestVersion() { + return this.state.createdOnLatestVersion; + } + public ElasticsearchException getUnavailableReason(Availability availability) { // ensure usage of a local copy so all checks execute against the same state! if (defensiveCopy == false) { @@ -244,6 +254,16 @@ private SystemIndexDescriptor.MappingsVersion getMinSecurityIndexMappingVersion( return mappingsVersion == null ? new SystemIndexDescriptor.MappingsVersion(1, 0) : mappingsVersion; } + /** + * Check if the index was created on the latest index version available in the cluster + */ + private static boolean isCreatedOnLatestVersion(IndexMetadata indexMetadata) { + final IndexVersion indexVersionCreated = indexMetadata != null + ? SETTING_INDEX_VERSION_CREATED.get(indexMetadata.getSettings()) + : null; + return indexVersionCreated != null && indexVersionCreated.onOrAfter(IndexVersion.current()); + } + @Override public void clusterChanged(ClusterChangedEvent event) { if (event.state().blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { @@ -254,7 +274,7 @@ public void clusterChanged(ClusterChangedEvent event) { } final State previousState = state; final IndexMetadata indexMetadata = resolveConcreteIndex(systemIndexDescriptor.getAliasName(), event.state().metadata()); - final Map customMetadata = indexMetadata == null ? null : indexMetadata.getCustomData(MIGRATION_VERSION_CUSTOM_KEY); + final boolean createdOnLatestVersion = isCreatedOnLatestVersion(indexMetadata); final Instant creationTime = indexMetadata != null ? Instant.ofEpochMilli(indexMetadata.getCreationDate()) : null; final boolean isIndexUpToDate = indexMetadata == null || INDEX_FORMAT_SETTING.get(indexMetadata.getSettings()) == systemIndexDescriptor.getIndexFormat(); @@ -262,7 +282,7 @@ public void clusterChanged(ClusterChangedEvent event) { final boolean indexAvailableForWrite = available.v1(); final boolean indexAvailableForSearch = available.v2(); final boolean mappingIsUpToDate = indexMetadata == null || checkIndexMappingUpToDate(event.state()); - final int migrationsVersion = customMetadata == null ? 
0 : Integer.parseInt(customMetadata.get(MIGRATION_VERSION_CUSTOM_DATA_KEY)); + final int migrationsVersion = getMigrationVersionFromIndexMetadata(indexMetadata); final SystemIndexDescriptor.MappingsVersion minClusterMappingVersion = getMinSecurityIndexMappingVersion(event.state()); final int indexMappingVersion = loadIndexMappingVersion(systemIndexDescriptor.getAliasName(), event.state()); final String concreteIndexName = indexMetadata == null @@ -290,6 +310,7 @@ public void clusterChanged(ClusterChangedEvent event) { indexAvailableForSearch, indexAvailableForWrite, mappingIsUpToDate, + createdOnLatestVersion, migrationsVersion, minClusterMappingVersion, indexMappingVersion, @@ -310,6 +331,15 @@ public void clusterChanged(ClusterChangedEvent event) { } } + public static int getMigrationVersionFromIndexMetadata(IndexMetadata indexMetadata) { + Map customMetadata = indexMetadata == null ? null : indexMetadata.getCustomData(MIGRATION_VERSION_CUSTOM_KEY); + if (customMetadata == null) { + return 0; + } + String migrationVersion = customMetadata.get(MIGRATION_VERSION_CUSTOM_DATA_KEY); + return migrationVersion == null ? 0 : Integer.parseInt(migrationVersion); + } + public void onStateRecovered(Consumer recoveredStateConsumer) { BiConsumer stateChangeListener = (previousState, nextState) -> { boolean stateJustRecovered = previousState == UNRECOVERED_STATE && nextState != UNRECOVERED_STATE; @@ -588,6 +618,7 @@ public static class State { false, false, false, + false, null, null, null, @@ -602,6 +633,7 @@ public static class State { public final boolean indexAvailableForSearch; public final boolean indexAvailableForWrite; public final boolean mappingUpToDate; + public final boolean createdOnLatestVersion; public final Integer migrationsVersion; // Min mapping version supported by the descriptors in the cluster public final SystemIndexDescriptor.MappingsVersion minClusterMappingVersion; @@ -619,6 +651,7 @@ public State( boolean indexAvailableForSearch, boolean indexAvailableForWrite, boolean mappingUpToDate, + boolean createdOnLatestVersion, Integer migrationsVersion, SystemIndexDescriptor.MappingsVersion minClusterMappingVersion, Integer indexMappingVersion, @@ -634,6 +667,7 @@ public State( this.indexAvailableForWrite = indexAvailableForWrite; this.mappingUpToDate = mappingUpToDate; this.migrationsVersion = migrationsVersion; + this.createdOnLatestVersion = createdOnLatestVersion; this.minClusterMappingVersion = minClusterMappingVersion; this.indexMappingVersion = indexMappingVersion; this.concreteIndexName = concreteIndexName; @@ -653,6 +687,7 @@ public boolean equals(Object o) { && indexAvailableForSearch == state.indexAvailableForSearch && indexAvailableForWrite == state.indexAvailableForWrite && mappingUpToDate == state.mappingUpToDate + && createdOnLatestVersion == state.createdOnLatestVersion && Objects.equals(indexMappingVersion, state.indexMappingVersion) && Objects.equals(migrationsVersion, state.migrationsVersion) && Objects.equals(minClusterMappingVersion, state.minClusterMappingVersion) @@ -674,6 +709,7 @@ public int hashCode() { indexAvailableForSearch, indexAvailableForWrite, mappingUpToDate, + createdOnLatestVersion, migrationsVersion, minClusterMappingVersion, indexMappingVersion, diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityMigrationExecutor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityMigrationExecutor.java index bd5d0fb5a8ef5..0f895a2db17e0 100644 --- 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityMigrationExecutor.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityMigrationExecutor.java @@ -46,10 +46,24 @@ public SecurityMigrationExecutor( @Override protected void nodeOperation(AllocatedPersistentTask task, SecurityMigrationTaskParams params, PersistentTaskState state) { - applyOutstandingMigrations(task, params.getMigrationVersion(), ActionListener.wrap((res) -> task.markAsCompleted(), (exception) -> { + ActionListener listener = ActionListener.wrap((res) -> task.markAsCompleted(), (exception) -> { logger.warn("Security migration failed: " + exception); task.markAsFailed(exception); - })); + }); + + if (params.isMigrationNeeded() == false) { + updateMigrationVersion( + params.getMigrationVersion(), + securityIndexManager.getConcreteIndexName(), + ActionListener.wrap(response -> { + logger.info("Security migration not needed. Setting current version to: [" + params.getMigrationVersion() + "]"); + listener.onResponse(response); + }, listener::onFailure) + ); + return; + } + + applyOutstandingMigrations(task, params.getMigrationVersion(), listener); } private void applyOutstandingMigrations(AllocatedPersistentTask task, int currentMigrationVersion, ActionListener listener) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityMigrations.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityMigrations.java index 8ef132ad0ed34..5ec76a8dc3d01 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityMigrations.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityMigrations.java @@ -28,6 +28,10 @@ import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SecurityMainIndexMappingVersion.ADD_REMOTE_CLUSTER_AND_DESCRIPTION_FIELDS; +/** + * Interface for creating SecurityMigrations that will be automatically applied once to existing .security indices + * IMPORTANT: A new index version needs to be added to {@link org.elasticsearch.index.IndexVersions} for the migration to be triggered + */ public class SecurityMigrations { public interface SecurityMigration { @@ -57,55 +61,64 @@ public interface SecurityMigration { int minMappingVersion(); } - public static final TreeMap MIGRATIONS_BY_VERSION = new TreeMap<>(Map.of(1, new SecurityMigration() { - private static final Logger logger = LogManager.getLogger(SecurityMigration.class); + public static final Integer ROLE_METADATA_FLATTENED_MIGRATION_VERSION = 1; - @Override - public void migrate(SecurityIndexManager indexManager, Client client, ActionListener listener) { - BoolQueryBuilder filterQuery = new BoolQueryBuilder().filter(QueryBuilders.termQuery("type", "role")) - .mustNot(QueryBuilders.existsQuery("metadata_flattened")); - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().query(filterQuery).size(0).trackTotalHits(true); - SearchRequest countRequest = new SearchRequest(indexManager.getConcreteIndexName()); - countRequest.source(searchSourceBuilder); + public static final TreeMap MIGRATIONS_BY_VERSION = new TreeMap<>( + Map.of(ROLE_METADATA_FLATTENED_MIGRATION_VERSION, new SecurityMigration() { + private static final Logger logger = LogManager.getLogger(SecurityMigration.class); - client.search(countRequest, ActionListener.wrap(response -> { - // If there are no roles, skip migration - if 
(response.getHits().getTotalHits().value > 0) { - logger.info("Preparing to migrate [" + response.getHits().getTotalHits().value + "] roles"); - updateRolesByQuery(indexManager, client, filterQuery, listener); - } else { - listener.onResponse(null); - } - }, listener::onFailure)); - } + @Override + public void migrate(SecurityIndexManager indexManager, Client client, ActionListener listener) { + BoolQueryBuilder filterQuery = new BoolQueryBuilder().filter(QueryBuilders.termQuery("type", "role")) + .mustNot(QueryBuilders.existsQuery("metadata_flattened")); + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().query(filterQuery).size(0).trackTotalHits(true); + SearchRequest countRequest = new SearchRequest(indexManager.getConcreteIndexName()); + countRequest.source(searchSourceBuilder); - private void updateRolesByQuery( - SecurityIndexManager indexManager, - Client client, - BoolQueryBuilder filterQuery, - ActionListener listener - ) { - UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(indexManager.getConcreteIndexName()); - updateByQueryRequest.setQuery(filterQuery); - updateByQueryRequest.setScript( - new Script(ScriptType.INLINE, "painless", "ctx._source.metadata_flattened = ctx._source.metadata", Collections.emptyMap()) - ); - client.admin() - .cluster() - .execute(UpdateByQueryAction.INSTANCE, updateByQueryRequest, ActionListener.wrap(bulkByScrollResponse -> { - logger.info("Migrated [" + bulkByScrollResponse.getTotal() + "] roles"); - listener.onResponse(null); + client.search(countRequest, ActionListener.wrap(response -> { + // If there are no roles, skip migration + if (response.getHits().getTotalHits().value > 0) { + logger.info("Preparing to migrate [" + response.getHits().getTotalHits().value + "] roles"); + updateRolesByQuery(indexManager, client, filterQuery, listener); + } else { + listener.onResponse(null); + } }, listener::onFailure)); - } + } + + private void updateRolesByQuery( + SecurityIndexManager indexManager, + Client client, + BoolQueryBuilder filterQuery, + ActionListener listener + ) { + UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(indexManager.getConcreteIndexName()); + updateByQueryRequest.setQuery(filterQuery); + updateByQueryRequest.setScript( + new Script( + ScriptType.INLINE, + "painless", + "ctx._source.metadata_flattened = ctx._source.metadata", + Collections.emptyMap() + ) + ); + client.admin() + .cluster() + .execute(UpdateByQueryAction.INSTANCE, updateByQueryRequest, ActionListener.wrap(bulkByScrollResponse -> { + logger.info("Migrated [" + bulkByScrollResponse.getTotal() + "] roles"); + listener.onResponse(null); + }, listener::onFailure)); + } - @Override - public Set nodeFeaturesRequired() { - return Set.of(SecuritySystemIndices.SECURITY_ROLES_METADATA_FLATTENED); - } + @Override + public Set nodeFeaturesRequired() { + return Set.of(SecuritySystemIndices.SECURITY_ROLES_METADATA_FLATTENED); + } - @Override - public int minMappingVersion() { - return ADD_REMOTE_CLUSTER_AND_DESCRIPTION_FIELDS.id(); - } - })); + @Override + public int minMappingVersion() { + return ADD_REMOTE_CLUSTER_AND_DESCRIPTION_FIELDS.id(); + } + }) + ); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 69e8d7b8b681e..1aa40a48ecc97 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -61,7 +61,6 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.script.ScriptService; import org.elasticsearch.telemetry.TelemetryProvider; -import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.MockLog; @@ -821,7 +820,7 @@ public void testSecurityRestHandlerInterceptorCanBeInstalled() throws IllegalAcc null, usageService, null, - Tracer.NOOP, + TelemetryProvider.NOOP, mock(ClusterService.class), null, List.of(), diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/reservedstate/ReservedRoleMappingActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/reservedstate/ReservedRoleMappingActionTests.java index cac7c91f73ed1..978a7a44b08a5 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/reservedstate/ReservedRoleMappingActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/reservedstate/ReservedRoleMappingActionTests.java @@ -21,7 +21,6 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.nullValue; /** * Tests that the ReservedRoleMappingAction does validation, can add and remove role mappings @@ -31,9 +30,7 @@ public class ReservedRoleMappingActionTests extends ESTestCase { private TransformState processJSON(ReservedRoleMappingAction action, TransformState prevState, String json) throws Exception { try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, json)) { var content = action.fromXContent(parser); - var state = action.transform(content, prevState); - assertThat(state.nonStateTransform(), nullValue()); - return state; + return action.transform(content, prevState); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleActionTests.java index 8610273f205c9..759bc80ac511f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleActionTests.java @@ -6,13 +6,9 @@ */ package org.elasticsearch.xpack.security.action.role; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -28,21 +24,16 @@ import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xpack.core.security.action.role.PutRoleRequest; import org.elasticsearch.xpack.core.security.action.role.PutRoleResponse; -import org.elasticsearch.xpack.core.security.authc.AuthenticationTestHelper; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import 
org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; -import org.elasticsearch.xpack.security.authz.ReservedRoleNameChecker; import org.elasticsearch.xpack.security.authz.store.NativeRolesStore; import org.junit.BeforeClass; -import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.test.ActionListenerUtils.anyActionListener; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -53,7 +44,6 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; public class TransportPutRoleActionTests extends ESTestCase { @@ -92,109 +82,6 @@ protected NamedXContentRegistry xContentRegistry() { ); } - public void testReservedRole() { - final String roleName = randomFrom(new ArrayList<>(ReservedRolesStore.names())); - NativeRolesStore rolesStore = mock(NativeRolesStore.class); - TransportService transportService = new TransportService( - Settings.EMPTY, - mock(Transport.class), - mock(ThreadPool.class), - TransportService.NOOP_TRANSPORT_INTERCEPTOR, - x -> null, - null, - Collections.emptySet() - ); - TransportPutRoleAction action = new TransportPutRoleAction( - mock(ActionFilters.class), - rolesStore, - transportService, - xContentRegistry(), - new ReservedRoleNameChecker.Default() - ); - - PutRoleRequest request = new PutRoleRequest(); - request.name(roleName); - - final AtomicReference throwableRef = new AtomicReference<>(); - final AtomicReference responseRef = new AtomicReference<>(); - action.doExecute(mock(Task.class), request, new ActionListener() { - @Override - public void onResponse(PutRoleResponse response) { - responseRef.set(response); - } - - @Override - public void onFailure(Exception e) { - throwableRef.set(e); - } - }); - - assertThat(responseRef.get(), is(nullValue())); - assertThat(throwableRef.get(), is(instanceOf(IllegalArgumentException.class))); - assertThat(throwableRef.get().getMessage(), containsString("is reserved and may not be used")); - verifyNoMoreInteractions(rolesStore); - } - - public void testValidRole() { - testValidRole(randomFrom("admin", "dept_a", "restricted")); - } - - public void testValidRoleWithInternalRoleName() { - testValidRole(AuthenticationTestHelper.randomInternalRoleName()); - } - - private void testValidRole(String roleName) { - NativeRolesStore rolesStore = mock(NativeRolesStore.class); - TransportService transportService = new TransportService( - Settings.EMPTY, - mock(Transport.class), - mock(ThreadPool.class), - TransportService.NOOP_TRANSPORT_INTERCEPTOR, - x -> null, - null, - Collections.emptySet() - ); - TransportPutRoleAction action = new TransportPutRoleAction( - mock(ActionFilters.class), - rolesStore, - transportService, - xContentRegistry(), - new ReservedRoleNameChecker.Default() - ); - - final boolean created = randomBoolean(); - PutRoleRequest request = new PutRoleRequest(); - request.name(roleName); - - doAnswer(invocation -> { - Object[] args = invocation.getArguments(); - assert args.length == 3; - @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) args[2]; - listener.onResponse(created); - return null; - }).when(rolesStore).putRole(eq(request), any(RoleDescriptor.class), 
anyActionListener()); - - final AtomicReference throwableRef = new AtomicReference<>(); - final AtomicReference responseRef = new AtomicReference<>(); - action.doExecute(mock(Task.class), request, new ActionListener() { - @Override - public void onResponse(PutRoleResponse response) { - responseRef.set(response); - } - - @Override - public void onFailure(Exception e) { - throwableRef.set(e); - } - }); - - assertThat(responseRef.get(), is(notNullValue())); - assertThat(responseRef.get().isCreated(), is(created)); - assertThat(throwableRef.get(), is(nullValue())); - verify(rolesStore, times(1)).putRole(eq(request), any(RoleDescriptor.class), anyActionListener()); - } - public void testException() { final Exception e = randomFrom(new ElasticsearchSecurityException(""), new IllegalStateException()); final String roleName = randomFrom("admin", "dept_a", "restricted"); @@ -208,17 +95,10 @@ public void testException() { null, Collections.emptySet() ); - TransportPutRoleAction action = new TransportPutRoleAction( - mock(ActionFilters.class), - rolesStore, - transportService, - xContentRegistry(), - new ReservedRoleNameChecker.Default() - ); + TransportPutRoleAction action = new TransportPutRoleAction(mock(ActionFilters.class), rolesStore, transportService); PutRoleRequest request = new PutRoleRequest(); request.name(roleName); - doAnswer(invocation -> { Object[] args = invocation.getArguments(); assert args.length == 3; @@ -226,11 +106,11 @@ public void testException() { ActionListener listener = (ActionListener) args[2]; listener.onFailure(e); return null; - }).when(rolesStore).putRole(eq(request), any(RoleDescriptor.class), anyActionListener()); + }).when(rolesStore).putRole(eq(request.getRefreshPolicy()), any(RoleDescriptor.class), anyActionListener()); final AtomicReference throwableRef = new AtomicReference<>(); final AtomicReference responseRef = new AtomicReference<>(); - action.doExecute(mock(Task.class), request, new ActionListener() { + action.doExecute(mock(Task.class), request, new ActionListener<>() { @Override public void onResponse(PutRoleResponse response) { responseRef.set(response); @@ -245,115 +125,6 @@ public void onFailure(Exception e) { assertThat(responseRef.get(), is(nullValue())); assertThat(throwableRef.get(), is(notNullValue())); assertThat(throwableRef.get(), is(sameInstance(e))); - verify(rolesStore, times(1)).putRole(eq(request), any(RoleDescriptor.class), anyActionListener()); - } - - public void testCreationOfRoleWithMalformedQueryJsonFails() { - NativeRolesStore rolesStore = mock(NativeRolesStore.class); - TransportService transportService = new TransportService( - Settings.EMPTY, - mock(Transport.class), - mock(ThreadPool.class), - TransportService.NOOP_TRANSPORT_INTERCEPTOR, - x -> null, - null, - Collections.emptySet() - ); - TransportPutRoleAction action = new TransportPutRoleAction( - mock(ActionFilters.class), - rolesStore, - transportService, - xContentRegistry(), - new ReservedRoleNameChecker.Default() - ); - PutRoleRequest request = new PutRoleRequest(); - request.name("test"); - String[] malformedQueryJson = new String[] { - "{ \"match_all\": { \"unknown_field\": \"\" } }", - "{ malformed JSON }", - "{ \"unknown\": {\"\"} }", - "{}" }; - BytesReference query = new BytesArray(randomFrom(malformedQueryJson)); - request.addIndex(new String[] { "idx1" }, new String[] { "read" }, null, null, query, randomBoolean()); - - final AtomicReference throwableRef = new AtomicReference<>(); - final AtomicReference responseRef = new AtomicReference<>(); - 
action.doExecute(mock(Task.class), request, new ActionListener() { - @Override - public void onResponse(PutRoleResponse response) { - responseRef.set(response); - } - - @Override - public void onFailure(Exception e) { - throwableRef.set(e); - } - }); - - assertThat(responseRef.get(), is(nullValue())); - assertThat(throwableRef.get(), is(notNullValue())); - Throwable t = throwableRef.get(); - assertThat(t, instanceOf(ElasticsearchParseException.class)); - assertThat( - t.getMessage(), - containsString( - "failed to parse field 'query' for indices [" - + Strings.arrayToCommaDelimitedString(new String[] { "idx1" }) - + "] at index privilege [0] of role descriptor" - ) - ); - } - - public void testCreationOfRoleWithUnsupportedQueryFails() throws Exception { - NativeRolesStore rolesStore = mock(NativeRolesStore.class); - TransportService transportService = new TransportService( - Settings.EMPTY, - mock(Transport.class), - mock(ThreadPool.class), - TransportService.NOOP_TRANSPORT_INTERCEPTOR, - x -> null, - null, - Collections.emptySet() - ); - TransportPutRoleAction action = new TransportPutRoleAction( - mock(ActionFilters.class), - rolesStore, - transportService, - xContentRegistry(), - new ReservedRoleNameChecker.Default() - ); - PutRoleRequest request = new PutRoleRequest(); - request.name("test"); - String hasChildQuery = "{ \"has_child\": { \"type\": \"child\", \"query\": { \"match_all\": {} } } }"; - String hasParentQuery = "{ \"has_parent\": { \"parent_type\": \"parent\", \"query\": { \"match_all\": {} } } }"; - BytesReference query = new BytesArray(randomFrom(hasChildQuery, hasParentQuery)); - request.addIndex(new String[] { "idx1" }, new String[] { "read" }, null, null, query, randomBoolean()); - - final AtomicReference throwableRef = new AtomicReference<>(); - final AtomicReference responseRef = new AtomicReference<>(); - action.doExecute(mock(Task.class), request, new ActionListener() { - @Override - public void onResponse(PutRoleResponse response) { - responseRef.set(response); - } - - @Override - public void onFailure(Exception e) { - throwableRef.set(e); - } - }); - - assertThat(responseRef.get(), is(nullValue())); - assertThat(throwableRef.get(), is(notNullValue())); - Throwable t = throwableRef.get(); - assertThat(t, instanceOf(ElasticsearchParseException.class)); - assertThat( - t.getMessage(), - containsString( - "failed to parse field 'query' for indices [" - + Strings.arrayToCommaDelimitedString(new String[] { "idx1" }) - + "] at index privilege [0] of role descriptor" - ) - ); + verify(rolesStore, times(1)).putRole(eq(request.getRefreshPolicy()), any(RoleDescriptor.class), anyActionListener()); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java index a3292a6ab5f4e..17bad90415e7c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java @@ -47,6 +47,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.action.ActionTypes; import org.elasticsearch.xpack.core.security.action.apikey.ApiKeyTests; import 
org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyRequest; @@ -73,6 +74,8 @@ import org.elasticsearch.xpack.core.security.action.profile.SetProfileEnabledRequest; import org.elasticsearch.xpack.core.security.action.profile.UpdateProfileDataAction; import org.elasticsearch.xpack.core.security.action.profile.UpdateProfileDataRequest; +import org.elasticsearch.xpack.core.security.action.role.BulkDeleteRolesRequest; +import org.elasticsearch.xpack.core.security.action.role.BulkPutRolesRequest; import org.elasticsearch.xpack.core.security.action.role.DeleteRoleAction; import org.elasticsearch.xpack.core.security.action.role.DeleteRoleRequest; import org.elasticsearch.xpack.core.security.action.role.PutRoleAction; @@ -772,20 +775,19 @@ public void testSecurityConfigChangeEventFormattingForRoles() throws IOException auditTrail.accessGranted(requestId, authentication, PutRoleAction.NAME, putRoleRequest, authorizationInfo); output = CapturingLogger.output(logger.getName(), Level.INFO); assertThat(output.size(), is(2)); - String generatedPutRoleAuditEventString = output.get(1); - String expectedPutRoleAuditEventString = Strings.format(""" - "put":{"role":{"name":"%s","role_descriptor":%s}}\ - """, putRoleRequest.name(), auditedRolesMap.get(putRoleRequest.name())); - assertThat(generatedPutRoleAuditEventString, containsString(expectedPutRoleAuditEventString)); - generatedPutRoleAuditEventString = generatedPutRoleAuditEventString.replace(", " + expectedPutRoleAuditEventString, ""); - checkedFields = new HashMap<>(commonFields); - checkedFields.remove(LoggingAuditTrail.ORIGIN_ADDRESS_FIELD_NAME); - checkedFields.remove(LoggingAuditTrail.ORIGIN_TYPE_FIELD_NAME); - checkedFields.put("type", "audit"); - checkedFields.put(LoggingAuditTrail.EVENT_TYPE_FIELD_NAME, "security_config_change"); - checkedFields.put(LoggingAuditTrail.EVENT_ACTION_FIELD_NAME, "put_role"); - checkedFields.put(LoggingAuditTrail.REQUEST_ID_FIELD_NAME, requestId); - assertMsg(generatedPutRoleAuditEventString, checkedFields); + assertPutRoleAuditLogLine(putRoleRequest.name(), output.get(1), auditedRolesMap, requestId); + // clear log + CapturingLogger.output(logger.getName(), Level.INFO).clear(); + + BulkPutRolesRequest bulkPutRolesRequest = new BulkPutRolesRequest(allTestRoleDescriptors); + bulkPutRolesRequest.setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values())); + auditTrail.accessGranted(requestId, authentication, ActionTypes.BULK_PUT_ROLES.name(), bulkPutRolesRequest, authorizationInfo); + output = CapturingLogger.output(logger.getName(), Level.INFO); + assertThat(output.size(), is(allTestRoleDescriptors.size() + 1)); + + for (int i = 0; i < allTestRoleDescriptors.size(); i++) { + assertPutRoleAuditLogLine(allTestRoleDescriptors.get(i).getName(), output.get(i + 1), auditedRolesMap, requestId); + } // clear log CapturingLogger.output(logger.getName(), Level.INFO).clear(); @@ -795,25 +797,64 @@ public void testSecurityConfigChangeEventFormattingForRoles() throws IOException auditTrail.accessGranted(requestId, authentication, DeleteRoleAction.NAME, deleteRoleRequest, authorizationInfo); output = CapturingLogger.output(logger.getName(), Level.INFO); assertThat(output.size(), is(2)); - String generatedDeleteRoleAuditEventString = output.get(1); + assertDeleteRoleAuditLogLine(putRoleRequest.name(), output.get(1), requestId); + // clear log + CapturingLogger.output(logger.getName(), Level.INFO).clear(); + + 
BulkDeleteRolesRequest bulkDeleteRolesRequest = new BulkDeleteRolesRequest( + allTestRoleDescriptors.stream().map(RoleDescriptor::getName).toList() + ); + bulkDeleteRolesRequest.setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values())); + auditTrail.accessGranted( + requestId, + authentication, + ActionTypes.BULK_DELETE_ROLES.name(), + bulkDeleteRolesRequest, + authorizationInfo + ); + output = CapturingLogger.output(logger.getName(), Level.INFO); + assertThat(output.size(), is(allTestRoleDescriptors.size() + 1)); + for (int i = 0; i < allTestRoleDescriptors.size(); i++) { + assertDeleteRoleAuditLogLine(allTestRoleDescriptors.get(i).getName(), output.get(i + 1), requestId); + } + } + + private void assertPutRoleAuditLogLine(String roleName, String logLine, Map expectedLogByRoleName, String requestId) { + String expectedPutRoleAuditEventString = Strings.format(""" + "put":{"role":{"name":"%s","role_descriptor":%s}}\ + """, roleName, expectedLogByRoleName.get(roleName)); + + assertThat(logLine, containsString(expectedPutRoleAuditEventString)); + String reducedLogLine = logLine.replace(", " + expectedPutRoleAuditEventString, ""); + Map checkedFields = new HashMap<>(commonFields); + checkedFields.remove(LoggingAuditTrail.ORIGIN_ADDRESS_FIELD_NAME); + checkedFields.remove(LoggingAuditTrail.ORIGIN_TYPE_FIELD_NAME); + checkedFields.put("type", "audit"); + checkedFields.put(LoggingAuditTrail.EVENT_TYPE_FIELD_NAME, "security_config_change"); + checkedFields.put(LoggingAuditTrail.EVENT_ACTION_FIELD_NAME, "put_role"); + checkedFields.put(LoggingAuditTrail.REQUEST_ID_FIELD_NAME, requestId); + assertMsg(reducedLogLine, checkedFields); + } + + private void assertDeleteRoleAuditLogLine(String roleName, String logLine, String requestId) { StringBuilder deleteRoleStringBuilder = new StringBuilder().append("\"delete\":{\"role\":{\"name\":"); - if (deleteRoleRequest.name() == null) { + if (roleName == null) { deleteRoleStringBuilder.append("null"); } else { - deleteRoleStringBuilder.append("\"").append(deleteRoleRequest.name()).append("\""); + deleteRoleStringBuilder.append("\"").append(roleName).append("\""); } deleteRoleStringBuilder.append("}}"); String expectedDeleteRoleAuditEventString = deleteRoleStringBuilder.toString(); - assertThat(generatedDeleteRoleAuditEventString, containsString(expectedDeleteRoleAuditEventString)); - generatedDeleteRoleAuditEventString = generatedDeleteRoleAuditEventString.replace(", " + expectedDeleteRoleAuditEventString, ""); - checkedFields = new HashMap<>(commonFields); + assertThat(logLine, containsString(expectedDeleteRoleAuditEventString)); + String reducedLogLine = logLine.replace(", " + expectedDeleteRoleAuditEventString, ""); + Map checkedFields = new HashMap<>(commonFields); checkedFields.remove(LoggingAuditTrail.ORIGIN_ADDRESS_FIELD_NAME); checkedFields.remove(LoggingAuditTrail.ORIGIN_TYPE_FIELD_NAME); checkedFields.put("type", "audit"); checkedFields.put(LoggingAuditTrail.EVENT_TYPE_FIELD_NAME, "security_config_change"); checkedFields.put(LoggingAuditTrail.EVENT_ACTION_FIELD_NAME, "delete_role"); checkedFields.put(LoggingAuditTrail.REQUEST_ID_FIELD_NAME, requestId); - assertMsg(generatedDeleteRoleAuditEventString, checkedFields); + assertMsg(reducedLogLine, checkedFields); } public void testSecurityConfigChangeEventForCrossClusterApiKeys() throws IOException { @@ -1975,6 +2016,11 @@ public void testSecurityConfigChangedEventSelection() { Tuple actionAndRequest = randomFrom( new Tuple<>(PutUserAction.NAME, new PutUserRequest()), new 
Tuple<>(PutRoleAction.NAME, new PutRoleRequest()), + new Tuple<>( + ActionTypes.BULK_PUT_ROLES.name(), + new BulkPutRolesRequest(List.of(new RoleDescriptor(randomAlphaOfLength(20), null, null, null))) + ), + new Tuple<>(ActionTypes.BULK_DELETE_ROLES.name(), new BulkDeleteRolesRequest(List.of(randomAlphaOfLength(20)))), new Tuple<>(PutRoleMappingAction.NAME, new PutRoleMappingRequest()), new Tuple<>(TransportSetEnabledAction.TYPE.name(), new SetEnabledRequest()), new Tuple<>(TransportChangePasswordAction.TYPE.name(), new ChangePasswordRequest()), diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index 330eecc1563e2..62b72b4f9750c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -2512,6 +2512,7 @@ private SecurityIndexManager.State dummyState(ClusterHealthStatus indexStatus) { true, true, true, + true, null, null, null, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java index 37a4cd4f783e4..2254c78a2910c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java @@ -39,6 +39,7 @@ private SecurityIndexManager.State dummyState(ClusterHealthStatus indexStatus) { true, true, true, + true, null, null, null, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticatorTests.java index 7a44ebae95738..6d4861212e286 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtAuthenticatorTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtAuthenticationToken; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import org.elasticsearch.xpack.core.ssl.SSLService; import org.junit.Before; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtIssuer.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtIssuer.java index 3d4d9eae6acd0..789ac04c40622 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtIssuer.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtIssuer.java @@ -14,7 +14,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import org.elasticsearch.xpack.core.security.user.User; import java.io.Closeable; diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmAuthenticateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmAuthenticateTests.java index bf6c64242701b..4f7b82a16e8f1 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmAuthenticateTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmAuthenticateTests.java @@ -25,7 +25,6 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtAuthenticationToken; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import org.elasticsearch.xpack.core.security.user.User; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmGenerateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmGenerateTests.java index 7a0e138305b83..8a5daa642002e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmGenerateTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmGenerateTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.core.security.user.User; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmInspector.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmInspector.java index 40a613a0907c8..7697849179acf 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmInspector.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmInspector.java @@ -11,7 +11,6 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import org.elasticsearch.xpack.core.security.authc.support.ClaimSetting; import java.net.URI; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmTestCase.java index 1bc49cb628464..ffc1fec1f5788 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmTestCase.java @@ -28,7 +28,6 @@ import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtAuthenticationToken; import 
org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings.ClientAuthenticationType; import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtilTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtilTests.java index 7d90dffd7517c..6fab33b4d6adf 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtilTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtilTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; -import org.elasticsearch.xpack.core.security.authc.jwt.JwtUtil; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/SecondaryAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/SecondaryAuthenticatorTests.java index 0b29b46b19b36..f26cd59f7532c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/SecondaryAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/SecondaryAuthenticatorTests.java @@ -67,6 +67,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; @@ -192,7 +193,11 @@ public void testAuthenticateTransportRequestFailsIfHeaderHasUnrecognizedCredenti final PlainActionFuture future = new PlainActionFuture<>(); authenticator.authenticate(AuthenticateAction.NAME, request, future); - final ElasticsearchSecurityException ex = expectThrows(ElasticsearchSecurityException.class, future::actionResult); + final ElasticsearchSecurityException ex = expectThrows( + ExecutionException.class, + ElasticsearchSecurityException.class, + future::result + ); assertThat(ex, TestMatchers.throwableWithMessage(Matchers.containsString("secondary user"))); assertThat(ex.getCause(), TestMatchers.throwableWithMessage(Matchers.containsString("credentials"))); } @@ -203,7 +208,11 @@ public void testAuthenticateRestRequestFailsIfHeaderHasUnrecognizedCredentials() final PlainActionFuture future = new PlainActionFuture<>(); authenticator.authenticateAndAttachToContext(request, future); - final ElasticsearchSecurityException ex = expectThrows(ElasticsearchSecurityException.class, future::actionResult); + final ElasticsearchSecurityException ex = expectThrows( + ExecutionException.class, + ElasticsearchSecurityException.class, + future::result + ); assertThat(ex, TestMatchers.throwableWithMessage(Matchers.containsString("secondary user"))); assertThat(ex.getCause(), TestMatchers.throwableWithMessage(Matchers.containsString("credentials"))); @@ -287,7 +296,11 @@ private void assertAuthenticateWithIncorrectPassword(Consumer future = new PlainActionFuture<>(); authenticator.authenticate(AuthenticateAction.NAME, request, future); - final SecondaryAuthentication secondaryAuthentication = future.actionResult(); + final SecondaryAuthentication secondaryAuthentication = 
future.result(); assertThat(secondaryAuthentication, Matchers.notNullValue()); assertThat(secondaryAuthentication.getAuthentication(), Matchers.notNullValue()); assertThat(secondaryAuthentication.getAuthentication().getEffectiveSubject().getUser(), equalTo(user)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java index c860ceeafc0f4..2a084bacfaf76 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java @@ -411,6 +411,7 @@ private SecurityIndexManager.State indexState(boolean isUpToDate, ClusterHealthS true, true, true, + true, null, null, null, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index 9d9528ec6f48b..5f878480a7d0d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -203,7 +203,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; -import java.util.function.Consumer; +import java.util.function.ObjLongConsumer; import java.util.function.Predicate; import java.util.function.Supplier; @@ -1571,7 +1571,7 @@ public void testDenialErrorMessagesForBulkIngest() throws Exception { authorize(authentication, TransportShardBulkAction.ACTION_NAME, request); MappingUpdatePerformer mappingUpdater = (m, s, l) -> l.onResponse(null); - Consumer> waitForMappingUpdate = l -> l.onResponse(null); + ObjLongConsumer> waitForMappingUpdate = (l, mappingVersion) -> l.onResponse(null); PlainActionFuture> future = new PlainActionFuture<>(); IndexShard indexShard = mock(IndexShard.class); when(indexShard.getBulkOperationListener()).thenReturn(new BulkOperationListener() { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java index 5b28c3dc39cfe..693bd9b868ede 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java @@ -1617,6 +1617,7 @@ public SecurityIndexManager.State dummyIndexState(boolean isIndexUpToDate, Clust true, true, true, + true, null, null, null, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java index 6a2ac7721c9a1..d3b75210a5cbe 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java @@ -792,6 
+792,7 @@ private SecurityIndexManager.State dummyState( true, true, true, + true, null, null, null, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java index e14a25088f749..bfa358d0b7d6e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java @@ -12,12 +12,20 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.delete.DeleteRequestBuilder; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; @@ -28,6 +36,7 @@ import org.elasticsearch.cluster.routing.UnassignedInfo.Reason; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.version.CompatibilityVersions; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; @@ -45,17 +54,22 @@ import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.core.security.action.role.PutRoleRequest; +import org.elasticsearch.xpack.core.security.action.role.BulkRolesResponse; +import org.elasticsearch.xpack.core.security.authc.AuthenticationTestHelper; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; import org.elasticsearch.xpack.core.security.authz.RoleRestrictionTests; import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup; import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; +import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; import org.elasticsearch.xpack.core.security.authz.store.RoleRetrievalResult; +import org.elasticsearch.xpack.security.authz.ReservedRoleNameChecker; import 
org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.elasticsearch.xpack.security.support.SecuritySystemIndices; import org.elasticsearch.xpack.security.test.SecurityTestUtils; @@ -65,15 +79,19 @@ import org.mockito.Mockito; import java.io.IOException; +import java.lang.reflect.Field; import java.nio.charset.Charset; import java.nio.file.Files; import java.nio.file.Path; +import java.util.ArrayList; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Set; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_FORMAT_SETTING; +import static org.elasticsearch.indices.SystemIndexDescriptor.VERSION_META_KEY; import static org.elasticsearch.transport.RemoteClusterPortSettings.TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY; import static org.elasticsearch.xpack.core.security.SecurityField.DOCUMENT_LEVEL_SECURITY_FEATURE; import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTestHelper.randomApplicationPrivileges; @@ -89,7 +107,10 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.core.IsNull.notNullValue; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -97,16 +118,64 @@ public class NativeRolesStoreTests extends ESTestCase { private ThreadPool threadPool; + private final Client client = mock(Client.class); + @Before - public void createThreadPool() { + public void beforeNativeRoleStoreTests() { threadPool = new TestThreadPool("index audit trail update mapping tests"); + when(client.threadPool()).thenReturn(threadPool); + when(client.prepareIndex(SECURITY_MAIN_ALIAS)).thenReturn(new IndexRequestBuilder(client)); + when(client.prepareUpdate(any(), any())).thenReturn(new UpdateRequestBuilder(client)); + when(client.prepareDelete(any(), any())).thenReturn(new DeleteRequestBuilder(client, SECURITY_MAIN_ALIAS)); } @After - public void terminateThreadPool() throws Exception { + public void terminateThreadPool() { terminate(threadPool); } + private NativeRolesStore createRoleStoreForTest() { + return createRoleStoreForTest(Settings.builder().build()); + } + + private NativeRolesStore createRoleStoreForTest(Settings settings) { + new ReservedRolesStore(Set.of("superuser")); + final ClusterService clusterService = mockClusterServiceWithMinNodeVersion(TransportVersion.current()); + final SecuritySystemIndices systemIndices = new SecuritySystemIndices(settings); + final FeatureService featureService = mock(FeatureService.class); + systemIndices.init(client, featureService, clusterService); + final SecurityIndexManager securityIndex = systemIndices.getMainIndexManager(); + // Create the index + securityIndex.clusterChanged(new ClusterChangedEvent("source", getClusterStateWithSecurityIndex(), getEmptyClusterState())); + + return new NativeRolesStore( + settings, + client, + TestUtils.newTestLicenseState(), + securityIndex, + clusterService, + mock(FeatureService.class), + new ReservedRoleNameChecker.Default(), + mock(NamedXContentRegistry.class) + ); + } + + private void putRole(NativeRolesStore rolesStore, RoleDescriptor roleDescriptor, ActionListener actionListener) + throws IOException { + if (randomBoolean()) { + 
rolesStore.putRole(WriteRequest.RefreshPolicy.IMMEDIATE, roleDescriptor, actionListener); + } else { + rolesStore.putRoles(WriteRequest.RefreshPolicy.IMMEDIATE, List.of(roleDescriptor), ActionListener.wrap(resp -> { + BulkRolesResponse.Item item = resp.getItems().get(0); + if (item.getResultType().equals("created")) { + actionListener.onResponse(true); + } else { + throw item.getCause(); + } + }, actionListener::onFailure)); + } + } + // test that we can read a role where field permissions are stored in 2.x format (fields:...) public void testBWCFieldPermissions() throws IOException { Path path = getDataPath("roles2xformat.json"); @@ -329,35 +398,28 @@ public void testPutOfRoleWithFlsDlsUnlicensed() throws IOException { final ClusterService clusterService = mockClusterServiceWithMinNodeVersion(TransportVersion.current()); final FeatureService featureService = mock(FeatureService.class); final XPackLicenseState licenseState = mock(XPackLicenseState.class); - final AtomicBoolean methodCalled = new AtomicBoolean(false); final SecuritySystemIndices systemIndices = new SecuritySystemIndices(clusterService.getSettings()); systemIndices.init(client, featureService, clusterService); final SecurityIndexManager securityIndex = systemIndices.getMainIndexManager(); - + // Init for validation + new ReservedRolesStore(Set.of("superuser")); final NativeRolesStore rolesStore = new NativeRolesStore( Settings.EMPTY, client, licenseState, securityIndex, clusterService, - mock(FeatureService.class) - ) { - @Override - void innerPutRole(final PutRoleRequest request, final RoleDescriptor role, final ActionListener listener) { - if (methodCalled.compareAndSet(false, true)) { - listener.onResponse(true); - } else { - fail("method called more than once!"); - } - } - }; + mock(FeatureService.class), + mock(ReservedRoleNameChecker.class), + mock(NamedXContentRegistry.class) + ); + // setup the roles store so the security index exists securityIndex.clusterChanged( new ClusterChangedEvent("fls_dls_license", getClusterStateWithSecurityIndex(), getEmptyClusterState()) ); - PutRoleRequest putRoleRequest = new PutRoleRequest(); RoleDescriptor flsRole = new RoleDescriptor( "fls", null, @@ -366,8 +428,9 @@ void innerPutRole(final PutRoleRequest request, final RoleDescriptor role, final null ); PlainActionFuture future = new PlainActionFuture<>(); - rolesStore.putRole(putRoleRequest, flsRole, future); + putRole(rolesStore, flsRole, future); ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, future::actionGet); + assertThat(e.getMessage(), containsString("field and document level security")); BytesReference matchAllBytes = XContentHelper.toXContent(QueryBuilders.matchAllQuery(), XContentType.JSON, false); @@ -378,7 +441,7 @@ void innerPutRole(final PutRoleRequest request, final RoleDescriptor role, final null ); future = new PlainActionFuture<>(); - rolesStore.putRole(putRoleRequest, dlsRole, future); + putRole(rolesStore, dlsRole, future); e = expectThrows(ElasticsearchSecurityException.class, future::actionGet); assertThat(e.getMessage(), containsString("field and document level security")); @@ -396,22 +459,14 @@ void innerPutRole(final PutRoleRequest request, final RoleDescriptor role, final null ); future = new PlainActionFuture<>(); - rolesStore.putRole(putRoleRequest, flsDlsRole, future); + putRole(rolesStore, flsDlsRole, future); e = expectThrows(ElasticsearchSecurityException.class, future::actionGet); assertThat(e.getMessage(), containsString("field and document level security")); - 
- RoleDescriptor noFlsDlsRole = new RoleDescriptor( - "no_fls_dls", - null, - new IndicesPrivileges[] { IndicesPrivileges.builder().indices("*").privileges("READ").build() }, - null - ); - future = new PlainActionFuture<>(); - rolesStore.putRole(putRoleRequest, noFlsDlsRole, future); - assertTrue(future.actionGet()); } - public void testPutRoleWithRemotePrivsUnsupportedMinNodeVersion() { + public void testPutRoleWithRemotePrivsUnsupportedMinNodeVersion() throws IOException { + // Init for validation + new ReservedRolesStore(Set.of("superuser")); enum TEST_MODE { REMOTE_INDICES_PRIVS, REMOTE_CLUSTER_PRIVS, @@ -449,7 +504,6 @@ enum TEST_MODE { final ClusterService clusterService = mockClusterServiceWithMinNodeVersion(minTransportVersion); final XPackLicenseState licenseState = mock(XPackLicenseState.class); - final AtomicBoolean methodCalled = new AtomicBoolean(false); final SecuritySystemIndices systemIndices = new SecuritySystemIndices(clusterService.getSettings()); final FeatureService featureService = mock(FeatureService.class); @@ -462,21 +516,13 @@ enum TEST_MODE { licenseState, securityIndex, clusterService, - mock(FeatureService.class) - ) { - @Override - void innerPutRole(final PutRoleRequest request, final RoleDescriptor role, final ActionListener listener) { - if (methodCalled.compareAndSet(false, true)) { - listener.onResponse(true); - } else { - fail("method called more than once!"); - } - } - }; + mock(FeatureService.class), + mock(ReservedRoleNameChecker.class), + mock(NamedXContentRegistry.class) + ); // setup the roles store so the security index exists securityIndex.clusterChanged(new ClusterChangedEvent("source", getClusterStateWithSecurityIndex(), getEmptyClusterState())); - PutRoleRequest putRoleRequest = new PutRoleRequest(); RoleDescriptor remoteIndicesRole = new RoleDescriptor( "remote", null, @@ -492,7 +538,7 @@ void innerPutRole(final PutRoleRequest request, final RoleDescriptor role, final null ); PlainActionFuture future = new PlainActionFuture<>(); - rolesStore.putRole(putRoleRequest, remoteIndicesRole, future); + putRole(rolesStore, remoteIndicesRole, future); IllegalStateException e = expectThrows( IllegalStateException.class, String.format(Locale.ROOT, "expected IllegalStateException, but not thrown for mode [%s]", testMode), @@ -515,22 +561,7 @@ void innerPutRole(final PutRoleRequest request, final RoleDescriptor role, final public void testGetRoleWhenDisabled() throws Exception { final Settings settings = Settings.builder().put(NativeRolesStore.NATIVE_ROLES_ENABLED, "false").build(); - final Client client = mock(Client.class); - final ClusterService clusterService = mock(ClusterService.class); - final XPackLicenseState licenseState = mock(XPackLicenseState.class); - final SecuritySystemIndices systemIndices = new SecuritySystemIndices(settings); - final FeatureService featureService = mock(FeatureService.class); - systemIndices.init(client, featureService, clusterService); - final SecurityIndexManager securityIndex = systemIndices.getMainIndexManager(); - - final NativeRolesStore store = new NativeRolesStore( - settings, - client, - licenseState, - securityIndex, - clusterService, - mock(FeatureService.class) - ); + NativeRolesStore store = createRoleStoreForTest(settings); final PlainActionFuture future = new PlainActionFuture<>(); store.getRoleDescriptors(Set.of(randomAlphaOfLengthBetween(4, 12)), future); @@ -541,6 +572,299 @@ public void testGetRoleWhenDisabled() throws Exception { Mockito.verifyNoInteractions(client); } + public void testReservedRole() 
{ + final NativeRolesStore store = createRoleStoreForTest(); + final String roleName = randomFrom(new ArrayList<>(ReservedRolesStore.names())); + + RoleDescriptor roleDescriptor = new RoleDescriptor( + roleName, + randomSubsetOf(ClusterPrivilegeResolver.names()).toArray(String[]::new), + new IndicesPrivileges[] { + IndicesPrivileges.builder().privileges("READ").indices("*").grantedFields("*").deniedFields("foo").build() }, + randomApplicationPrivileges(), + randomClusterPrivileges(), + generateRandomStringArray(5, randomIntBetween(2, 8), true, true), + randomRoleDescriptorMetadata(ESTestCase.randomBoolean()), + null, + randomRemoteIndicesPrivileges(1, 2), + null, + null, + randomAlphaOfLengthBetween(0, 20) + ); + ActionRequestValidationException exception = assertThrows(ActionRequestValidationException.class, () -> { + PlainActionFuture future = new PlainActionFuture<>(); + putRole(store, roleDescriptor, future); + future.actionGet(); + }); + + assertThat(exception.getMessage(), containsString("is reserved and may not be used")); + } + + public void testValidRole() throws IOException { + testValidRole(randomFrom("admin", "dept_a", "restricted")); + } + + public void testValidRoleWithInternalRoleName() throws IOException { + testValidRole(AuthenticationTestHelper.randomInternalRoleName()); + } + + private void testValidRole(String roleName) throws IOException { + final NativeRolesStore rolesStore = createRoleStoreForTest(); + + RoleDescriptor roleDescriptor = new RoleDescriptor( + roleName, + randomSubsetOf(ClusterPrivilegeResolver.names()).toArray(String[]::new), + new IndicesPrivileges[] { + IndicesPrivileges.builder().privileges("READ").indices("*").grantedFields("*").deniedFields("foo").build() }, + randomApplicationPrivileges(), + randomClusterPrivileges(), + generateRandomStringArray(5, randomIntBetween(2, 8), true, true), + null, + null, + null, + null, + null, + null + ); + + putRole(rolesStore, roleDescriptor, ActionListener.wrap(response -> fail(), exception -> fail())); + boolean indexCalled = false; + try { + verify(client, times(1)).index(any(IndexRequest.class), any()); + indexCalled = true; + } catch (AssertionError assertionError) { + // Index wasn't called + } + + boolean bulkCalled = false; + try { + verify(client, times(1)).bulk(any(BulkRequest.class), any()); + bulkCalled = true; + } catch (AssertionError assertionError) { + // bulk wasn't called + } + + assertTrue(bulkCalled || indexCalled); + } + + public void testCreationOfRoleWithMalformedQueryJsonFails() throws IOException { + final NativeRolesStore rolesStore = createRoleStoreForTest(); + + String[] malformedQueryJson = new String[] { + "{ \"match_all\": { \"unknown_field\": \"\" } }", + "{ malformed JSON }", + "{ \"unknown\": {\"\"} }", + "{}" }; + + BytesReference query = new BytesArray(randomFrom(malformedQueryJson)); + + RoleDescriptor roleDescriptor = new RoleDescriptor( + "test", + randomSubsetOf(ClusterPrivilegeResolver.names()).toArray(String[]::new), + new IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder() + .indices("idx1") + .privileges(new String[] { "read" }) + .query(query) + .allowRestrictedIndices(randomBoolean()) + .build() }, + randomApplicationPrivileges(), + randomClusterPrivileges(), + null, + null, + null, + null, + null, + null, + null + ); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + + putRole(rolesStore, roleDescriptor, ActionListener.wrap(responseRef::set, throwableRef::set)); + + 
assertThat(responseRef.get(), is(nullValue())); + assertThat(throwableRef.get(), is(notNullValue())); + Throwable t = throwableRef.get(); + assertThat(t, instanceOf(ElasticsearchParseException.class)); + assertThat( + t.getMessage(), + containsString( + "failed to parse field 'query' for indices [" + + Strings.arrayToCommaDelimitedString(new String[] { "idx1" }) + + "] at index privilege [0] of role descriptor" + ) + ); + } + + public void testCreationOfRoleWithUnsupportedQueryFails() throws IOException { + final NativeRolesStore rolesStore = createRoleStoreForTest(); + + String hasChildQuery = "{ \"has_child\": { \"type\": \"child\", \"query\": { \"match_all\": {} } } }"; + String hasParentQuery = "{ \"has_parent\": { \"parent_type\": \"parent\", \"query\": { \"match_all\": {} } } }"; + + BytesReference query = new BytesArray(randomFrom(hasChildQuery, hasParentQuery)); + + RoleDescriptor roleDescriptor = new RoleDescriptor( + "test", + randomSubsetOf(ClusterPrivilegeResolver.names()).toArray(String[]::new), + new IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder() + .indices("idx1") + .privileges(new String[] { "read" }) + .query(query) + .allowRestrictedIndices(randomBoolean()) + .build() }, + randomApplicationPrivileges(), + randomClusterPrivileges(), + null, + null, + null, + null, + null, + null, + null + ); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + putRole(rolesStore, roleDescriptor, ActionListener.wrap(responseRef::set, throwableRef::set)); + + assertThat(responseRef.get(), is(nullValue())); + assertThat(throwableRef.get(), is(notNullValue())); + Throwable t = throwableRef.get(); + assertThat(t, instanceOf(ElasticsearchParseException.class)); + assertThat( + t.getMessage(), + containsString( + "failed to parse field 'query' for indices [" + + Strings.arrayToCommaDelimitedString(new String[] { "idx1" }) + + "] at index privilege [0] of role descriptor" + ) + ); + } + + public void testManyValidRoles() throws IOException { + final NativeRolesStore rolesStore = createRoleStoreForTest(); + List roleNames = List.of("test", "admin", "123"); + + List roleDescriptors = roleNames.stream() + .map( + roleName -> new RoleDescriptor( + roleName, + randomSubsetOf(ClusterPrivilegeResolver.names()).toArray(String[]::new), + new IndicesPrivileges[] { + IndicesPrivileges.builder().privileges("READ").indices("*").grantedFields("*").deniedFields("foo").build() }, + randomApplicationPrivileges(), + randomClusterPrivileges(), + generateRandomStringArray(5, randomIntBetween(2, 8), true, true), + null, + null, + null, + null, + null, + null + ) + ) + .toList(); + + AtomicReference response = new AtomicReference<>(); + AtomicReference exception = new AtomicReference<>(); + rolesStore.putRoles(WriteRequest.RefreshPolicy.IMMEDIATE, roleDescriptors, ActionListener.wrap(response::set, exception::set)); + assertNull(exception.get()); + verify(client, times(1)).bulk(any(BulkRequest.class), any()); + } + + public void testBulkDeleteRoles() { + final NativeRolesStore rolesStore = createRoleStoreForTest(); + + AtomicReference response = new AtomicReference<>(); + AtomicReference exception = new AtomicReference<>(); + rolesStore.deleteRoles( + List.of("test-role-1", "test-role-2", "test-role-3"), + WriteRequest.RefreshPolicy.IMMEDIATE, + ActionListener.wrap(response::set, exception::set) + ); + assertNull(exception.get()); + verify(client, times(1)).bulk(any(BulkRequest.class), any()); + } + + public void 
testBulkDeleteReservedRole() { + final NativeRolesStore rolesStore = createRoleStoreForTest(); + + AtomicReference response = new AtomicReference<>(); + AtomicReference exception = new AtomicReference<>(); + rolesStore.deleteRoles( + List.of("superuser"), + WriteRequest.RefreshPolicy.IMMEDIATE, + ActionListener.wrap(response::set, exception::set) + ); + assertNull(exception.get()); + assertThat(response.get().getItems().size(), equalTo(1)); + BulkRolesResponse.Item item = response.get().getItems().get(0); + assertThat(item.getCause().getMessage(), equalTo("role [superuser] is reserved and cannot be deleted")); + assertThat(item.getRoleName(), equalTo("superuser")); + + verify(client, times(0)).bulk(any(BulkRequest.class), any()); + } + + /** + * Make sure all top level fields for a RoleDescriptor have default values to make sure they can be set to empty in an upsert + * call to the roles API + */ + public void testAllTopFieldsHaveEmptyDefaultsForUpsert() throws IOException, IllegalAccessException { + final NativeRolesStore rolesStore = createRoleStoreForTest(); + RoleDescriptor allNullDescriptor = new RoleDescriptor( + "all-null-descriptor", + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + + Set fieldsWithoutDefaultValue = Set.of( + RoleDescriptor.Fields.INDEX, + RoleDescriptor.Fields.NAMES, + RoleDescriptor.Fields.ALLOW_RESTRICTED_INDICES, + RoleDescriptor.Fields.RESOURCES, + RoleDescriptor.Fields.QUERY, + RoleDescriptor.Fields.PRIVILEGES, + RoleDescriptor.Fields.CLUSTERS, + RoleDescriptor.Fields.APPLICATION, + RoleDescriptor.Fields.FIELD_PERMISSIONS, + RoleDescriptor.Fields.FIELD_PERMISSIONS_2X, + RoleDescriptor.Fields.GRANT_FIELDS, + RoleDescriptor.Fields.EXCEPT_FIELDS, + RoleDescriptor.Fields.METADATA_FLATTENED, + RoleDescriptor.Fields.TRANSIENT_METADATA, + RoleDescriptor.Fields.RESTRICTION, + RoleDescriptor.Fields.WORKFLOWS + ); + + String serializedOutput = Strings.toString(rolesStore.createRoleXContentBuilder(allNullDescriptor)); + Field[] fields = RoleDescriptor.Fields.class.getFields(); + + for (Field field : fields) { + ParseField fieldValue = (ParseField) field.get(null); + if (fieldsWithoutDefaultValue.contains(fieldValue) == false) { + assertThat( + "New RoleDescriptor field without a default value detected. " + + "Set a value or add to excluded list if not expected to be set to empty through role APIs", + serializedOutput, + containsString(fieldValue.getPreferredName()) + ); + } + } + } + private ClusterService mockClusterServiceWithMinNodeVersion(TransportVersion transportVersion) { final ClusterService clusterService = mock(ClusterService.class, Mockito.RETURNS_DEEP_STUBS); when(clusterService.state().getMinTransportVersion()).thenReturn(transportVersion); @@ -552,8 +876,14 @@ private ClusterState getClusterStateWithSecurityIndex() { final boolean withAlias = randomBoolean(); final String securityIndexName = SECURITY_MAIN_ALIAS + (withAlias ? 
"-" + randomAlphaOfLength(5) : ""); + Settings.Builder settingsBuilder = indexSettings(IndexVersion.current(), 1, 0); + settingsBuilder.put(INDEX_FORMAT_SETTING.getKey(), SecuritySystemIndices.INTERNAL_MAIN_INDEX_FORMAT); + settingsBuilder.put(VERSION_META_KEY, 1); + MappingMetadata mappingMetadata = mock(MappingMetadata.class); + when(mappingMetadata.sourceAsMap()).thenReturn(Map.of("_meta", Map.of(VERSION_META_KEY, 1))); + when(mappingMetadata.getSha256()).thenReturn("test"); Metadata metadata = Metadata.builder() - .put(IndexMetadata.builder(securityIndexName).settings(indexSettings(IndexVersion.current(), 1, 0))) + .put(IndexMetadata.builder(securityIndexName).putMapping(mappingMetadata).settings(settingsBuilder)) .build(); if (withAlias) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/CacheInvalidatorRegistryTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/CacheInvalidatorRegistryTests.java index 698809beb6d30..e3b00dfbcc6b8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/CacheInvalidatorRegistryTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/CacheInvalidatorRegistryTests.java @@ -61,6 +61,7 @@ public void testSecurityIndexStateChangeWillInvalidateAllRegisteredInvalidators( true, true, true, + true, null, new SystemIndexDescriptor.MappingsVersion(SecurityMainIndexMappingVersion.latest().id(), 0), null, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityMigrationExecutorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityMigrationExecutorTests.java index 3c3b322c28a2f..0f63e5302a5f1 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityMigrationExecutorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityMigrationExecutorTests.java @@ -43,6 +43,8 @@ public class SecurityMigrationExecutorTests extends ESTestCase { private boolean clientShouldThrowException = false; + private AllocatedPersistentTask mockTask = mock(AllocatedPersistentTask.class); + @Before public void setUpMocks() { threadPool = mock(ThreadPool.class); @@ -78,8 +80,8 @@ public void testSuccessfulMigration() { client, new TreeMap<>(Map.of(1, generateMigration(migrateInvocations, true), 2, generateMigration(migrateInvocations, true))) ); - AllocatedPersistentTask mockTask = mock(AllocatedPersistentTask.class); - securityMigrationExecutor.nodeOperation(mockTask, mock(SecurityMigrationTaskParams.class), mock(PersistentTaskState.class)); + + securityMigrationExecutor.nodeOperation(mockTask, new SecurityMigrationTaskParams(0, true), mock(PersistentTaskState.class)); verify(mockTask, times(1)).markAsCompleted(); verify(mockTask, times(0)).markAsFailed(any()); assertEquals(2, updateIndexMigrationVersionActionInvocations); @@ -105,8 +107,7 @@ public void testNoMigrationMeetsRequirements() { ) ); - AllocatedPersistentTask mockTask = mock(AllocatedPersistentTask.class); - securityMigrationExecutor.nodeOperation(mockTask, mock(SecurityMigrationTaskParams.class), mock(PersistentTaskState.class)); + securityMigrationExecutor.nodeOperation(mockTask, new SecurityMigrationTaskParams(0, true), mock(PersistentTaskState.class)); verify(mockTask, times(1)).markAsCompleted(); verify(mockTask, times(0)).markAsFailed(any()); assertEquals(0, 
updateIndexMigrationVersionActionInvocations); @@ -136,8 +137,7 @@ public void testPartialMigration() { ) ); - AllocatedPersistentTask mockTask = mock(AllocatedPersistentTask.class); - securityMigrationExecutor.nodeOperation(mockTask, mock(SecurityMigrationTaskParams.class), mock(PersistentTaskState.class)); + securityMigrationExecutor.nodeOperation(mockTask, new SecurityMigrationTaskParams(0, true), mock(PersistentTaskState.class)); verify(mockTask, times(1)).markAsCompleted(); verify(mockTask, times(0)).markAsFailed(any()); assertEquals(2, updateIndexMigrationVersionActionInvocations); @@ -154,11 +154,7 @@ public void testNoMigrationNeeded() { new TreeMap<>(Map.of(1, generateMigration(migrateInvocations, true), 2, generateMigration(migrateInvocations, true))) ); - AllocatedPersistentTask mockTask = mock(AllocatedPersistentTask.class); - SecurityMigrationTaskParams taskParams = mock(SecurityMigrationTaskParams.class); - when(taskParams.getMigrationVersion()).thenReturn(7); - - securityMigrationExecutor.nodeOperation(mockTask, taskParams, mock(PersistentTaskState.class)); + securityMigrationExecutor.nodeOperation(mockTask, new SecurityMigrationTaskParams(7, true), mock(PersistentTaskState.class)); verify(mockTask, times(1)).markAsCompleted(); verify(mockTask, times(0)).markAsFailed(any()); assertEquals(0, updateIndexMigrationVersionActionInvocations); @@ -190,13 +186,11 @@ public int minMappingVersion() { })) ); - AllocatedPersistentTask mockTask = mock(AllocatedPersistentTask.class); - assertThrows( IllegalStateException.class, () -> securityMigrationExecutor.nodeOperation( mockTask, - mock(SecurityMigrationTaskParams.class), + new SecurityMigrationTaskParams(0, true), mock(PersistentTaskState.class) ) ); @@ -212,8 +206,7 @@ public void testUpdateMigrationVersionThrowsException() { new TreeMap<>(Map.of(1, generateMigration(migrateInvocations, true), 2, generateMigration(migrateInvocations, true))) ); clientShouldThrowException = true; - AllocatedPersistentTask mockTask = mock(AllocatedPersistentTask.class); - securityMigrationExecutor.nodeOperation(mockTask, mock(SecurityMigrationTaskParams.class), mock(PersistentTaskState.class)); + securityMigrationExecutor.nodeOperation(mockTask, new SecurityMigrationTaskParams(0, true), mock(PersistentTaskState.class)); verify(mockTask, times(1)).markAsFailed(any()); verify(mockTask, times(0)).markAsCompleted(); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransportCloseNotifyTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransportCloseNotifyTests.java new file mode 100644 index 0000000000000..ec2881b989d0b --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransportCloseNotifyTests.java @@ -0,0 +1,277 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security.transport.netty4; + +import io.netty.bootstrap.Bootstrap; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpObjectAggregator; +import io.netty.handler.codec.http.HttpRequestEncoder; +import io.netty.handler.codec.http.HttpResponseDecoder; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.SslHandler; +import io.netty.handler.ssl.util.InsecureTrustManagerFactory; +import io.netty.util.concurrent.Future; + +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.http.AbstractHttpServerTransportTestCase; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.http.netty4.Netty4HttpServerTransport; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.telemetry.tracing.Tracer; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.netty4.SharedGroupFactory; +import org.elasticsearch.transport.netty4.TLSConfig; +import org.elasticsearch.xpack.core.ssl.SSLService; + +import java.security.cert.CertificateException; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.BlockingDeque; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.LinkedBlockingDeque; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.Consumer; + +import javax.net.ssl.SSLException; + +import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForNodePEMFiles; + +public class SecurityNetty4HttpServerTransportCloseNotifyTests extends AbstractHttpServerTransportTestCase { + + private static T safePoll(BlockingQueue queue) { + try { + var t = queue.poll(5, TimeUnit.SECONDS); + if (t == null) { + throw new AssertionError("queue is empty"); + } else { + return t; + } + } catch (Exception e) { + throw new AssertionError(e); + } + } + + private static void safeAwait(Future nettyFuture) { + try { + nettyFuture.get(5, TimeUnit.SECONDS); + } catch (InterruptedException | ExecutionException | TimeoutException e) { + throw new AssertionError(e); + } + } + + /** + * Setup {@link Netty4HttpServerTransport} with SSL enabled and self-signed certificate. + * All HTTP requests accumulate in the dispatcher reqQueue. + * The server will not reply to request automatically, to send response poll the queue. 
+ */ + private HttpServer setupHttpServer(String tlsProtocols) throws CertificateException { + var threadPool = new TestThreadPool("tls-close-notify"); + var dispatcher = new QueuedDispatcher(); + final Settings.Builder builder = Settings.builder(); + addSSLSettingsForNodePEMFiles(builder, "xpack.security.http.", randomBoolean()); + var settings = builder.put("xpack.security.http.ssl.enabled", true) + .put("path.home", createTempDir()) + .put("xpack.security.http.ssl.supported_protocols", tlsProtocols) + .build(); + var env = TestEnvironment.newEnvironment(settings); + var sslService = new SSLService(env); + var server = new Netty4HttpServerTransport( + settings, + new NetworkService(Collections.emptyList()), + threadPool, + xContentRegistry(), + dispatcher, + randomClusterSettings(), + new SharedGroupFactory(settings), + Tracer.NOOP, + new TLSConfig(sslService.getHttpTransportSSLConfiguration(), sslService::createSSLEngine), + null, + randomFrom((httpPreRequest, channel, listener) -> listener.onResponse(null), null) + ); + server.start(); + return new HttpServer(server, dispatcher, threadPool); + } + + /** + * Set up a Netty HTTPs client and connect to server. + * Configured with self-signed certificate trust. + * Server responses accumulate in the respQueue, and exceptions in the errQueue. + */ + private HttpClient setupHttpClient(HttpServer server) throws SSLException, InterruptedException { + var clientSslCtx = SslContextBuilder.forClient().trustManager(InsecureTrustManagerFactory.INSTANCE).build(); + var remoteAddr = randomFrom(server.netty.boundAddress().boundAddresses()); + var respQueue = new LinkedBlockingDeque(); + var errQueue = new LinkedBlockingDeque(); + var bootstrap = new Bootstrap().group(new NioEventLoopGroup(1)) + .channel(NioSocketChannel.class) + .remoteAddress(remoteAddr.getAddress(), remoteAddr.getPort()) + .handler(new ChannelInitializer() { + @Override + protected void initChannel(SocketChannel ch) { + var p = ch.pipeline(); + p.addLast(clientSslCtx.newHandler(ch.alloc())); + p.addLast(new HttpRequestEncoder()); + p.addLast(new HttpResponseDecoder()); + p.addLast(new HttpObjectAggregator(server.netty.handlingSettings.maxContentLength() * 2)); + p.addLast(new SimpleChannelInboundHandler() { + @Override + protected void channelRead0(ChannelHandlerContext ctx, FullHttpResponse msg) { + respQueue.add(msg); + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { + errQueue.add(cause); + } + }); + } + }); + var channel = bootstrap.connect().sync().channel(); + return new HttpClient(bootstrap, channel, respQueue, errQueue); + } + + /** + * Setup server and client, establish ssl connection, blocks until handshake is done + */ + private ConnectionCtx connectClientAndServer(String tlsVersion) { + try { + var server = setupHttpServer(tlsVersion); + var client = setupHttpClient(server); + var ssl = client.channel.pipeline().get(SslHandler.class); + safeAwait(ssl.handshakeFuture()); + assertEquals(tlsVersion, ssl.engine().getSession().getProtocol()); + return new ConnectionCtx(tlsVersion, server, client); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private void runForAllTlsVersions(Consumer test) { + List.of("TLSv1.2", "TLSv1.3").forEach(test); + } + + /** + * This test ensures that sending close_notify from the client on the idle channel trigger close connection from the server. 
+ */ + public void testCloseIdleConnection() { + runForAllTlsVersions(tlsVersion -> { + try (var ctx = connectClientAndServer(tlsVersion)) { + var ssl = ctx.client.channel.pipeline().get(SslHandler.class); + ssl.closeOutbound(); + safeAwait(ctx.client.channel.closeFuture()); + } + }); + } + + /** + * This tests ensures that sending close_notify after HTTP response close the channel immediately. + * It should be similar to idle test, but in this test we await http request and response. + */ + public void testSendCloseNotifyAfterHttpResponse() { + runForAllTlsVersions(tlsVersion -> { + try (var ctx = connectClientAndServer(tlsVersion)) { + ctx.client.channel.writeAndFlush(new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/index")); + var serverRequestCtx = safePoll(ctx.server.dispatcher.reqQueue); + serverRequestCtx.restChannel.sendResponse(new RestResponse(RestStatus.OK, "")); + safePoll(ctx.client.respQueue); + var ssl = ctx.client.channel.pipeline().get(SslHandler.class); + ssl.closeOutbound(); + safeAwait(ctx.client.channel.closeFuture()); + } + }); + } + + /** + * This test ensures that sending close_notify with outstanding requests close channel immediately. + */ + public void testSendCloseNotifyBeforeHttpResponse() { + runForAllTlsVersions(tlsVersion -> { + try (var ctx = connectClientAndServer(tlsVersion)) { + var server = ctx.server; + var client = ctx.client; + + var nRequests = randomIntBetween(1, 5); + for (int i = 0; i < nRequests; i++) { + client.channel.writeAndFlush(new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/index")); + } + assertBusy(() -> assertEquals(nRequests, server.dispatcher.reqQueue.size())); + + // after the server receives requests send close_notify, before server responses + var ssl = client.channel.pipeline().get(SslHandler.class); + ssl.closeOutbound(); + + safeAwait(ctx.client.channel.closeFuture()); + assertTrue(client.errQueue.isEmpty()); + assertTrue(client.respQueue.isEmpty()); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + + } + + private record HttpServer(Netty4HttpServerTransport netty, QueuedDispatcher dispatcher, ThreadPool threadPool) {} + + private record HttpClient( + Bootstrap netty, + Channel channel, + BlockingDeque respQueue, + BlockingDeque errQueue + ) {} + + private record ConnectionCtx(String tlsProtocol, HttpServer server, HttpClient client) implements AutoCloseable { + + @Override + public void close() { + // need to release not consumed requests, will complain about buffer leaks after GC + server.dispatcher.reqQueue.forEach(r -> r.request.getHttpRequest().release()); + server.netty.stop(); + server.threadPool.shutdownNow(); + safeAwait(client.netty.config().group().shutdownGracefully()); + } + } + + private static class QueuedDispatcher implements HttpServerTransport.Dispatcher { + BlockingQueue reqQueue = new LinkedBlockingDeque<>(); + BlockingDeque errQueue = new LinkedBlockingDeque<>(); + + @Override + public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { + reqQueue.add(new ReqCtx(request, channel, threadContext)); + } + + @Override + public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, Throwable cause) { + errQueue.add(new ErrCtx(channel, threadContext, cause)); + } + + record ReqCtx(RestRequest request, RestChannel restChannel, ThreadContext threadContext) {} + + record ErrCtx(RestChannel restChannel, ThreadContext threadContext, Throwable cause) {} + } + +} diff --git 
a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java index 377016e80f386..33965eca83aee 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java @@ -83,6 +83,7 @@ public TransportGetShutdownStatusAction( ) { super( GetShutdownStatusAction.NAME, + false, transportService, clusterService, threadPool, diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java index 1bb8e0183b2ea..207c683de0f49 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java @@ -40,15 +40,21 @@ import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata; import org.elasticsearch.xpack.core.slm.action.DeleteSnapshotLifecycleAction; import org.elasticsearch.xpack.core.slm.action.PutSnapshotLifecycleAction; +import org.junit.Assert; import java.io.IOException; -import java.util.Collections; import java.util.List; +import java.util.Set; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.doAnswer; @@ -69,7 +75,7 @@ private TransformState processJSON(ReservedSnapshotAction action, TransformState public void testDependencies() { var action = new ReservedSnapshotAction(); - assertTrue(action.optionalDependencies().contains(ReservedRepositoryAction.NAME)); + assertThat(action.optionalDependencies(), contains(ReservedRepositoryAction.NAME)); } public void testValidationFails() { @@ -79,7 +85,7 @@ public void testValidationFails() { ClusterState state = ClusterState.builder(clusterName).build(); ReservedSnapshotAction action = new ReservedSnapshotAction(); - TransformState prevState = new TransformState(state, Collections.emptySet()); + TransformState prevState = new TransformState(state, Set.of()); String badPolicyJSON = """ { @@ -100,9 +106,9 @@ public void testValidationFails() { } }"""; - assertEquals( - "Required [schedule]", - expectThrows(IllegalArgumentException.class, () -> processJSON(action, prevState, badPolicyJSON)).getMessage() + assertThat( + expectThrows(IllegalArgumentException.class, () -> processJSON(action, prevState, badPolicyJSON)).getMessage(), + is("Required [schedule]") ); } @@ -121,10 +127,10 @@ public void testActionAddRemove() throws Exception { String emptyJSON = ""; - TransformState prevState = new TransformState(state, Collections.emptySet()); + TransformState prevState = new TransformState(state, Set.of()); TransformState updatedState = processJSON(action, prevState, emptyJSON); - 
assertEquals(0, updatedState.keys().size()); + assertThat(updatedState.keys(), empty()); assertEquals(prevState.state(), updatedState.state()); String twoPoliciesJSON = """ @@ -337,9 +343,9 @@ public void testOperatorControllerFromJSONContent() throws IOException { AtomicReference x = new AtomicReference<>(); try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, testJSON)) { - controller.process("operator", parser, (e) -> x.set(e)); + controller.process("operator", parser, x::set); - assertTrue(x.get() instanceof IllegalStateException); + assertThat(x.get(), instanceOf(IllegalStateException.class)); assertThat(x.get().getMessage(), containsString("Error processing state change request for operator")); } @@ -357,11 +363,7 @@ public void testOperatorControllerFromJSONContent() throws IOException { ); try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, testJSON)) { - controller.process("operator", parser, (e) -> { - if (e != null) { - fail("Should not fail"); - } - }); + controller.process("operator", parser, Assert::assertNull); } } @@ -375,7 +377,7 @@ public void testDeleteSLMReservedStateHandler() { mock(ActionFilters.class), mock(IndexNameExpressionResolver.class) ); - assertEquals(ReservedSnapshotAction.NAME, deleteAction.reservedStateHandlerName().get()); + assertThat(deleteAction.reservedStateHandlerName().get(), equalTo(ReservedSnapshotAction.NAME)); var request = new DeleteSnapshotLifecycleAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "daily-snapshots1"); assertThat(deleteAction.modifiedKeys(request), containsInAnyOrder("daily-snapshots1")); @@ -391,7 +393,7 @@ public void testPutSLMReservedStateHandler() throws Exception { mock(ActionFilters.class), mock(IndexNameExpressionResolver.class) ); - assertEquals(ReservedSnapshotAction.NAME, putAction.reservedStateHandlerName().get()); + assertThat(putAction.reservedStateHandlerName().get(), equalTo(ReservedSnapshotAction.NAME)); String json = """ { diff --git a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java index caae6dd393a0c..d2e5896a4cf77 100644 --- a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java +++ b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; -import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; @@ -25,7 +24,6 @@ import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobContainer; import 
org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.blobstore.support.FilterBlobContainer; @@ -1563,26 +1561,23 @@ private void indexDocs(String indexName, int docIdOffset, int docCount) throws E // Ensure that the safe commit == latest commit assertBusy(() -> { - ShardStats stats = indicesAdmin().prepareStats(indexName) - .clear() - .get() - .asMap() - .entrySet() - .stream() - .filter(e -> e.getKey().shardId().getId() == 0) - .map(Map.Entry::getValue) - .findFirst() - .orElse(null); - assertThat(stats, is(notNullValue())); - assertThat(stats.getSeqNoStats(), is(notNullValue())); - - assertThat(stats.getSeqNoStats().getMaxSeqNo(), is(greaterThan(-1L))); - assertThat(stats.getSeqNoStats().getGlobalCheckpoint(), is(greaterThan(-1L))); - assertThat( - Strings.toString(stats.getSeqNoStats()), - stats.getSeqNoStats().getMaxSeqNo(), - equalTo(stats.getSeqNoStats().getGlobalCheckpoint()) - ); + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + var indexShardRoutingTable = clusterState.routingTable().index(indexName).shard(0); + assertThat(indexShardRoutingTable, is(notNullValue())); + + var assignedNodeId = indexShardRoutingTable.primaryShard().currentNodeId(); + var assignedNodeName = clusterState.nodes().resolveNode(assignedNodeId).getName(); + + var indexShard = internalCluster().getInstance(IndicesService.class, assignedNodeName) + .indexService(resolveIndex(indexName)) + .getShard(0); + assertThat(indexShard, is(notNullValue())); + + // The safe commit is determined using the last synced global checkpoint, hence we should wait until the translog is synced + // to cover cases where the translog is synced asynchronously + var lastSyncedGlobalCheckpoint = indexShard.getLastSyncedGlobalCheckpoint(); + var maxSeqNo = indexShard.seqNoStats().getMaxSeqNo(); + assertThat(lastSyncedGlobalCheckpoint, equalTo(maxSeqNo)); }, 60, TimeUnit.SECONDS); } diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/10_analyze.yml b/x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/10_analyze.yml index e5babad76eb05..bcee1691e033c 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/10_analyze.yml +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/10_analyze.yml @@ -175,6 +175,6 @@ setup: - match: { status: 500 } - match: { error.type: repository_verification_exception } - - match: { error.reason: "/.*test_repo_slow..analysis.failed.*/" } + - match: { error.reason: "/.*test_repo_slow..Repository.analysis.timed.out.*/" } - match: { error.root_cause.0.type: repository_verification_exception } - match: { error.root_cause.0.reason: "/.*test_repo_slow..analysis.timed.out.after..1s.*/" } diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisFailureIT.java b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisFailureIT.java index 7715b9e8d42b8..2ca5685c83db3 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisFailureIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisFailureIT.java @@ -11,6 +11,7 @@ import 
org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; @@ -363,6 +364,17 @@ public BytesReference onContendedCompareAndExchange(BytesRegister register, Byte } } + private static void assertAnalysisFailureMessage(String message) { + assertThat( + message, + allOf( + containsString("Elasticsearch observed the storage system underneath this repository behaved incorrectly"), + containsString("not suitable for use with Elasticsearch snapshots"), + containsString(ReferenceDocs.SNAPSHOT_REPOSITORY_ANALYSIS.toString()) + ) + ); + } + public void testTimesOutSpinningRegisterAnalysis() { final RepositoryAnalyzeAction.Request request = new RepositoryAnalyzeAction.Request("test-repo"); request.timeout(TimeValue.timeValueMillis(between(1, 1000))); @@ -375,7 +387,13 @@ public boolean compareAndExchangeReturnsWitness(String key) { } }); final var exception = expectThrows(RepositoryVerificationException.class, () -> analyseRepository(request)); - assertThat(exception.getMessage(), containsString("analysis failed")); + assertThat( + exception.getMessage(), + allOf( + containsString("Repository analysis timed out. Consider specifying a longer timeout"), + containsString(ReferenceDocs.SNAPSHOT_REPOSITORY_ANALYSIS.toString()) + ) + ); assertThat( asInstanceOf(RepositoryVerificationException.class, exception.getCause()).getMessage(), containsString("analysis timed out") @@ -391,7 +409,7 @@ public boolean compareAndExchangeReturnsWitness(String key) { } }); final var exception = expectThrows(RepositoryVerificationException.class, () -> analyseRepository(request)); - assertThat(exception.getMessage(), containsString("analysis failed")); + assertAnalysisFailureMessage(exception.getMessage()); assertThat( asInstanceOf(RepositoryVerificationException.class, ExceptionsHelper.unwrapCause(exception.getCause())).getMessage(), allOf(containsString("uncontended register operation failed"), containsString("did not observe any value")) @@ -407,7 +425,7 @@ public boolean acceptsEmptyRegister() { } }); final var exception = expectThrows(RepositoryVerificationException.class, () -> analyseRepository(request)); - assertThat(exception.getMessage(), containsString("analysis failed")); + assertAnalysisFailureMessage(exception.getMessage()); final var cause = ExceptionsHelper.unwrapCause(exception.getCause()); if (cause instanceof IOException ioException) { assertThat(ioException.getMessage(), containsString("empty register update rejected")); diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java index 7b82b69a682fa..494d1d3fedcd9 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; +import 
org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.blobstore.BlobContainer; @@ -387,6 +388,9 @@ public static class AsyncAction { private final List responses; private final RepositoryPerformanceSummary.Builder summary = new RepositoryPerformanceSummary.Builder(); + private final RepositoryVerificationException analysisCancelledException; + private final RepositoryVerificationException analysisTimedOutException; + public AsyncAction( TransportService transportService, BlobStoreRepository repository, @@ -410,6 +414,12 @@ public AsyncAction( this.listener = ActionListener.runBefore(listener, () -> cancellationListener.onResponse(null)); responses = new ArrayList<>(request.blobCount); + + this.analysisCancelledException = new RepositoryVerificationException(request.repositoryName, "analysis cancelled"); + this.analysisTimedOutException = new RepositoryVerificationException( + request.repositoryName, + "analysis timed out after [" + request.getTimeout() + "]" + ); } private boolean setFirstFailure(Exception e) { @@ -453,12 +463,7 @@ public void onFailure(Exception e) { assert e instanceof ElasticsearchTimeoutException : e; if (isRunning()) { // if this CAS fails then we're already failing for some other reason, nbd - setFirstFailure( - new RepositoryVerificationException( - request.repositoryName, - "analysis timed out after [" + request.getTimeout() + "]" - ) - ); + setFirstFailure(analysisTimedOutException); } } } @@ -472,7 +477,7 @@ public void run() { cancellationListener.addTimeout(request.getTimeout(), repository.threadPool(), EsExecutors.DIRECT_EXECUTOR_SERVICE); cancellationListener.addListener(new CheckForCancelListener()); - task.addListener(() -> setFirstFailure(new RepositoryVerificationException(request.repositoryName, "analysis cancelled"))); + task.addListener(() -> setFirstFailure(analysisCancelledException)); final Random random = new Random(request.getSeed()); final List nodes = getSnapshotNodes(discoveryNodes); @@ -873,13 +878,20 @@ private void sendResponse(final long listingStartTimeNanos, final long deleteSta ); } else { logger.debug(() -> "analysis of repository [" + request.repositoryName + "] failed", exception); - listener.onFailure( - new RepositoryVerificationException( - request.getRepositoryName(), - "analysis failed, you may need to manually remove [" + blobPath + "]", - exception - ) - ); + + final String failureDetail; + if (exception == analysisCancelledException) { + failureDetail = "Repository analysis was cancelled."; + } else if (exception == analysisTimedOutException) { + failureDetail = Strings.format(""" + Repository analysis timed out. Consider specifying a longer timeout using the [?timeout] request parameter. See \ + [%s] for more information.""", ReferenceDocs.SNAPSHOT_REPOSITORY_ANALYSIS); + } else { + failureDetail = repository.getAnalysisFailureExtraDetail(); + } + listener.onFailure(new RepositoryVerificationException(request.getRepositoryName(), Strings.format(""" + %s Elasticsearch attempted to remove the data it wrote at [%s] but may have left some behind. 
If so, \ + please now remove this data manually.""", failureDetail, blobPath), exception)); } } } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapper.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapper.java index a8f437f476ada..04b194c2ec208 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapper.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapper.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.geo.Orientation; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.logging.DeprecationCategory; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.utils.GeometryValidator; import org.elasticsearch.geometry.utils.WellKnownBinary; @@ -36,7 +35,6 @@ import org.elasticsearch.index.mapper.AbstractShapeGeometryFieldMapper; import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.FieldMapper; -import org.elasticsearch.index.mapper.GeoShapeFieldMapper; import org.elasticsearch.index.mapper.GeoShapeIndexer; import org.elasticsearch.index.mapper.GeoShapeParser; import org.elasticsearch.index.mapper.GeoShapeQueryable; @@ -80,14 +78,8 @@ import java.util.function.Function; /** - * Extension of {@link org.elasticsearch.index.mapper.GeoShapeFieldMapper} that supports docValues - * * FieldMapper for indexing {@link LatLonShape}s. *
<p>
      - * Currently Shapes can only be indexed and can only be queried using - * {@link org.elasticsearch.index.query.GeoShapeQueryBuilder}, consequently - * a lot of behavior in this Mapper is disabled. - * <p>
      * Format supported: * <p>
      * "field" : { @@ -104,8 +96,6 @@ public class GeoShapeWithDocValuesFieldMapper extends AbstractShapeGeometryFieldMapper { public static final String CONTENT_TYPE = "geo_shape"; - private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(GeoShapeFieldMapper.class); - private static Builder builder(FieldMapper in) { return ((GeoShapeWithDocValuesFieldMapper) in).builder; } @@ -173,7 +163,7 @@ private FieldValues scriptValues() { GeometryFieldScript.Factory factory = scriptCompiler.compile(this.script.get(), GeometryFieldScript.CONTEXT); return factory == null ? null - : (lookup, ctx, doc, consumer) -> factory.newFactory(name(), script.get().getParams(), lookup, OnScriptError.FAIL) + : (lookup, ctx, doc, consumer) -> factory.newFactory(leafName(), script.get().getParams(), lookup, OnScriptError.FAIL) .newInstance(ctx) .runForDoc(doc, consumer); } @@ -194,7 +184,7 @@ public GeoShapeWithDocValuesFieldMapper build(MapperBuilderContext context) { ); GeoShapeParser parser = new GeoShapeParser(geometryParser, orientation.get().value()); GeoShapeWithDocValuesFieldType ft = new GeoShapeWithDocValuesFieldType( - context.buildFullName(name()), + context.buildFullName(leafName()), indexed.get(), hasDocValues.get(), stored.get(), @@ -206,7 +196,7 @@ public GeoShapeWithDocValuesFieldMapper build(MapperBuilderContext context) { ); if (script.get() == null) { return new GeoShapeWithDocValuesFieldMapper( - name(), + leafName(), ft, multiFieldsBuilder.build(this, context), copyTo, @@ -216,7 +206,7 @@ public GeoShapeWithDocValuesFieldMapper build(MapperBuilderContext context) { ); } return new GeoShapeWithDocValuesFieldMapper( - name(), + leafName(), ft, multiFieldsBuilder.build(this, context), copyTo, @@ -458,7 +448,7 @@ protected String contentType() { @Override public FieldMapper.Builder getMergeBuilder() { return new Builder( - simpleName(), + leafName(), builder.version, builder.scriptCompiler, builder.ignoreMalformed.getDefaultValue().value(), @@ -476,7 +466,7 @@ public GeoShapeWithDocValuesFieldType fieldType() { protected void checkIncomingMergeType(FieldMapper mergeWith) { if (mergeWith instanceof GeoShapeWithDocValuesFieldMapper == false && CONTENT_TYPE.equals(mergeWith.typeName())) { throw new IllegalArgumentException( - "mapper [" + name() + "] of type [geo_shape] cannot change strategy from [BKD] to [recursive]" + "mapper [" + fullPath() + "] of type [geo_shape] cannot change strategy from [BKD] to [recursive]" ); } super.checkIncomingMergeType(mergeWith); diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java index 1657a3bf7fbce..d98fe7fdfc6ec 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.geo.GeometryFormatterFactory; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.logging.DeprecationCategory; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.Point; @@ -23,7 +22,6 @@ import org.elasticsearch.index.mapper.AbstractPointGeometryFieldMapper; import org.elasticsearch.index.mapper.DocumentParserContext; import 
org.elasticsearch.index.mapper.FieldMapper; -import org.elasticsearch.index.mapper.GeoShapeFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.index.query.SearchExecutionContext; @@ -49,8 +47,6 @@ public class PointFieldMapper extends AbstractPointGeometryFieldMapper { public static final String CONTENT_TYPE = "point"; - private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(GeoShapeFieldMapper.class); - private static Builder builder(FieldMapper in) { return ((PointFieldMapper) in).builder; } @@ -105,14 +101,14 @@ public FieldMapper build(MapperBuilderContext context) { ); } CartesianPointParser parser = new CartesianPointParser( - name(), + leafName(), p -> CartesianPoint.parsePoint(p, ignoreZValue.get().value()), nullValue.get(), ignoreZValue.get().value(), ignoreMalformed.get().value() ); PointFieldType ft = new PointFieldType( - context.buildFullName(name()), + context.buildFullName(leafName()), indexed.get(), stored.get(), hasDocValues.get(), @@ -120,7 +116,7 @@ public FieldMapper build(MapperBuilderContext context) { nullValue.get(), meta.get() ); - return new PointFieldMapper(name(), ft, multiFieldsBuilder.build(this, context), copyTo, parser, this); + return new PointFieldMapper(leafName(), ft, multiFieldsBuilder.build(this, context), copyTo, parser, this); } } @@ -177,7 +173,7 @@ public PointFieldType fieldType() { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), builder.ignoreMalformed.getDefaultValue().value()).init(this); + return new Builder(leafName(), builder.ignoreMalformed.getDefaultValue().value()).init(this); } public static class PointFieldType extends AbstractPointFieldType implements ShapeQueryable { diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java index 83e434f829591..91a118f964064 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java @@ -15,7 +15,6 @@ import org.elasticsearch.common.geo.Orientation; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.logging.DeprecationCategory; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -25,7 +24,6 @@ import org.elasticsearch.index.mapper.AbstractShapeGeometryFieldMapper; import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.FieldMapper; -import org.elasticsearch.index.mapper.GeoShapeFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.index.query.SearchExecutionContext; @@ -70,8 +68,6 @@ public class ShapeFieldMapper extends AbstractShapeGeometryFieldMapper { public static final String CONTENT_TYPE = "shape"; - private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(GeoShapeFieldMapper.class); - private static Builder builder(FieldMapper in) { return ((ShapeFieldMapper) in).builder; } @@ -118,14 +114,14 @@ public ShapeFieldMapper build(MapperBuilderContext 
context) { ); Parser parser = new ShapeParser(geometryParser); ShapeFieldType ft = new ShapeFieldType( - context.buildFullName(name()), + context.buildFullName(leafName()), indexed.get(), hasDocValues.get(), orientation.get().value(), parser, meta.get() ); - return new ShapeFieldMapper(name(), ft, multiFieldsBuilder.build(this, context), copyTo, parser, this); + return new ShapeFieldMapper(leafName(), ft, multiFieldsBuilder.build(this, context), copyTo, parser, this); } } @@ -237,7 +233,7 @@ protected String contentType() { @Override public FieldMapper.Builder getMergeBuilder() { return new Builder( - simpleName(), + leafName(), builder.version, builder.ignoreMalformed.getDefaultValue().value(), builder.coerce.getDefaultValue().value() diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/dlm/10_usage.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/dlm/10_usage.yml index 7ad16faae2314..07ed83e1f8863 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/dlm/10_usage.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/dlm/10_usage.yml @@ -31,6 +31,7 @@ lifecycle: data_retention: 10d data_stream: {} + - is_true: acknowledged - do: indices.create_data_stream: diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/enrich/20_standard_index.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/enrich/20_standard_index.yml index c767e3baac38f..971e276aab32a 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/enrich/20_standard_index.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/enrich/20_standard_index.yml @@ -187,3 +187,61 @@ enrich documents over _bulk via an alias: - do: enrich.delete_policy: name: test_alias_policy + +--- +enrich stats REST response structure: + - requires: + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_enrich/stats + capabilities: + - size-in-bytes + reason: "Capability required to run test" + + - do: + ingest.simulate: + id: test_pipeline + body: > + { + "docs": [ + { + "_index": "enrich-cache-stats-index", + "_id": "1", + "_source": {"baz": "quick", "c": 1} + }, + { + "_index": "enrich-cache-stats-index", + "_id": "2", + "_source": {"baz": "lazy", "c": 2} + }, + { + "_index": "enrich-cache-stats-index", + "_id": "3", + "_source": {"baz": "slow", "c": 3} + } + ] + } + - length: { docs: 3 } + + # This test's main purpose is to verify the REST response structure. + # So, rather than assessing specific values, we only assess the existence of fields. + - do: + enrich.stats: {} + - exists: executing_policies + - is_true: coordinator_stats + # We know there will be at least one node, but we don't want to be dependent on the exact number of nodes. 
+ - is_true: coordinator_stats.0.node_id + - exists: coordinator_stats.0.queue_size + - exists: coordinator_stats.0.remote_requests_current + - exists: coordinator_stats.0.remote_requests_total + - exists: coordinator_stats.0.executed_searches_total + - is_true: cache_stats + - is_true: cache_stats.0.node_id + - exists: cache_stats.0.count + - exists: cache_stats.0.hits + - exists: cache_stats.0.misses + - exists: cache_stats.0.evictions + - exists: cache_stats.0.hits_time_in_millis + - exists: cache_stats.0.misses_time_in_millis + - exists: cache_stats.0.size_in_bytes diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/100_bug_fix.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/100_bug_fix.yml index b91343d03d3d4..cffc161b11539 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/100_bug_fix.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/100_bug_fix.yml @@ -303,3 +303,38 @@ - match: { values.0.2: [1, 2] } - match: { values.0.3: [1, 2] } - match: { values.0.4: [1.1, 2.2] } + + +--- +"grok with duplicate names and different types #110533": + - requires: + test_runner_features: [capabilities] + capabilities: + - method: POST + path: /_query + parameters: [] + capabilities: [grok_validation] + reason: "fixed grok validation with patterns containing the same attribute multiple times with different types" + - do: + indices.create: + index: test_grok + body: + mappings: + properties: + first_name : + type : keyword + last_name: + type: keyword + + - do: + bulk: + refresh: true + body: + - { "index": { "_index": "test_grok" } } + - { "first_name": "Georgi", "last_name":"Facello" } + + - do: + catch: '/Invalid GROK pattern \[%\{NUMBER:foo\} %\{WORD:foo\}\]: the attribute \[foo\] is defined multiple times with different types/' + esql.query: + body: + query: 'FROM test_grok | KEEP name | WHERE last_name == "Facello" | EVAL name = concat("1 ", last_name) | GROK name "%{NUMBER:foo} %{WORD:foo}"' diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/160_union_types.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/160_union_types.yml index f3403ca8751c0..aac60d9aaa8d0 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/160_union_types.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/160_union_types.yml @@ -147,6 +147,9 @@ setup: - '{"index": {}}' - '{"@timestamp": "2023-10-23T12:15:03.360Z", "client_ip": "172.21.2.162", "event_duration": "3450233", "message": "Connected to 10.1.0.3"}' +############################################################################################################ +# Test a single index as a control of the expected results + --- load single index ip_long: - do: @@ -173,9 +176,6 @@ load single index ip_long: - match: { values.0.3: 1756467 } - match: { values.0.4: "Connected to 10.1.0.1" } -############################################################################################################ -# Test a single index as a control of the expected results - --- load single index keyword_keyword: - do: @@ -202,6 +202,83 @@ load single index keyword_keyword: - match: { values.0.3: "1756467" } - match: { values.0.4: "Connected to 10.1.0.1" } +--- +load single index ip_long and aggregate by client_ip: + - requires: + capabilities: + - method: POST + path: /_query + parameters: [method, path, parameters, capabilities] + capabilities: [casting_operator] + reason: "Casting operator and 
introduced in 8.15.0" + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM events_ip_long | STATS count = COUNT(*) BY client_ip::ip | SORT count DESC, `client_ip::ip` ASC' + + - match: { columns.0.name: "count" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "client_ip::ip" } + - match: { columns.1.type: "ip" } + - length: { values: 4 } + - match: { values.0.0: 4 } + - match: { values.0.1: "172.21.3.15" } + - match: { values.1.0: 1 } + - match: { values.1.1: "172.21.0.5" } + - match: { values.2.0: 1 } + - match: { values.2.1: "172.21.2.113" } + - match: { values.3.0: 1 } + - match: { values.3.1: "172.21.2.162" } + +--- +load single index ip_long and aggregate client_ip my message: + - requires: + capabilities: + - method: POST + path: /_query + parameters: [method, path, parameters, capabilities] + capabilities: [casting_operator] + reason: "Casting operator and introduced in 8.15.0" + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM events_ip_long | STATS count = COUNT(client_ip::ip) BY message | SORT count DESC, message ASC' + + - match: { columns.0.name: "count" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "message" } + - match: { columns.1.type: "keyword" } + - length: { values: 5 } + - match: { values.0.0: 3 } + - match: { values.0.1: "Connection error" } + - match: { values.1.0: 1 } + - match: { values.1.1: "Connected to 10.1.0.1" } + - match: { values.2.0: 1 } + - match: { values.2.1: "Connected to 10.1.0.2" } + - match: { values.3.0: 1 } + - match: { values.3.1: "Connected to 10.1.0.3" } + - match: { values.4.0: 1 } + - match: { values.4.1: "Disconnected" } + +--- +load single index ip_long stats invalid grouping: + - requires: + capabilities: + - method: POST + path: /_query + parameters: [method, path, parameters, capabilities] + capabilities: [casting_operator] + reason: "Casting operator and introduced in 8.15.0" + - do: + catch: '/Unknown column \[x\]/' + esql.query: + body: + query: 'FROM events_ip_long | STATS count = COUNT(client_ip::ip) BY x' + ############################################################################################################ # Test two indices where the event_duration is mapped as a LONG and as a KEYWORD @@ -512,6 +589,83 @@ load two indices, convert, rename but not drop ambiguous field client_ip: - match: { values.1.5: "172.21.3.15" } - match: { values.1.6: "172.21.3.15" } +--- +load two indexes and group by converted client_ip: + - requires: + capabilities: + - method: POST + path: /_query + parameters: [method, path, parameters, capabilities] + capabilities: [casting_operator, union_types_agg_cast] + reason: "Casting operator and Union types introduced in 8.15.0" + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM events_*_long | STATS count = COUNT(*) BY client_ip::ip | SORT count DESC, `client_ip::ip` ASC' + + - match: { columns.0.name: "count" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "client_ip::ip" } + - match: { columns.1.type: "ip" } + - length: { values: 4 } + - match: { values.0.0: 8 } + - match: { values.0.1: "172.21.3.15" } + - match: { values.1.0: 2 } + - match: { values.1.1: "172.21.0.5" } + - match: { values.2.0: 2 } + - match: { values.2.1: "172.21.2.113" } + - match: { values.3.0: 2 } + - match: { values.3.1: "172.21.2.162" } + +--- 
+load two indexes and aggregate converted client_ip: + - requires: + capabilities: + - method: POST + path: /_query + parameters: [method, path, parameters, capabilities] + capabilities: [casting_operator, union_types_agg_cast] + reason: "Casting operator and Union types introduced in 8.15.0" + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM events_*_long | STATS count = COUNT(client_ip::ip) BY message | SORT count DESC, message ASC' + + - match: { columns.0.name: "count" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "message" } + - match: { columns.1.type: "keyword" } + - length: { values: 5 } + - match: { values.0.0: 6 } + - match: { values.0.1: "Connection error" } + - match: { values.1.0: 2 } + - match: { values.1.1: "Connected to 10.1.0.1" } + - match: { values.2.0: 2 } + - match: { values.2.1: "Connected to 10.1.0.2" } + - match: { values.3.0: 2 } + - match: { values.3.1: "Connected to 10.1.0.3" } + - match: { values.4.0: 2 } + - match: { values.4.1: "Disconnected" } + +--- +load two indexes, convert client_ip and group by something invalid: + - requires: + capabilities: + - method: POST + path: /_query + parameters: [method, path, parameters, capabilities] + capabilities: [casting_operator, union_types_agg_cast] + reason: "Casting operator and Union types introduced in 8.15.0" + - do: + catch: '/Unknown column \[x\]/' + esql.query: + body: + query: 'FROM events_*_long | STATS count = COUNT(client_ip::ip) BY x' + ############################################################################################################ # Test four indices with both the client_IP (IP and KEYWORD) and event_duration (LONG and KEYWORD) mappings diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/170_no_replicas.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/170_no_replicas.yml new file mode 100644 index 0000000000000..6ac5b2ca68d5c --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/170_no_replicas.yml @@ -0,0 +1,181 @@ +--- +setup: + - requires: + cluster_features: ["gte_v8.15.0"] + reason: "Planning bugs for locally missing fields fixed in v 8.15" + test_runner_features: allowed_warnings_regex + - do: + indices.create: + index: test1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + name1: + type: keyword + - do: + bulk: + index: "test1" + refresh: true + body: + - { "index": { } } + - { "name1": "1"} + - do: + indices.create: + index: test2 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + name2: + type: keyword + - do: + bulk: + index: "test2" + refresh: true + body: + - { "index": { } } + - { "name2": "2"} + + - do: + indices.create: + index: test3 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + name3: + type: keyword + - do: + bulk: + index: "test3" + refresh: true + body: + - { "index": { } } + - { "name3": "3"} + + - do: + indices.create: + index: test4 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + name4: + type: keyword + - do: + bulk: + index: "test4" + refresh: true + body: + - { "index": { } } + - { "name4": "4"} + + - do: + indices.create: + index: test5 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + name5: + type: keyword + - do: + bulk: + index: "test5" + refresh: true + body: + - { 
"index": { } } + - { "name5": "5"} + + - do: + indices.create: + index: test6 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + name6: + type: keyword + - do: + bulk: + index: "test6" + refresh: true + body: + - { "index": { } } + - { "name6": "6"} + + - do: + indices.create: + index: test7 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + name7: + type: keyword + - do: + bulk: + index: "test7" + refresh: true + body: + - { "index": { } } + - { "name7": "7"} + + - do: + indices.create: + index: test8 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + name8: + type: keyword + - do: + bulk: + index: "test8" + refresh: true + body: + - { "index": { } } + - { "name8": "8"} + +--- +"Test From 1": + - do: + esql.query: + body: + query: 'FROM test* | MV_EXPAND name1 | KEEP name1 | SORT name1 NULLS LAST | LIMIT 1' + + - match: {columns.0.name: "name1"} + - match: {columns.0.type: "keyword"} + - length: { values: 1 } + - match: {values.0.0: "1"} + +--- +"Test From 5": + - do: + esql.query: + body: + query: 'FROM test* | MV_EXPAND name5 | KEEP name5 | SORT name5 NULLS LAST | LIMIT 1' + + - match: {columns.0.name: "name5"} + - match: {columns.0.type: "keyword"} + - length: { values: 1 } + - match: {values.0.0: "5"} + diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/learning_to_rank_rescorer.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/learning_to_rank_rescorer.yml index dac7b48617a2f..5c0096e9666fc 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/learning_to_rank_rescorer.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/learning_to_rank_rescorer.yml @@ -9,22 +9,37 @@ setup: body: > { "description": "super complex model for tests", - "input": {"field_names": ["cost", "product"]}, "inference_config": { "learning_to_rank": { + "feature_extractors": [ + { + "query_extractor": { + "feature_name": "cost", + "query": {"script_score": {"query": {"match_all":{}}, "script": {"source": "return doc['cost'].value;"}}} + } + }, + { + "query_extractor": { + "feature_name": "type_tv", + "query": {"term": {"product": "TV"}} + } + }, + { + "query_extractor": { + "feature_name": "type_vcr", + "query": {"term": {"product": "VCR"}} + } + }, + { + "query_extractor": { + "feature_name": "type_laptop", + "query": {"term": {"product": "Laptop"}} + } + } + ] } }, "definition": { - "preprocessors" : [{ - "one_hot_encoding": { - "field": "product", - "hot_map": { - "TV": "type_tv", - "VCR": "type_vcr", - "Laptop": "type_laptop" - } - } - }], "trained_model": { "ensemble": { "feature_names": ["cost", "type_tv", "type_vcr", "type_laptop"], @@ -246,3 +261,65 @@ setup: } } - length: { hits.hits: 0 } +--- +"Test model input validation": + - skip: + features: headers + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + catch: bad_request + ml.put_trained_model: + model_id: bad-model + body: > + { + "description": "a bad model", + "input": { + "field_names": ["cost"] + }, + "inference_config": { + "learning_to_rank": { } + }, + "definition": { + "trained_model": { + "ensemble": { + "feature_names": ["cost"], + "target_type": "regression", + "trained_models": [ + { + "tree": { + "feature_names": [ + "cost" + ], + "tree_structure": [ + { + "node_index": 0, + "split_feature": 0, + "split_gain": 12, + "threshold": 400, + "decision_type": "lte", + "default_left": true, + "left_child": 1, + "right_child": 2 + }, + { + "node_index": 1, + "leaf_value": 5.0 + }, + { + "node_index": 2, + "leaf_value": 2.0 + } + ], + "target_type": "regression" + } + } + ] + } + } + } + } + + - match: { status: 400 } + - match: { error.root_cause.0.type: "action_request_validation_exception" } + - match: { error.root_cause.0.reason: "Validation Failed: 1: cannot specify [input.field_names] for a model of type [learning_to_rank];" } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/text_expansion_search.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/text_expansion_search.yml index 8dee722bbb185..21a5a4736675d 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/text_expansion_search.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/text_expansion_search.yml @@ -96,6 +96,8 @@ setup: --- "Test text expansion search": + - requires: + test_runner_features: [ "allowed_warnings" ] - do: search: index: index-with-rank-features @@ -105,13 +107,17 @@ setup: ml.tokens: model_id: text_expansion_model model_text: "octopus comforter smells" + allowed_warnings: + - "text_expansion is deprecated. Use sparse_vector instead." + - match: { hits.total.value: 4 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } --- "Test text expansion search with pruning config": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "pruning introduced in 8.13.0" - do: @@ -126,13 +132,17 @@ setup: pruning_config: tokens_freq_ratio_threshold: 4 tokens_weight_threshold: 0.4 + allowed_warnings: + - "text_expansion is deprecated. Use sparse_vector instead." + - match: { hits.total.value: 4 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } --- "Test named, boosted text expansion search with pruning config": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "pruning introduced in 8.13.0" - do: search: @@ -146,6 +156,9 @@ setup: pruning_config: tokens_freq_ratio_threshold: 4 tokens_weight_threshold: 0.4 + allowed_warnings: + - "text_expansion is deprecated. Use sparse_vector instead." + - match: { hits.total.value: 4 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } - match: { hits.hits.0._score: 3.0 } @@ -164,15 +177,19 @@ setup: tokens_weight_threshold: 0.4 _name: i-like-naming-my-queries boost: 100.0 + allowed_warnings: + - "text_expansion is deprecated. Use sparse_vector instead." 
+ - match: { hits.total.value: 4 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } - - match: { hits.hits.0.matched_queries: ["i-like-naming-my-queries"] } + - match: { hits.hits.0.matched_queries: [ "i-like-naming-my-queries" ] } - match: { hits.hits.0._score: 300.0 } --- "Test text expansion search with default pruning config": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "pruning introduced in 8.13.0" - do: @@ -184,14 +201,18 @@ setup: ml.tokens: model_id: text_expansion_model model_text: "octopus comforter smells" - pruning_config: {} + pruning_config: { } + allowed_warnings: + - "text_expansion is deprecated. Use sparse_vector instead." + - match: { hits.total.value: 4 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } --- "Test text expansion search with weighted tokens rescoring only pruned tokens": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "pruning introduced in 8.13.0" - do: @@ -207,12 +228,16 @@ setup: tokens_freq_ratio_threshold: 4 tokens_weight_threshold: 0.4 only_score_pruned_tokens: true + allowed_warnings: + - "text_expansion is deprecated. Use sparse_vector instead." + - match: { hits.total.value: 0 } --- "Test weighted tokens search": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "weighted token search introduced in 8.13.0" - do: @@ -222,18 +247,22 @@ setup: query: weighted_tokens: ml.tokens: - tokens: [{"the": 1.0}, {"comforter":1.0}, {"smells":1.0}, {"bad": 1.0}] + tokens: [ { "the": 1.0 }, { "comforter": 1.0 }, { "smells": 1.0 }, { "bad": 1.0 } ] pruning_config: tokens_freq_ratio_threshold: 1 tokens_weight_threshold: 0.4 only_score_pruned_tokens: false + allowed_warnings: + - "weighted_tokens is deprecated and will be removed. Use sparse_vector instead." + - match: { hits.total.value: 5 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } --- "Test weighted tokens search with default pruning config": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "weighted token search introduced in 8.13.0" - do: @@ -243,15 +272,19 @@ setup: query: weighted_tokens: ml.tokens: - tokens: [{"the": 1.0}, {"comforter":1.0}, {"smells":1.0}, {"bad": 1.0}] - pruning_config: {} + tokens: [ { "the": 1.0 }, { "comforter": 1.0 }, { "smells": 1.0 }, { "bad": 1.0 } ] + pruning_config: { } + allowed_warnings: + - "weighted_tokens is deprecated and will be removed. Use sparse_vector instead." 
+ - match: { hits.total.value: 5 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } --- "Test weighted tokens search only scoring pruned tokens": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "weighted token search introduced in 8.13.0" - do: @@ -261,17 +294,21 @@ setup: query: weighted_tokens: ml.tokens: - tokens: [{"the": 1.0}, {"comforter":1.0}, {"smells":1.0}, {"bad": 1.0}] + tokens: [ { "the": 1.0 }, { "comforter": 1.0 }, { "smells": 1.0 }, { "bad": 1.0 } ] pruning_config: tokens_freq_ratio_threshold: 4 tokens_weight_threshold: 0.4 only_score_pruned_tokens: true + allowed_warnings: + - "weighted_tokens is deprecated and will be removed. Use sparse_vector instead." + - match: { hits.total.value: 0 } --- "Test weighted tokens search that prunes tokens based on frequency": - requires: - cluster_features: ["gte_v8.13.0"] + test_runner_features: [ "allowed_warnings" ] + cluster_features: [ "gte_v8.13.0" ] reason: "weighted token search introduced in 8.13.0" - do: @@ -281,17 +318,20 @@ setup: query: weighted_tokens: ml.tokens: - tokens: [{"the": 1.0}, {"octopus":1.0}, {"comforter":1.0}, {"is": 1.0}, {"the": 1.0}, {"best": 1.0}, {"of": 1.0}, {"the": 1.0}, {"bunch": 1.0}] + tokens: [ { "the": 1.0 }, { "octopus": 1.0 }, { "comforter": 1.0 }, { "is": 1.0 }, { "the": 1.0 }, { "best": 1.0 }, { "of": 1.0 }, { "the": 1.0 }, { "bunch": 1.0 } ] pruning_config: tokens_freq_ratio_threshold: 3 tokens_weight_threshold: 0.4 only_score_pruned_tokens: true + allowed_warnings: + - "weighted_tokens is deprecated and will be removed. Use sparse_vector instead." + - match: { hits.total.value: 0 } --- "Test text-expansion that displays error for invalid queried field type": - requires: - cluster_features: ["gte_v8.14.0"] + cluster_features: [ "gte_v8.14.0" ] reason: "validation for invalid field type introduced in 8.14.0" - do: @@ -304,4 +344,4 @@ setup: source_text: model_id: text_expansion_model model_text: "octopus comforter smells" - pruning_config: {} + pruning_config: { } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/text_expansion_search_rank_features.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/text_expansion_search_rank_features.yml index 7991566bfe818..6da86010205af 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/text_expansion_search_rank_features.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/text_expansion_search_rank_features.yml @@ -98,6 +98,8 @@ setup: --- "Test text expansion search": + - requires: + test_runner_features: [ "allowed_warnings" ] - do: search: index: index-with-rank-features @@ -107,5 +109,8 @@ setup: ml.tokens: model_id: text_expansion_model model_text: "octopus comforter smells" + allowed_warnings: + - "text_expansion is deprecated. Use sparse_vector instead." 
+ - match: { hits.total.value: 4 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/text_expansion_search_sparse_vector.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/text_expansion_search_sparse_vector.yml index 50a3fa7e22d58..08ce51c8d17f9 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/text_expansion_search_sparse_vector.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/text_expansion_search_sparse_vector.yml @@ -98,6 +98,8 @@ setup: --- "Test text expansion search": + - requires: + test_runner_features: [ "allowed_warnings" ] - do: search: index: index-with-rank-features @@ -107,5 +109,8 @@ setup: ml.tokens: model_id: text_expansion_model model_text: "octopus comforter smells" + allowed_warnings: + - "text_expansion is deprecated. Use sparse_vector instead." + - match: { hits.total.value: 4 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/privileges/11_builtin.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/privileges/11_builtin.yml index bb784f52884f6..ef8fab9ca7b6d 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/privileges/11_builtin.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/privileges/11_builtin.yml @@ -15,5 +15,5 @@ setup: # This is fragile - it needs to be updated every time we add a new cluster/index privilege # I would much prefer we could just check that specific entries are in the array, but we don't have # an assertion for that - - length: { "cluster" : 59 } + - length: { "cluster" : 61 } - length: { "index" : 22 } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/roles/50_remote_only.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/roles/50_remote_only.yml index cc60b68069195..1b5ce381319d2 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/roles/50_remote_only.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/roles/50_remote_only.yml @@ -71,6 +71,22 @@ teardown: - match: { remote_role.remote_cluster.0.clusters.0: "my_remote*" } - match: { remote_role.remote_cluster.0.clusters.1: "my_remote2*" } + # the query roles endpoint also shows the same role info + - do: + security.query_role: + body: > + {} + - match: { total: 1 } + - match: { count: 1 } + - match: { roles.0.name: "remote_role" } + - match: { roles.0.remote_indices.0.names.0: "logs*" } + - match: { roles.0.remote_indices.0.privileges.0: "read" } + - match: { roles.0.remote_indices.0.allow_restricted_indices: false } + - match: { roles.0.remote_indices.0.clusters.0: "*" } + - match: { roles.0.remote_cluster.0.privileges.0: "monitor_enrich" } + - match: { roles.0.remote_cluster.0.clusters.0: "my_remote*" } + - match: { roles.0.remote_cluster.0.clusters.1: "my_remote2*" } + - do: headers: Authorization: "Basic am9lOnMza3JpdC1wYXNzd29yZA==" diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/roles/60_bulk_roles.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/roles/60_bulk_roles.yml new file mode 100644 index 0000000000000..c7a707f437e0c --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/roles/60_bulk_roles.yml @@ -0,0 +1,98 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + - do: 
+ security.put_user: + username: "joe" + body: > + { + "password": "s3krit-password", + "roles" : [ "admin_role" ] + } + +--- +teardown: + - do: + security.delete_user: + username: "joe" + ignore: 404 +--- +"Test bulk put and delete roles api": + - do: + security.bulk_put_role: + body: > + { + "roles": { + "admin_role": { + "cluster": [ + "all" + ], + "metadata": { + "key1": "val1", + "key2": "val2" + }, + "indices": [ + { + "names": "*", + "privileges": [ + "all" + ] + } + ] + }, + "role_with_description": { + "description": "Allows all security-related operations such as CRUD operations on users and roles and cache clearing.", + "cluster": [ + "manage_security" + ] + } + } + } + - match: { created: ["admin_role", "role_with_description"] } + + - do: + headers: + Authorization: "Basic am9lOnMza3JpdC1wYXNzd29yZA==" + security.get_role: + name: "admin_role" + - match: { admin_role.cluster.0: "all" } + - match: { admin_role.metadata.key1: "val1" } + - match: { admin_role.metadata.key2: "val2" } + - match: { admin_role.indices.0.names.0: "*" } + - match: { admin_role.indices.0.privileges.0: "all" } + + - do: + headers: + Authorization: "Basic am9lOnMza3JpdC1wYXNzd29yZA==" + security.get_role: + name: "role_with_description" + - match: { role_with_description.cluster.0: "manage_security" } + - match: { role_with_description.description: "Allows all security-related operations such as CRUD operations on users and roles and cache clearing." } + + # query match_all roles + - do: + headers: + Authorization: "Basic am9lOnMza3JpdC1wYXNzd29yZA==" + security.query_role: + body: > + { + "query": { "match_all": {} }, "sort": ["name"] + } + - match: { total: 2 } + - match: { count: 2 } + - match: { roles.0.name: "admin_role" } + - match: { roles.1.name: "role_with_description" } + + - do: + security.bulk_delete_role: + body: > + { + "names": ["admin_role", "role_with_description"] + } + - match: { deleted.0: "admin_role" } + - match: { deleted.1: "role_with_description" } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/30_field_level_security_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/30_field_level_security_synthetic_source.yml index fc2e22d857358..b971c246ac50a 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/30_field_level_security_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/30_field_level_security_synthetic_source.yml @@ -356,6 +356,7 @@ Field with ignored_malformed: Authorization: "ApiKey ${credentials}" search: index: index_fls + sort: name - match: { hits.hits.0._source.name: A } - is_false: "hits.hits.0._source.secret" - match: { hits.hits.1._source.name: B } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/40_document_level_security_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/40_document_level_security_synthetic_source.yml new file mode 100644 index 0000000000000..769b9d848ba35 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/40_document_level_security_synthetic_source.yml @@ -0,0 +1,403 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + +--- +Filter on single field: + - do: + indices.create: + index: index_dls + body: + mappings: + _source: + mode: synthetic 
+ properties: + name: + type: keyword + + - do: + bulk: + index: index_dls + refresh: true + body: + - '{"create": { "_id": "1a" }}' + - '{"name": "A", "type": "foo"}' + - '{"create": { "_id": "2a" }}' + - '{"name": "B", "type": "bar"}' + - match: { errors: false } + + - do: + security.create_api_key: + body: + name: "test-fls" + expiration: "1d" + role_descriptors: + index_access: + indices: + - names: [ "index_dls" ] + privileges: [ "read" ] + query: + match: + name: A + - match: { name: "test-fls" } + - is_true: id + - set: + id: api_key_id + encoded: credentials + + # With superuser... + - do: + search: + index: index_dls + sort: name + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: 1a } + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.type: foo } + - match: { hits.hits.1._id: 2a } + - match: { hits.hits.1._source.name: B } + - match: { hits.hits.1._source.type: bar } + + - do: + get: + index: index_dls + id: 2a + - match: { _source.name: B } + - match: { _source.type: bar } + + # With FLS API Key + - do: + headers: + Authorization: "ApiKey ${credentials}" + search: + index: index_dls + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: 1a } + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.type: foo } + + - do: + catch: missing + headers: + Authorization: "ApiKey ${credentials}" + get: + index: index_dls + id: 2a + +--- +Filter on nested field: + - do: + indices.create: + index: index_dls + body: + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + object: + type: nested + properties: + secret: + type: keyword + + - do: + bulk: + index: index_dls + refresh: true + body: + - '{"create": { }}' + - '{"name": "A", "object": [ { "secret": "mission" }, { "secret": "nomatch" } ] }' + - '{"create": { }}' + - '{"name": "B", "object": { "secret": "mission", "public": "interest" } }' + - '{"create": { }}' + - '{"name": "C", "object": { "foo": "bar" } }' + - match: { errors: false } + + - do: + security.create_api_key: + body: + name: "test-fls" + expiration: "1d" + role_descriptors: + index_access: + indices: + - names: [ "index_dls" ] + privileges: [ "read" ] + query: + nested: + path: object + query: + term: + object.secret: mission + - match: { name: "test-fls" } + - is_true: id + - set: + id: api_key_id + encoded: credentials + + # With superuser... 
+ - do: + search: + index: index_dls + sort: name + - match: { hits.total.value: 3 } + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.object.0.secret: mission } + - match: { hits.hits.0._source.object.1.secret: nomatch } + - match: { hits.hits.1._source.name: B } + - match: { hits.hits.1._source.object.secret: mission } + - match: { hits.hits.1._source.object.public: interest } + - match: { hits.hits.2._source.name: C } + - match: { hits.hits.2._source.object.foo: bar } + + # With FLS API Key + - do: + headers: + Authorization: "ApiKey ${credentials}" + search: + index: index_dls + sort: name + - match: { hits.total.value: 2 } + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.object.0.secret: mission } + - match: { hits.hits.0._source.object.1.secret: nomatch } + - match: { hits.hits.1._source.name: B } + - match: { hits.hits.1._source.object.secret: mission } + - match: { hits.hits.1._source.object.public: interest } + +--- +Filter on object with stored source: + - do: + indices.create: + index: index_dls + body: + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + obj: + type: object + store_array_source: true + properties: + secret: + type: keyword + runtime: + secret: + type: keyword + script: + source: "emit(params._source.obj.0.secret)" + + - do: + bulk: + index: index_dls + refresh: true + body: + - '{"create": { }}' + - '{"name": "A", "obj": [ { "secret": "mission" }, { "foo": "bar" } ] }' + - '{"create": { }}' + - '{"name": "B", "obj": [ { "secret": "common" }, {"foo": "baz"} ] }' + - match: { errors: false } + + - do: + security.create_api_key: + body: + name: "test-fls" + expiration: "1d" + role_descriptors: + index_access: + indices: + - names: [ "index_dls" ] + privileges: [ "read" ] + query: + term: + secret: mission + - match: { name: "test-fls" } + - is_true: id + - set: + id: api_key_id + encoded: credentials + + # With superuser... 
+ - do: + search: + index: index_dls + sort: name + - match: { hits.total.value: 2 } + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.obj.0.secret: mission } + - match: { hits.hits.0._source.obj.1.foo: bar } + - match: { hits.hits.1._source.name: B } + - match: { hits.hits.1._source.obj.0.secret: common } + - match: { hits.hits.1._source.obj.1.foo: baz } + + # With FLS API Key + - do: + headers: + Authorization: "ApiKey ${credentials}" + search: + index: index_dls + sort: name + - match: { hits.total.value: 1 } + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.obj.0.secret: mission } + - match: { hits.hits.0._source.obj.1.foo: bar } + + +--- +Filter on field within a disabled object: + - do: + indices.create: + index: index_dls + body: + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + object: + type: object + enabled: false + runtime: + secret: + type: keyword + script: + source: "emit(params._source.object.secret)" + + - do: + bulk: + index: index_dls + refresh: true + body: + - '{"create": { }}' + - '{"name": "A", "object": { "secret":"mission", "public":"interest" } }' + - '{"create": { }}' + - '{"name": "B", "object": { "secret":"common", "foo":"bar" } }' + - match: { errors: false } + + - do: + security.create_api_key: + body: + name: "test-fls" + expiration: "1d" + role_descriptors: + index_access: + indices: + - names: [ "index_dls" ] + privileges: [ "read", "monitor" ] + query: + term: + secret: mission + - match: { name: "test-fls" } + - is_true: id + - set: + id: api_key_id + encoded: credentials + + # With superuser... + - do: + search: + index: index_dls + sort: name + - match: { hits.total.value: 2 } + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.object.secret: mission } + - match: { hits.hits.0._source.object.public: interest } + - match: { hits.hits.1._source.name: B } + - match: { hits.hits.1._source.object.secret: common } + - match: { hits.hits.1._source.object.foo: bar } + + # With FLS API Key + - do: + headers: + Authorization: "ApiKey ${credentials}" + search: + index: index_dls + sort: name + - match: { hits.total.value: 1 } + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.object.secret: mission } + - match: { hits.hits.0._source.object.public: interest } + + +--- +Filter on field with ignored_malformed: + - do: + indices.create: + index: index_dls + body: + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + secret: + type: integer + ignore_malformed: true + runtime: + rt_secret: + type: keyword + script: + source: "emit(params._source.secret)" + + - do: + bulk: + index: index_dls + refresh: true + body: + - '{"create": { }}' + - '{"name": "A", "secret": "mission"}' + - '{"create": { }}' + - '{"name": "B", "secret": "top" }' + - match: { errors: false } + + - do: + security.create_api_key: + body: + name: "test-fls" + expiration: "1d" + role_descriptors: + index_access: + indices: + - names: [ "index_dls" ] + privileges: [ "read" ] + query: + term: + rt_secret: mission + - match: { name: "test-fls" } + - is_true: id + - set: + id: api_key_id + encoded: credentials + + # With superuser... 
+ - do: + search: + index: index_dls + sort: name + - match: { hits.total.value: 2 } + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.secret: mission } + - match: { hits.hits.1._source.name: B } + - match: { hits.hits.1._source.secret: top } + + # With FLS API Key + - do: + headers: + Authorization: "ApiKey ${credentials}" + search: + index: index_dls + sort: name + - match: { hits.total.value: 1 } + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.secret: mission } diff --git a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistry.java b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistry.java index b2dc04c1178e4..4d2789dbb8591 100644 --- a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistry.java +++ b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistry.java @@ -51,7 +51,12 @@ public class LegacyStackTemplateRegistry extends IndexTemplateRegistry { private final FeatureService featureService; private volatile boolean stackTemplateEnabled; - private static final Map ADDITIONAL_TEMPLATE_VARIABLES = Map.of("xpack.stack.template.deprecated", "true"); + private static final Map ADDITIONAL_TEMPLATE_VARIABLES = Map.of( + "xpack.stack.template.deprecated", + "true", + "xpack.stack.template.logs.index.mode", + "standard" + ); // General mappings conventions for any data that ends up in a data stream public static final String DATA_STREAMS_MAPPINGS_COMPONENT_TEMPLATE_NAME = "data-streams-mappings"; diff --git a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java index 34cacbb8956e5..648146ccdcc61 100644 --- a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java +++ b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java @@ -47,7 +47,7 @@ public class StackTemplateRegistry extends IndexTemplateRegistry { // The stack template registry version. This number must be incremented when we make changes // to built-in templates. 
- public static final int REGISTRY_VERSION = 11; + public static final int REGISTRY_VERSION = 12; public static final String TEMPLATE_VERSION_VARIABLE = "xpack.stack.template.version"; public static final Setting STACK_TEMPLATES_ENABLED = Setting.boolSetting( @@ -68,10 +68,9 @@ public class StackTemplateRegistry extends IndexTemplateRegistry { private final ClusterService clusterService; private final FeatureService featureService; + private final Map componentTemplateConfigs; private volatile boolean stackTemplateEnabled; - private final boolean logsIndexModeTemplateEnabled; - public static final Map ADDITIONAL_TEMPLATE_VARIABLES = Map.of("xpack.stack.template.deprecated", "false"); // General mappings conventions for any data that ends up in a data stream @@ -132,53 +131,10 @@ public StackTemplateRegistry( this.clusterService = clusterService; this.featureService = featureService; this.stackTemplateEnabled = STACK_TEMPLATES_ENABLED.get(nodeSettings); - this.logsIndexModeTemplateEnabled = CLUSTER_LOGSDB_ENABLED.get(nodeSettings); - } - - @Override - public void initialize() { - super.initialize(); - clusterService.getClusterSettings().addSettingsUpdateConsumer(STACK_TEMPLATES_ENABLED, this::updateEnabledSetting); - } - - private void updateEnabledSetting(boolean newValue) { - if (newValue) { - this.stackTemplateEnabled = true; - } else { - logger.info( - "stack composable templates [{}] and component templates [{}] will not be installed or reinstalled", - String.join(",", getComposableTemplateConfigs().keySet()), - String.join(",", getComponentTemplateConfigs().keySet()) - ); - this.stackTemplateEnabled = false; - } - } - - private static final List LIFECYCLE_POLICY_CONFIGS = List.of( - new LifecyclePolicyConfig(LOGS_ILM_POLICY_NAME, "/logs@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), - new LifecyclePolicyConfig(METRICS_ILM_POLICY_NAME, "/metrics@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), - new LifecyclePolicyConfig(SYNTHETICS_ILM_POLICY_NAME, "/synthetics@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), - new LifecyclePolicyConfig(ILM_7_DAYS_POLICY_NAME, "/7-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), - new LifecyclePolicyConfig(ILM_30_DAYS_POLICY_NAME, "/30-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), - new LifecyclePolicyConfig(ILM_90_DAYS_POLICY_NAME, "/90-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), - new LifecyclePolicyConfig(ILM_180_DAYS_POLICY_NAME, "/180-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), - new LifecyclePolicyConfig(ILM_365_DAYS_POLICY_NAME, "/365-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES) - ); - - @Override - protected List getLifecycleConfigs() { - return LIFECYCLE_POLICY_CONFIGS; - } - - @Override - protected List getLifecyclePolicies() { - return lifecyclePolicies; + this.componentTemplateConfigs = loadComponentTemplateConfigs(CLUSTER_LOGSDB_ENABLED.get(nodeSettings)); } - private static final Map COMPONENT_TEMPLATE_CONFIGS; - private static final Map LOGSDB_COMPONENT_TEMPLATE_CONFIGS; - - static { + private Map loadComponentTemplateConfigs(boolean logsDbEnabled) { final Map componentTemplates = new HashMap<>(); for (IndexTemplateConfig config : List.of( new IndexTemplateConfig( @@ -190,7 +146,7 @@ protected List getLifecyclePolicies() { ), new IndexTemplateConfig( LOGS_MAPPINGS_COMPONENT_TEMPLATE_NAME, - "/logs@mappings.json", + logsDbEnabled ? 
"/logs@mappings-logsdb.json" : "/logs@mappings.json", REGISTRY_VERSION, TEMPLATE_VERSION_VARIABLE, ADDITIONAL_TEMPLATE_VARIABLES @@ -207,7 +163,12 @@ protected List getLifecyclePolicies() { "/logs@settings.json", REGISTRY_VERSION, TEMPLATE_VERSION_VARIABLE, - ADDITIONAL_TEMPLATE_VARIABLES + Map.of( + "xpack.stack.template.deprecated", + "false", + "xpack.stack.template.logs.index.mode", + logsDbEnabled ? "logs" : "standard" + ) ), new IndexTemplateConfig( METRICS_MAPPINGS_COMPONENT_TEMPLATE_NAME, @@ -261,99 +222,52 @@ protected List getLifecyclePolicies() { throw new AssertionError(e); } } - COMPONENT_TEMPLATE_CONFIGS = Map.copyOf(componentTemplates); + return Map.copyOf(componentTemplates); + } - final Map logsdbComponentTemplates = new HashMap<>(); - for (IndexTemplateConfig config : List.of( - new IndexTemplateConfig( - DATA_STREAMS_MAPPINGS_COMPONENT_TEMPLATE_NAME, - "/data-streams@mappings.json", - REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE, - ADDITIONAL_TEMPLATE_VARIABLES - ), - new IndexTemplateConfig( - LOGS_MAPPINGS_COMPONENT_TEMPLATE_NAME, - "/logs@mappings-logsdb.json", - REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE, - ADDITIONAL_TEMPLATE_VARIABLES - ), - new IndexTemplateConfig( - ECS_DYNAMIC_MAPPINGS_COMPONENT_TEMPLATE_NAME, - "/ecs@mappings.json", - REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE, - ADDITIONAL_TEMPLATE_VARIABLES - ), - new IndexTemplateConfig( - LOGS_SETTINGS_COMPONENT_TEMPLATE_NAME, - "/logs@settings-logsdb.json", - REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE, - ADDITIONAL_TEMPLATE_VARIABLES - ), - new IndexTemplateConfig( - METRICS_MAPPINGS_COMPONENT_TEMPLATE_NAME, - "/metrics@mappings.json", - REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE, - ADDITIONAL_TEMPLATE_VARIABLES - ), - new IndexTemplateConfig( - METRICS_SETTINGS_COMPONENT_TEMPLATE_NAME, - "/metrics@settings.json", - REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE, - ADDITIONAL_TEMPLATE_VARIABLES - ), - new IndexTemplateConfig( - METRICS_TSDB_SETTINGS_COMPONENT_TEMPLATE_NAME, - "/metrics@tsdb-settings.json", - REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE, - ADDITIONAL_TEMPLATE_VARIABLES - ), - new IndexTemplateConfig( - SYNTHETICS_MAPPINGS_COMPONENT_TEMPLATE_NAME, - "/synthetics@mappings.json", - REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE, - ADDITIONAL_TEMPLATE_VARIABLES - ), - new IndexTemplateConfig( - SYNTHETICS_SETTINGS_COMPONENT_TEMPLATE_NAME, - "/synthetics@settings.json", - REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE, - ADDITIONAL_TEMPLATE_VARIABLES - ), - new IndexTemplateConfig( - KIBANA_REPORTING_COMPONENT_TEMPLATE_NAME, - "/kibana-reporting@settings.json", - REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE, - ADDITIONAL_TEMPLATE_VARIABLES - ) - )) { - try { - logsdbComponentTemplates.put( - config.getTemplateName(), - ComponentTemplate.parse(JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, config.loadBytes())) - ); - } catch (IOException e) { - throw new AssertionError(e); - } + @Override + public void initialize() { + super.initialize(); + clusterService.getClusterSettings().addSettingsUpdateConsumer(STACK_TEMPLATES_ENABLED, this::updateEnabledSetting); + } + + private void updateEnabledSetting(boolean newValue) { + if (newValue) { + this.stackTemplateEnabled = true; + } else { + logger.info( + "stack composable templates [{}] and component templates [{}] will not be installed or reinstalled", + String.join(",", getComposableTemplateConfigs().keySet()), + String.join(",", getComponentTemplateConfigs().keySet()) + ); + this.stackTemplateEnabled = false; } - 
LOGSDB_COMPONENT_TEMPLATE_CONFIGS = Map.copyOf(logsdbComponentTemplates); + } + + private static final List LIFECYCLE_POLICY_CONFIGS = List.of( + new LifecyclePolicyConfig(LOGS_ILM_POLICY_NAME, "/logs@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(METRICS_ILM_POLICY_NAME, "/metrics@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(SYNTHETICS_ILM_POLICY_NAME, "/synthetics@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_7_DAYS_POLICY_NAME, "/7-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_30_DAYS_POLICY_NAME, "/30-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_90_DAYS_POLICY_NAME, "/90-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_180_DAYS_POLICY_NAME, "/180-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES), + new LifecyclePolicyConfig(ILM_365_DAYS_POLICY_NAME, "/365-days@lifecycle.json", ADDITIONAL_TEMPLATE_VARIABLES) + ); + + @Override + protected List getLifecycleConfigs() { + return LIFECYCLE_POLICY_CONFIGS; + } + + @Override + protected List getLifecyclePolicies() { + return lifecyclePolicies; } @Override protected Map getComponentTemplateConfigs() { - if (logsIndexModeTemplateEnabled) { - return LOGSDB_COMPONENT_TEMPLATE_CONFIGS; - } - return COMPONENT_TEMPLATE_CONFIGS; + return componentTemplateConfigs; } private static final Map COMPOSABLE_INDEX_TEMPLATE_CONFIGS = parseComposableTemplates( diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformIndex.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformIndex.java index 8f21651d4ade0..6268162625977 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformIndex.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformIndex.java @@ -9,6 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.Alias; @@ -201,7 +202,7 @@ static void createDestinationIndex( ActionListener.wrap(createIndexResponse -> { listener.onResponse(true); }, e -> { - if (e instanceof ResourceAlreadyExistsException) { + if (ExceptionsHelper.unwrapCause(e) instanceof ResourceAlreadyExistsException) { // Already existing index is ok, it could have been created by the indexing process of the running transform. 
listener.onResponse(false); return; diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java index 712a95ded2076..bd0224f49814e 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java @@ -63,6 +63,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; import java.util.stream.Stream; import static java.util.Collections.emptyMap; @@ -295,7 +296,8 @@ protected void onStart(long now, ActionListener listener) { ActionListener finalListener = listener.delegateFailureAndWrap((l, r) -> { // if we haven't set the page size yet, if it is set we might have reduced it after running into an out of memory if (context.getPageSize() == 0) { - configurePageSize(getConfig().getSettings().getMaxPageSearchSize()); + // check the pageSize again in case another thread has updated it + configurePageSize(() -> context.getPageSize() == 0, getConfig().getSettings().getMaxPageSearchSize()); } runState = determineRunStateAtStart(); @@ -571,7 +573,11 @@ private void executeRetentionPolicy(ActionListener listener) { private void finalizeCheckpoint(ActionListener listener) { try { // reset the page size, so we do not memorize a low page size forever - resetPageSize(); + var pageSize = initialConfiguredPageSize; + // only update if the initialConfiguredPageSize hadn't been changed by the user between the last line and the next line + // if the user also called configurePageSize, keep their new value rather than resetting it to their previous value + configurePageSize(() -> Objects.equals(pageSize, initialConfiguredPageSize), pageSize); + // reset the changed bucket to free memory if (changeCollector != null) { changeCollector.clear(); @@ -670,9 +676,10 @@ public void applyNewSettings(SettingsConfig newSettings) { logger.info("[{}] transform settings have been updated.", transformConfig.getId()); docsPerSecond = newSettings.getDocsPerSecond() != null ? newSettings.getDocsPerSecond() : -1; - if (Objects.equals(newSettings.getMaxPageSearchSize(), initialConfiguredPageSize) == false) { - configurePageSize(newSettings.getMaxPageSearchSize()); - } + configurePageSize( + () -> Objects.equals(newSettings.getMaxPageSearchSize(), initialConfiguredPageSize) == false, + newSettings.getMaxPageSearchSize() + ); rethrottle(); } @@ -1231,19 +1238,19 @@ private RunState determineRunStateAtStart() { return RunState.IDENTIFY_CHANGES; } - private void configurePageSize(Integer newPageSize) { - initialConfiguredPageSize = newPageSize; - resetPageSize(); - } - - private void resetPageSize() { - if (initialConfiguredPageSize != null && initialConfiguredPageSize > 0) { - context.setPageSize(initialConfiguredPageSize); - } else if (function != null) { - context.setPageSize(function.getInitialPageSize()); - } else { - // we should never be in a state where both initialConfiguredPageSize and function are null, but just in case... 
- context.setPageSize(Transform.DEFAULT_INITIAL_MAX_PAGE_SEARCH_SIZE); + private void configurePageSize(Supplier shouldUpdate, Integer newPageSize) { + synchronized (context) { + if (shouldUpdate.get()) { + initialConfiguredPageSize = newPageSize; + if (newPageSize != null && newPageSize > 0) { + context.setPageSize(initialConfiguredPageSize); + } else if (function != null) { + context.setPageSize(function.getInitialPageSize()); + } else { + // we should never be in a state where both initialConfiguredPageSize and function are null, but just in case... + context.setPageSize(Transform.DEFAULT_INITIAL_MAX_PAGE_SEARCH_SIZE); + } + } } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/common/AbstractCompositeAggFunction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/common/AbstractCompositeAggFunction.java index 3412be813dcf6..23bab56de5ec9 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/common/AbstractCompositeAggFunction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/common/AbstractCompositeAggFunction.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.transform.transforms.common; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; @@ -45,6 +47,7 @@ * Basic abstract class for implementing a transform function that utilizes composite aggregations */ public abstract class AbstractCompositeAggFunction implements Function { + private static final Logger logger = LogManager.getLogger(AbstractCompositeAggFunction.class); public static final int TEST_QUERY_PAGE_SIZE = 50; public static final String COMPOSITE_AGGREGATION_NAME = "_transform"; @@ -78,7 +81,7 @@ public void preview( ClientHelper.TRANSFORM_ORIGIN, client, TransportSearchAction.TYPE, - buildSearchRequest(sourceConfig, timeout, numberOfBuckets), + buildSearchRequestForValidation("preview", sourceConfig, timeout, numberOfBuckets), ActionListener.wrap(r -> { try { final InternalAggregations aggregations = r.getAggregations(); @@ -116,7 +119,7 @@ public void validateQuery( TimeValue timeout, ActionListener listener ) { - SearchRequest searchRequest = buildSearchRequest(sourceConfig, timeout, TEST_QUERY_PAGE_SIZE); + SearchRequest searchRequest = buildSearchRequestForValidation("validate", sourceConfig, timeout, TEST_QUERY_PAGE_SIZE); ClientHelper.executeWithHeadersAsync( headers, ClientHelper.TRANSFORM_ORIGIN, @@ -193,11 +196,12 @@ protected abstract Stream> extractResults( TransformProgress progress ); - private SearchRequest buildSearchRequest(SourceConfig sourceConfig, TimeValue timeout, int pageSize) { + private SearchRequest buildSearchRequestForValidation(String logId, SourceConfig sourceConfig, TimeValue timeout, int pageSize) { SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(sourceConfig.getQueryConfig().getQuery()) .runtimeMappings(sourceConfig.getRuntimeMappings()) .timeout(timeout); buildSearchQuery(sourceBuilder, null, pageSize); + logger.debug("[{}] Querying {} for data: {}", logId, sourceConfig.getIndex(), sourceBuilder); return new SearchRequest(sourceConfig.getIndex()).source(sourceBuilder).indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN); } diff --git 
a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformIndexTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformIndexTests.java index ce43a353ff414..17f9fedecbdff 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformIndexTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformIndexTests.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.transform.persistence; +import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; @@ -22,6 +23,7 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.transform.transforms.DestAlias; @@ -165,6 +167,37 @@ public void testCreateDestinationIndex() throws IOException { assertThat(createIndexRequest.aliases(), is(empty())); } + public void testCreateDestinationIndexThrowsResourceAlreadyExistsException() throws InterruptedException { + doAnswer(withFailure(new ResourceAlreadyExistsException("blah"))).when(client).execute(any(), any(), any()); + + var latch = new CountDownLatch(1); + + TransformIndex.createDestinationIndex( + client, + TransformConfigTests.randomTransformConfig(TRANSFORM_ID), + TransformIndex.createTransformDestIndexSettings(Settings.EMPTY, new HashMap<>(), TRANSFORM_ID, clock), + new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(Assert::assertFalse), latch) + ); + + assertTrue("Timed out waiting for test to finish", latch.await(10, TimeUnit.SECONDS)); + } + + public void testCreateDestinationIndexThrowsWrappedResourceAlreadyExistsException() throws InterruptedException { + doAnswer(withFailure(new RemoteTransportException("blah", new ResourceAlreadyExistsException("also blah")))).when(client) + .execute(any(), any(), any()); + + var latch = new CountDownLatch(1); + + TransformIndex.createDestinationIndex( + client, + TransformConfigTests.randomTransformConfig(TRANSFORM_ID), + TransformIndex.createTransformDestIndexSettings(Settings.EMPTY, new HashMap<>(), TRANSFORM_ID, clock), + new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(Assert::assertFalse), latch) + ); + + assertTrue("Timed out waiting for test to finish", latch.await(10, TimeUnit.SECONDS)); + } + public void testSetUpDestinationAliases_NullAliases() { doAnswer(withResponse(null)).when(client).execute(any(), any(), any()); diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerTests.java index 1c268174f5be5..644518538638d 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerTests.java @@ -478,7 +478,7 @@ public void testInterActionWhileIndexerShutsdown() throws Exception { } public void testMaxPageSearchSizeIsResetToDefaultValue() throws Exception { 
- TransformConfig config = new TransformConfig( + var config = new TransformConfig( randomAlphaOfLength(10), randomSourceConfig(), randomDestConfig(), @@ -494,13 +494,12 @@ public void testMaxPageSearchSizeIsResetToDefaultValue() throws Exception { null, null ); - AtomicReference state = new AtomicReference<>(IndexerState.STARTED); - TransformContext context = new TransformContext(TransformTaskState.STARTED, "", 0, mock(TransformContext.Listener.class)); - final MockedTransformIndexer indexer = createMockIndexer( + var context = new TransformContext(TransformTaskState.STARTED, "", 0, mock(TransformContext.Listener.class)); + var indexer = createMockIndexer( 1, config, - state, + new AtomicReference<>(IndexerState.STARTED), null, threadPool, auditor, @@ -509,7 +508,7 @@ public void testMaxPageSearchSizeIsResetToDefaultValue() throws Exception { ); // add latches - CountDownLatch searchLatch = indexer.createAwaitForSearchLatch(1); + var searchLatch = indexer.createAwaitForSearchLatch(1); indexer.addAfterFinishOrFailureLatch(); indexer.start(); @@ -525,14 +524,14 @@ public void testMaxPageSearchSizeIsResetToDefaultValue() throws Exception { // run and wait searchLatch.countDown(); - indexer.waitForAfterFinishOrFailureLatch(5, TimeUnit.SECONDS); + indexer.waitForAfterFinishOrFailureLatch(10, TimeUnit.SECONDS); // rerun, don't throw an exception this time searchLatch = indexer.createAwaitForSearchLatch(1); indexer.addAfterFinishOrFailureLatch(); assertBusy(() -> assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis()))); searchLatch.countDown(); - indexer.waitForAfterFinishOrFailureLatch(5, TimeUnit.SECONDS); + indexer.waitForAfterFinishOrFailureLatch(10, TimeUnit.SECONDS); // verify that we checked the pageSize decreased assertTrue(indexer.runBeforeOnFinish.isEmpty()); @@ -541,7 +540,7 @@ public void testMaxPageSearchSizeIsResetToDefaultValue() throws Exception { } public void testMaxPageSearchSizeIsResetToConfiguredValue() throws Exception { - TransformConfig config = new TransformConfig( + var config = new TransformConfig( randomAlphaOfLength(10), randomSourceConfig(), randomDestConfig(), @@ -557,13 +556,12 @@ public void testMaxPageSearchSizeIsResetToConfiguredValue() throws Exception { null, null ); - AtomicReference state = new AtomicReference<>(IndexerState.STARTED); - TransformContext context = new TransformContext(TransformTaskState.STARTED, "", 0, mock(TransformContext.Listener.class)); - final MockedTransformIndexer indexer = createMockIndexer( + var context = new TransformContext(TransformTaskState.STARTED, "", 0, mock(TransformContext.Listener.class)); + var indexer = createMockIndexer( 1, config, - state, + new AtomicReference<>(IndexerState.STARTED), null, threadPool, auditor, @@ -572,7 +570,7 @@ public void testMaxPageSearchSizeIsResetToConfiguredValue() throws Exception { ); // add latches - CountDownLatch searchLatch = indexer.createAwaitForSearchLatch(1); + var searchLatch = indexer.createAwaitForSearchLatch(1); indexer.addAfterFinishOrFailureLatch(); indexer.start(); @@ -591,14 +589,14 @@ public void testMaxPageSearchSizeIsResetToConfiguredValue() throws Exception { // run and wait searchLatch.countDown(); - indexer.waitForAfterFinishOrFailureLatch(5, TimeUnit.SECONDS); + indexer.waitForAfterFinishOrFailureLatch(10, TimeUnit.SECONDS); // rerun, don't throw an exception this time searchLatch = indexer.createAwaitForSearchLatch(1); indexer.addAfterFinishOrFailureLatch(); assertBusy(() -> assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis()))); 
searchLatch.countDown(); - indexer.waitForAfterFinishOrFailureLatch(5, TimeUnit.SECONDS); + indexer.waitForAfterFinishOrFailureLatch(10, TimeUnit.SECONDS); // verify that we checked the pageSize decreased assertTrue(indexer.runBeforeOnFinish.isEmpty()); @@ -606,6 +604,72 @@ public void testMaxPageSearchSizeIsResetToConfiguredValue() throws Exception { assertEquals(configuredMaxPageSearchSize, context.getPageSize()); } + public void testMaxPageSearchSizePrioritizesMostRecentSettings() throws Exception { + var settingsLatch = new CountDownLatch(1); + var blockingSettings = new SettingsConfig(null, null, null, null, null, null, null, null) { + @Override + public Integer getMaxPageSearchSize() { + try { + // block the indexer thread by stopping it when it tries to initialize the pageSize to null + settingsLatch.await(); + } catch (InterruptedException e) { + fail(e, "Failed test waiting for settings latch to release."); + } + return null; + } + }; + + var config = new TransformConfig( + randomAlphaOfLength(10), + randomSourceConfig(), + randomDestConfig(), + null, + new TimeSyncConfig("timestamp", TimeValue.timeValueSeconds(1)), + null, + randomPivotConfig(), + null, + randomBoolean() ? null : randomAlphaOfLengthBetween(1, 1000), + blockingSettings, + null, + null, + null, + null + ); + + var context = new TransformContext(TransformTaskState.STARTED, "", 0, mock(TransformContext.Listener.class)); + var indexer = createMockIndexer( + 1, + config, + new AtomicReference<>(IndexerState.STARTED), + null, + threadPool, + auditor, + new TransformIndexerStats(), + context + ); + + // add latches + indexer.addAfterFinishOrFailureLatch(); + + indexer.start(); + assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); + assertEquals(indexer.getState(), IndexerState.INDEXING); + + // simulate the user updating the pageSize setting to 20,000 + var configuredMaxPageSearchSize = 20_000; + indexer.applyNewSettings( + new SettingsConfig.Builder(SettingsConfig.EMPTY).setMaxPageSearchSize(configuredMaxPageSearchSize).build() + ); + + // unblock the indexer thread, which will now try to update the pageSize setting to null + settingsLatch.countDown(); + // wait for the indexer to finish + indexer.waitForAfterFinishOrFailureLatch(10, TimeUnit.SECONDS); + + // verify that the pageSize is the new applied setting and not null + assertEquals(configuredMaxPageSearchSize, context.getPageSize()); + } + private MockedTransformIndexer createMockIndexer( int numberOfLoops, TransformConfig config, diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java index ba07c3137340d..ced131640f0ee 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java @@ -34,6 +34,7 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; import static org.elasticsearch.common.settings.Setting.positiveTimeSetting; @@ -50,6 +51,7 @@ public class TickerScheduleTriggerEngine extends ScheduleTriggerEngine { private final TimeValue tickInterval; private final Map<String, ActiveSchedule> schedules = new ConcurrentHashMap<>(); private final Ticker ticker; + private final AtomicBoolean isRunning = new AtomicBoolean(false); public TickerScheduleTriggerEngine(Settings settings, ScheduleRegistry scheduleRegistry, Clock clock) { super(scheduleRegistry, clock); @@ -60,7 +62,8 @@ public TickerScheduleTriggerEngine(Settings settings, ScheduleRegistry scheduleR @Override public synchronized void start(Collection<Watch> jobs) { long startTime = clock.millis(); - logger.info("Watcher starting watches at {}", WatcherDateTimeUtils.dateTimeFormatter.formatMillis(startTime)); + isRunning.set(true); + logger.info("Starting watcher engine at {}", WatcherDateTimeUtils.dateTimeFormatter.formatMillis(startTime)); Map<String, ActiveSchedule> startingSchedules = Maps.newMapWithExpectedSize(jobs.size()); for (Watch job : jobs) { if (job.trigger() instanceof ScheduleTrigger trigger) { @@ -81,17 +84,22 @@ public synchronized void start(Collection<Watch> jobs) { @Override public void stop() { + logger.info("Stopping watcher engine"); + isRunning.set(false); schedules.clear(); ticker.close(); } @Override - public synchronized void pauseExecution() { + public void pauseExecution() { + logger.info("Pausing watcher engine"); + isRunning.set(false); schedules.clear(); } @Override public void add(Watch watch) { + logger.trace("Adding watch [{}] to engine (engine is running: {})", watch.id(), isRunning.get()); assert watch.trigger() instanceof ScheduleTrigger; ScheduleTrigger trigger = (ScheduleTrigger) watch.trigger(); ActiveSchedule currentSchedule = schedules.get(watch.id()); @@ -106,13 +114,25 @@ public void add(Watch watch) { @Override public boolean remove(String jobId) { + logger.debug("Removing watch [{}] from engine (engine is running: {})", jobId, isRunning.get()); return schedules.remove(jobId) != null; } void checkJobs() { + if (isRunning.get() == false) { + logger.debug( + "Watcher not running because the engine is paused. Currently scheduled watches being skipped: {}", + schedules.size() + ); + return; + } long triggeredTime = clock.millis(); List<TriggerEvent> events = new ArrayList<>(); for (ActiveSchedule schedule : schedules.values()) { + if (isRunning.get() == false) { + logger.debug("Watcher paused while running [{}]", schedule.name); + break; + } long scheduledTime = schedule.check(triggeredTime); if (scheduledTime > 0) { ZonedDateTime triggeredDateTime = utcDateTimeAtEpochMillis(triggeredTime); diff --git a/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java b/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java index 6f2c15836a04a..52dbc3d9e1ecb 100644 --- a/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java +++ b/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java @@ -238,8 +238,14 @@ Builder nullValue(String nullValue) { @Override public WildcardFieldMapper build(MapperBuilderContext context) { return new WildcardFieldMapper( - name(), - new WildcardFieldType(context.buildFullName(name()), nullValue.get(), ignoreAbove.get(), indexVersionCreated, meta.get()), + leafName(), + new WildcardFieldType( + context.buildFullName(leafName()), + nullValue.get(), + ignoreAbove.get(), + indexVersionCreated, + meta.get() + ), ignoreAbove.get(), context.isSourceSynthetic(), multiFieldsBuilder.build(this, context), @@ -943,7 +949,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio if (value.length() <= ignoreAbove) { createFields(value, parseDoc, fields); } else { - context.addIgnoredField(name()); + context.addIgnoredField(fullPath()); if (storeIgnored) { parseDoc.add(new StoredField(originalName(), new BytesRef(value))); } @@ -953,7 +959,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio } private String originalName() { - return name() + "._original"; + return fullPath() + "._original"; } void createFields(String value, LuceneDocument parseDoc, List<IndexableField> fields) { @@ -982,7 +988,7 @@ protected String contentType() { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), indexVersionCreated).init(this); + return new Builder(leafName(), indexVersionCreated).init(this); } @Override @@ -994,7 +1000,7 @@ protected SyntheticSourceMode syntheticSourceMode() { public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { if (copyTo.copyToFields().isEmpty() != true) { throw new IllegalArgumentException( - "field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" + "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); } return new WildcardSyntheticFieldLoader(); } @@ -1017,7 +1023,7 @@ public Stream<Map.Entry<String, StoredFieldLoader>> storedFieldLoaders() { @Override public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf) throws IOException { - BinaryDocValues values = leafReader.getBinaryDocValues(name()); + BinaryDocValues values = leafReader.getBinaryDocValues(fullPath()); if (values == null) { docValueCount = 0; return null; } @@ -1047,10 +1053,10 @@ public void write(XContentBuilder b) throws IOException { case 0: return; case 1: - b.field(simpleName()); + b.field(leafName()); break; default: - b.startArray(simpleName()); + b.startArray(leafName()); } for (int i = 0; i < docValueCount; i++) { int length =
docValuesStream.readVInt(); @@ -1069,7 +1075,7 @@ public void write(XContentBuilder b) throws IOException { @Override public String fieldName() { - return name(); + return fullPath(); } } } diff --git a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldAggregationTests.java b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldAggregationTests.java index 6f5b75441e10a..2840b3788644c 100644 --- a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldAggregationTests.java +++ b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldAggregationTests.java @@ -54,7 +54,7 @@ private void addFields(LuceneDocument parseDoc, Document doc, String docContent) } private void indexDoc(LuceneDocument parseDoc, Document doc, RandomIndexWriter iw) throws IOException { - IndexableField field = parseDoc.getByKey(wildcardFieldMapper.name()); + IndexableField field = parseDoc.getByKey(wildcardFieldMapper.fullPath()); if (field != null) { doc.add(field); } diff --git a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java index 07d4491daedf6..578fc90bd3e5b 100644 --- a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java +++ b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java @@ -1136,7 +1136,7 @@ private void addFields(LuceneDocument parseDoc, Document doc, String docContent) } private void indexDoc(LuceneDocument parseDoc, Document doc, RandomIndexWriter iw) throws IOException { - IndexableField field = parseDoc.getByKey(wildcardFieldType.name()); + IndexableField field = parseDoc.getByKey(wildcardFieldType.fullPath()); if (field != null) { doc.add(field); } diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java index c7c51a2a96c87..d7cab65df1ac9 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java @@ -33,6 +33,7 @@ public abstract class AbstractUpgradeTestCase extends ESRestTestCase { ); protected static final String UPGRADE_FROM_VERSION = System.getProperty("tests.upgrade_from_version"); + protected static final boolean FIRST_MIXED_ROUND = Boolean.parseBoolean(System.getProperty("tests.first_round", "false")); protected static final boolean SKIP_ML_TESTS = Booleans.parseBoolean(System.getProperty("tests.ml.skip", "false")); protected static boolean isOriginalCluster(String clusterVersion) { diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRolesMetadataMigrationIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRolesMetadataMigrationIT.java index 4b39f71dea1a9..d31130e970f03 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRolesMetadataMigrationIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRolesMetadataMigrationIT.java @@ -6,54 +6,70 @@ */ package org.elasticsearch.upgrades; +import org.elasticsearch.client.Node; 
import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import java.io.IOException; +import java.util.ArrayList; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Set; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.security.action.UpdateIndexMigrationVersionAction.MIGRATION_VERSION_CUSTOM_KEY; +import static org.elasticsearch.xpack.core.security.test.TestRestrictedIndices.INTERNAL_SECURITY_MAIN_INDEX_7; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; public class SecurityIndexRolesMetadataMigrationIT extends AbstractUpgradeTestCase { - public void testMetadataMigratedAfterUpgrade() throws Exception { - String testRole = "test-role"; - String metaKey = "test_key"; - String metaValue = "test_value"; - - Map testMetadata = Map.of(metaKey, metaValue); + public void testRoleMigration() throws Exception { + String oldTestRole = "old-test-role"; + String mixed1TestRole = "mixed1-test-role"; + String mixed2TestRole = "mixed2-test-role"; + String upgradedTestRole = "upgraded-test-role"; if (CLUSTER_TYPE == ClusterType.OLD) { - createRole(testRole, testMetadata); - assertEntityInSecurityIndex(testRole); - } - if (CLUSTER_TYPE == ClusterType.UPGRADED) { - refreshSecurityIndex(); - waitForMigrationCompletion(); - assertEntityInSecurityIndex(testRole, metaKey, metaValue); - } - } - - public void testMetadataWrittenAfterUpgradeWithoutMigration() throws IOException { - String testRole = "another-test-role"; - String metaKey = "another-test_key"; - String metaValue = "another-test_value"; - - Map testMetadata = Map.of(metaKey, metaValue); - - if (CLUSTER_TYPE == ClusterType.UPGRADED) { - createRole(testRole, testMetadata); - assertEntityInSecurityIndex(testRole, metaKey, metaValue); + createRoleWithMetadata(oldTestRole, Map.of("meta", "test")); + assertDocInSecurityIndex(oldTestRole); + if (canRolesBeMigrated() == false) { + assertNoMigration(adminClient()); + assertCannotQueryRolesByMetadata(client()); + } + } else if (CLUSTER_TYPE == ClusterType.MIXED) { + if (FIRST_MIXED_ROUND) { + createRoleWithMetadata(mixed1TestRole, Map.of("meta", "test")); + assertDocInSecurityIndex(mixed1TestRole); + } else { + createRoleWithMetadata(mixed2TestRole, Map.of("meta", "test")); + assertDocInSecurityIndex(mixed2TestRole); + } + if (canRolesBeMigrated() == false) { + assertNoMigration(adminClient()); + assertCannotQueryRolesByMetadata(client()); + } + } else if (CLUSTER_TYPE == ClusterType.UPGRADED) { + createRoleWithMetadata(upgradedTestRole, Map.of("meta", "test")); + assertTrue(canRolesBeMigrated()); + waitForMigrationCompletion(adminClient()); + assertMigratedDocInSecurityIndex(oldTestRole, "meta", "test"); + assertMigratedDocInSecurityIndex(mixed1TestRole, "meta", "test"); + assertMigratedDocInSecurityIndex(mixed2TestRole, "meta", "test"); + assertMigratedDocInSecurityIndex(upgradedTestRole, "meta", "test"); + // queries all roles by metadata + assertAllRoles(client(), "mixed1-test-role", "mixed2-test-role", "old-test-role", "upgraded-test-role"); } } @SuppressWarnings("unchecked") - private void assertEntityInSecurityIndex(String 
roleName, String metaKey, String metaValue) throws IOException { + private void assertMigratedDocInSecurityIndex(String roleName, String metaKey, String metaValue) throws IOException { final Request request = new Request("POST", "/.security/_search"); RequestOptions.Builder options = request.getOptions().toBuilder(); request.setJsonEntity( @@ -79,7 +95,7 @@ private void assertEntityInSecurityIndex(String roleName, String metaKey, String } @SuppressWarnings("unchecked") - private void assertEntityInSecurityIndex(String id) throws IOException { + private void assertDocInSecurityIndex(String id) throws IOException { final Request request = new Request("POST", "/.security/_search"); RequestOptions.Builder options = request.getOptions().toBuilder(); request.setJsonEntity(String.format(Locale.ROOT, """ @@ -108,21 +124,36 @@ private void addExpectWarningOption(RequestOptions.Builder options) { } @SuppressWarnings("unchecked") - private void waitForMigrationCompletion() throws Exception { - final Request request = new Request("GET", "_cluster/state/metadata/.security-7"); + private static void assertNoMigration(RestClient adminClient) throws Exception { + Request request = new Request("GET", "_cluster/state/metadata/" + INTERNAL_SECURITY_MAIN_INDEX_7); + Response response = adminClient.performRequest(request); + assertOK(response); + Map<String, Object> responseMap = responseAsMap(response); + Map<String, Object> indicesMetadataMap = (Map<String, Object>) ((Map<String, Object>) responseMap.get("metadata")).get("indices"); + assertTrue(indicesMetadataMap.containsKey(INTERNAL_SECURITY_MAIN_INDEX_7)); + assertFalse( + ((Map<String, Object>) indicesMetadataMap.get(INTERNAL_SECURITY_MAIN_INDEX_7)).containsKey(MIGRATION_VERSION_CUSTOM_KEY) + ); + } + + @SuppressWarnings("unchecked") + private static void waitForMigrationCompletion(RestClient adminClient) throws Exception { + final Request request = new Request("GET", "_cluster/state/metadata/" + INTERNAL_SECURITY_MAIN_INDEX_7); assertBusy(() -> { - Response response = adminClient().performRequest(request); + Response response = adminClient.performRequest(request); assertOK(response); Map<String, Object> responseMap = responseAsMap(response); + Map<String, Object> indicesMetadataMap = (Map<String, Object>) ((Map<String, Object>) responseMap.get("metadata")).get( + "indices" + ); + assertTrue(indicesMetadataMap.containsKey(INTERNAL_SECURITY_MAIN_INDEX_7)); assertTrue( - ((Map<String, Object>) ((Map<String, Object>) ((Map<String, Object>) responseMap.get("metadata")).get("indices")).get( - ".security-7" - )).containsKey("migration_version") + ((Map<String, Object>) indicesMetadataMap.get(INTERNAL_SECURITY_MAIN_INDEX_7)).containsKey(MIGRATION_VERSION_CUSTOM_KEY) ); }); } - private void createRole(String roleName, Map<String, Object> metadata) throws IOException { + private void createRoleWithMetadata(String roleName, Map<String, Object> metadata) throws IOException { final Request request = new Request("POST", "/_security/role/" + roleName); BytesReference source = BytesReference.bytes( jsonBuilder().map( @@ -135,15 +166,56 @@ private void createRole(String roleName, Map<String, Object> metadata) throws IO ) ); request.setJsonEntity(source.utf8ToString()); - assertOK(adminClient().performRequest(request)); - refreshSecurityIndex(); + assertOK(client().performRequest(request)); } - private void refreshSecurityIndex() throws IOException { - Request request = new Request("POST", "/.security-7/_refresh"); - RequestOptions.Builder options = request.getOptions().toBuilder(); - addExpectWarningOption(options); - request.setOptions(options); - assertOK(adminClient().performRequest(request)); + private void assertCannotQueryRolesByMetadata(RestClient client) { + List<Node> originalNodes = client.getNodes(); + try { + // try the query on every node (upgraded or not) + for (Node node : originalNodes) { + client.setNodes(List.of(node)); + String metadataQuery = """ + {"query":{"exists":{"field":"metadata.test"}}}"""; + Request request = new Request(randomFrom("POST", "GET"), "/_security/_query/role"); + request.setJsonEntity(metadataQuery); + ResponseException e = expectThrows(ResponseException.class, () -> client.performRequest(request)); + if (e.getResponse().getStatusLine().getStatusCode() == 400) { + // this is an old node that doesn't know about the API + // note that 7.17 shows different error messages from "no handler found for uri" + } else if (e.getResponse().getStatusLine().getStatusCode() == 503) { + // this is an upgraded node, but migration does not work + assertThat(e.getMessage(), containsString("Cannot query or sort role metadata until automatic migration completed")); + } else { + fail(e, "Unexpected exception type"); + } + } + } finally { + client.setNodes(originalNodes); + } + } + + private void assertAllRoles(RestClient client, String... roleNames) throws IOException { + // this queries all roles by metadata + String metadataQuery = """ + {"query":{"bool":{"must":[{"exists":{"field":"metadata.meta"}}]}},"sort":["name"]}"""; + Request request = new Request(randomFrom("POST", "GET"), "/_security/_query/role"); + request.setJsonEntity(metadataQuery); + Response response = client.performRequest(request); + assertOK(response); + Map<String, Object> responseMap = responseAsMap(response); + assertThat(responseMap.get("total"), is(roleNames.length)); + assertThat(responseMap.get("count"), is(roleNames.length)); + @SuppressWarnings("unchecked") + List<Map<String, Object>> roles = new ArrayList<>((List<Map<String, Object>>) responseMap.get("roles")); + assertThat(roles.size(), is(responseMap.get("count"))); + for (int i = 0; i < roleNames.length; i++) { + assertThat(roles.get(i).get("name"), equalTo(roleNames[i])); + } + } + + private boolean canRolesBeMigrated() { + return clusterHasFeature("security.migration_framework") != false + && clusterHasFeature("security.roles_metadata_flattened") != false; + } } diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_categories.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_categories.json index daf4d9bfc7889..4fce55f682248 100644 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_categories.json +++ b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_categories.json @@ -34,7 +34,7 @@ } }, { - "path":"/_xpack/ml/anomaly_detectors/{job_id}/results/categories/", + "path":"/_xpack/ml/anomaly_detectors/{job_id}/results/categories", "methods":[ "GET", "POST" diff --git a/x-pack/rest-resources-zip/build.gradle b/x-pack/rest-resources-zip/build.gradle index 2ac8bd65ddc36..3d0533b4ec57e 100644 --- a/x-pack/rest-resources-zip/build.gradle +++ b/x-pack/rest-resources-zip/build.gradle @@ -25,6 +25,7 @@ dependencies { freeCompatTests project(path: ':rest-api-spec', configuration: 'restCompatTests') platinumTests project(path: ':x-pack:plugin', configuration: 'restXpackTests') platinumTests project(path: ':x-pack:plugin:eql:qa:rest', configuration: 'restXpackTests') + platinumTests project(path: ':x-pack:plugin:ent-search:qa:rest', configuration: 'restXpackTests') platinumCompatTests project(path: ':x-pack:plugin', configuration: 'restCompatTests') platinumCompatTests project(path:
':x-pack:plugin:eql:qa:rest', configuration: 'restCompatTests') }
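
Reviewer note: the TickerScheduleTriggerEngine change above adds an AtomicBoolean so that a stopped or paused engine keeps its ticker thread alive but skips triggering scheduled watches, checking the flag both before a pass over the schedules and again inside the loop. The following is a minimal, self-contained Java sketch of that pause-guard pattern only; the class and method names are hypothetical stand-ins, not the Elasticsearch implementation or its API.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;

public class PausableTickerSketch {
    // hypothetical stand-ins for the engine's schedule map and running flag
    private final Map<String, Runnable> schedules = new ConcurrentHashMap<>();
    private final AtomicBoolean isRunning = new AtomicBoolean(false);

    void start(Map<String, Runnable> jobs) {
        isRunning.set(true);
        schedules.putAll(jobs);
    }

    void pauseExecution() {
        // stop triggering, but leave the ticker/checker thread running
        isRunning.set(false);
        schedules.clear();
    }

    void checkJobs() {
        if (isRunning.get() == false) {
            // paused or stopped: skip the whole pass
            return;
        }
        for (Map.Entry<String, Runnable> schedule : schedules.entrySet()) {
            if (isRunning.get() == false) {
                // paused mid-pass: stop triggering the remaining schedules
                break;
            }
            schedule.getValue().run();
        }
    }

    public static void main(String[] args) {
        PausableTickerSketch engine = new PausableTickerSketch();
        engine.start(Map.of("watch-1", () -> System.out.println("triggered watch-1")));
        engine.checkJobs();      // triggers watch-1
        engine.pauseExecution();
        engine.checkJobs();      // no-op: the engine is paused
    }
}

The double check (once before the pass and once per schedule) mirrors the intent of the diff: a pause can race with an in-flight check, so the flag is re-read inside the loop to stop triggering as soon as the engine is paused.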